diff --git a/depend b/depend
index 7fdf369158..b9d91faa2a 100644
--- a/depend
+++ b/depend
@@ -19732,6 +19732,7 @@ vm_trace.$(OBJEXT): {$(VPATH)}vm_opts.h
 vm_trace.$(OBJEXT): {$(VPATH)}vm_sync.h
 vm_trace.$(OBJEXT): {$(VPATH)}vm_trace.c
 vm_trace.$(OBJEXT): {$(VPATH)}yjit.h
+vm_trace.$(OBJEXT): {$(VPATH)}zjit.h
 weakmap.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
 weakmap.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
 weakmap.$(OBJEXT): $(CCAN_DIR)/list/list.h
diff --git a/jit.c b/jit.c
index 0709c6d8f0..efecbef354 100644
--- a/jit.c
+++ b/jit.c
@@ -492,3 +492,51 @@ rb_jit_vm_unlock(unsigned int *recursive_lock_level, const char *file, int line)
 {
     rb_vm_lock_leave(recursive_lock_level, file, line);
 }
+
+void
+rb_iseq_reset_jit_func(const rb_iseq_t *iseq)
+{
+    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
+    iseq->body->jit_entry = NULL;
+    iseq->body->jit_exception = NULL;
+    // Enable re-compiling this ISEQ. Even when it's invalidated for TracePoint,
+    // we'd like to re-compile ISEQs that haven't been converted to trace_* insns.
+    iseq->body->jit_entry_calls = 0;
+    iseq->body->jit_exception_calls = 0;
+}
+
+// Callback data for rb_jit_for_each_iseq
+struct iseq_callback_data {
+    rb_iseq_callback callback;
+    void *data;
+};
+
+// Heap-walking callback for rb_jit_for_each_iseq
+static int
+for_each_iseq_i(void *vstart, void *vend, size_t stride, void *data)
+{
+    const struct iseq_callback_data *callback_data = (struct iseq_callback_data *)data;
+    VALUE v = (VALUE)vstart;
+    for (; v != (VALUE)vend; v += stride) {
+        void *ptr = rb_asan_poisoned_object_p(v);
+        rb_asan_unpoison_object(v, false);
+
+        if (rb_obj_is_iseq(v)) {
+            rb_iseq_t *iseq = (rb_iseq_t *)v;
+            callback_data->callback(iseq, callback_data->data);
+        }
+
+        if (ptr) {
+            rb_asan_poison_object(v);
+        }
+    }
+    return 0;
+}
+
+// Walk all ISEQs in the heap and invoke the callback - shared between YJIT and ZJIT
+void
+rb_jit_for_each_iseq(rb_iseq_callback callback, void *data)
+{
+    struct iseq_callback_data callback_data = { .callback = callback, .data = data };
+    rb_objspace_each_objects(for_each_iseq_i, (void *)&callback_data);
+}
diff --git a/test/ruby/test_zjit.rb b/test/ruby/test_zjit.rb
index e79e80fb44..0353a48eec 100644
--- a/test/ruby/test_zjit.rb
+++ b/test/ruby/test_zjit.rb
@@ -2202,6 +2202,44 @@ class TestZJIT < Test::Unit::TestCase
     }
   end
 
+  def test_global_tracepoint
+    assert_compiles 'true', %q{
+      def foo = 1
+
+      foo
+      foo
+
+      called = false
+
+      tp = TracePoint.new(:return) { |event|
+        if event.method_id == :foo
+          called = true
+        end
+      }
+      tp.enable do
+        foo
+      end
+      called
+    }
+  end
+
+  def test_local_tracepoint
+    assert_compiles 'true', %q{
+      def foo = 1
+
+      foo
+      foo
+
+      called = false
+
+      tp = TracePoint.new(:return) { |_| called = true }
+      tp.enable(target: method(:foo)) do
+        foo
+      end
+      called
+    }
+  end
+
   private
 
   # Assert that every method call in `test_script` can be compiled by ZJIT
diff --git a/vm_trace.c b/vm_trace.c
index cb4feff147..1069d36dd0 100644
--- a/vm_trace.c
+++ b/vm_trace.c
@@ -35,6 +35,7 @@
 #include "vm_core.h"
 #include "ruby/ractor.h"
 #include "yjit.h"
+#include "zjit.h"
 
 #include "builtin.h"
 
@@ -135,6 +136,7 @@ update_global_event_hook(rb_event_flag_t prev_events, rb_event_flag_t new_events
         // Do this after event flags updates so other ractors see updated vm events
         // when they wake up.
         rb_yjit_tracing_invalidate_all();
+        rb_zjit_tracing_invalidate_all();
     }
 }
 
@@ -1285,6 +1287,7 @@ rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
     }
 
     rb_yjit_tracing_invalidate_all();
+    rb_zjit_tracing_invalidate_all();
 
     ruby_vm_event_local_num++;
 
diff --git a/yjit.c b/yjit.c
index bca0df96fd..b38f860ed5 100644
--- a/yjit.c
+++ b/yjit.c
@@ -413,18 +413,6 @@ rb_iseq_set_yjit_payload(const rb_iseq_t *iseq, void *payload)
     iseq->body->yjit_payload = payload;
 }
 
-void
-rb_iseq_reset_jit_func(const rb_iseq_t *iseq)
-{
-    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
-    iseq->body->jit_entry = NULL;
-    iseq->body->jit_exception = NULL;
-    // Enable re-compiling this ISEQ. Event when it's invalidated for TracePoint,
-    // we'd like to re-compile ISEQs that haven't been converted to trace_* insns.
-    iseq->body->jit_entry_calls = 0;
-    iseq->body->jit_exception_calls = 0;
-}
-
 rb_proc_t *
 rb_yjit_get_proc_ptr(VALUE procv)
 {
@@ -643,41 +631,6 @@ rb_yjit_constcache_shareable(const struct iseq_inline_constant_cache_entry *ice)
     return (ice->flags & IMEMO_CONST_CACHE_SHAREABLE) != 0;
 }
 
-// Used for passing a callback and other data over rb_objspace_each_objects
-struct iseq_callback_data {
-    rb_iseq_callback callback;
-    void *data;
-};
-
-// Heap-walking callback for rb_yjit_for_each_iseq().
-static int
-for_each_iseq_i(void *vstart, void *vend, size_t stride, void *data)
-{
-    const struct iseq_callback_data *callback_data = (struct iseq_callback_data *)data;
-    VALUE v = (VALUE)vstart;
-    for (; v != (VALUE)vend; v += stride) {
-        void *ptr = rb_asan_poisoned_object_p(v);
-        rb_asan_unpoison_object(v, false);
-
-        if (rb_obj_is_iseq(v)) {
-            rb_iseq_t *iseq = (rb_iseq_t *)v;
-            callback_data->callback(iseq, callback_data->data);
-        }
-
-        asan_poison_object_if(ptr, v);
-    }
-    return 0;
-}
-
-// Iterate through the whole GC heap and invoke a callback for each iseq.
-// Used for global code invalidation.
-void
-rb_yjit_for_each_iseq(rb_iseq_callback callback, void *data)
-{
-    struct iseq_callback_data callback_data = { .callback = callback, .data = data };
-    rb_objspace_each_objects(for_each_iseq_i, (void *)&callback_data);
-}
-
 // For running write barriers from Rust. Required when we add a new edge in the
 // object graph from `old` to `young`.
 void
diff --git a/yjit/bindgen/src/main.rs b/yjit/bindgen/src/main.rs
index d30fb6c779..2fc85431e0 100644
--- a/yjit/bindgen/src/main.rs
+++ b/yjit/bindgen/src/main.rs
@@ -333,7 +333,6 @@ fn main() {
         .allowlist_function("rb_yjit_constcache_shareable")
         .allowlist_function("rb_iseq_reset_jit_func")
         .allowlist_function("rb_yjit_dump_iseq_loc")
-        .allowlist_function("rb_yjit_for_each_iseq")
         .allowlist_function("rb_yjit_obj_written")
         .allowlist_function("rb_yjit_str_simple_append")
         .allowlist_function("rb_RSTRING_PTR")
@@ -355,6 +354,7 @@ fn main() {
         .allowlist_function("rb_jit_multi_ractor_p")
         .allowlist_function("rb_jit_vm_lock_then_barrier")
         .allowlist_function("rb_jit_vm_unlock")
+        .allowlist_function("rb_jit_for_each_iseq")
         .allowlist_type("robject_offsets")
 
         // from vm_sync.h
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index d42726bcc7..f8c80c0c86 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -1818,7 +1818,7 @@ pub fn for_each_iseq<F: FnMut(IseqPtr)>(mut callback: F) {
         callback(iseq);
     }
     let mut data: &mut dyn FnMut(IseqPtr) = &mut callback;
-    unsafe { rb_yjit_for_each_iseq(Some(callback_wrapper), (&mut data) as *mut _ as *mut c_void) };
+    unsafe { rb_jit_for_each_iseq(Some(callback_wrapper), (&mut data) as *mut _ as *mut c_void) };
 }
 
 /// Iterate over all on-stack ISEQs
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index 4d52b675a0..429330168b 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -1185,7 +1185,6 @@ extern "C" {
     pub fn rb_full_cfunc_return(ec: *mut rb_execution_context_t, return_value: VALUE);
     pub fn rb_iseq_get_yjit_payload(iseq: *const rb_iseq_t) -> *mut ::std::os::raw::c_void;
     pub fn rb_iseq_set_yjit_payload(iseq: *const rb_iseq_t, payload: *mut ::std::os::raw::c_void);
-    pub fn rb_iseq_reset_jit_func(iseq: *const rb_iseq_t);
     pub fn rb_yjit_get_proc_ptr(procv: VALUE) -> *mut rb_proc_t;
     pub fn rb_get_symbol_id(namep: VALUE) -> ID;
     pub fn rb_get_def_bmethod_proc(def: *mut rb_method_definition_t) -> VALUE;
@@ -1219,7 +1218,6 @@ extern "C" {
     pub fn rb_RSTRUCT_SET(st: VALUE, k: ::std::os::raw::c_int, v: VALUE);
     pub fn rb_ENCODING_GET(obj: VALUE) -> ::std::os::raw::c_int;
     pub fn rb_yjit_constcache_shareable(ice: *const iseq_inline_constant_cache_entry) -> bool;
-    pub fn rb_yjit_for_each_iseq(callback: rb_iseq_callback, data: *mut ::std::os::raw::c_void);
     pub fn rb_yjit_obj_written(
         old: VALUE,
         young: VALUE,
@@ -1328,4 +1326,6 @@ extern "C" {
         file: *const ::std::os::raw::c_char,
         line: ::std::os::raw::c_int,
     );
+    pub fn rb_iseq_reset_jit_func(iseq: *const rb_iseq_t);
+    pub fn rb_jit_for_each_iseq(callback: rb_iseq_callback, data: *mut ::std::os::raw::c_void);
 }
diff --git a/zjit.h b/zjit.h
index adf47046f8..45e91fa43c 100644
--- a/zjit.h
+++ b/zjit.h
@@ -23,6 +23,7 @@ void rb_zjit_constant_state_changed(ID id);
 void rb_zjit_iseq_mark(void *payload);
 void rb_zjit_iseq_update_references(void *payload);
 void rb_zjit_before_ractor_spawn(void);
+void rb_zjit_tracing_invalidate_all(void);
 #else
 #define rb_zjit_enabled_p false
 static inline void rb_zjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec, bool jit_exception) {}
@@ -33,6 +34,7 @@ static inline void rb_zjit_cme_invalidate(const rb_callable_method_entry_t *cme)
 static inline void rb_zjit_invalidate_ep_is_bp(const rb_iseq_t *iseq) {}
 static inline void rb_zjit_constant_state_changed(ID id) {}
 static inline void rb_zjit_before_ractor_spawn(void) {}
+static inline void rb_zjit_tracing_invalidate_all(void) {}
 #endif // #if USE_ZJIT
 
 #endif // #ifndef ZJIT_H
diff --git a/zjit/bindgen/src/main.rs b/zjit/bindgen/src/main.rs
index e57d0ae015..c608d482e2 100644
--- a/zjit/bindgen/src/main.rs
+++ b/zjit/bindgen/src/main.rs
@@ -349,7 +349,6 @@ fn main() {
         .allowlist_function("rb_full_cfunc_return")
         .allowlist_function("rb_assert_(iseq|cme)_handle")
         .allowlist_function("rb_IMEMO_TYPE_P")
-        .allowlist_function("rb_iseq_reset_jit_func")
         .allowlist_function("rb_RSTRING_PTR")
         .allowlist_function("rb_RSTRING_LEN")
         .allowlist_function("rb_ENCODING_GET")
@@ -368,6 +367,8 @@ fn main() {
         .allowlist_function("rb_jit_multi_ractor_p")
         .allowlist_function("rb_jit_vm_lock_then_barrier")
         .allowlist_function("rb_jit_vm_unlock")
+        .allowlist_function("rb_jit_for_each_iseq")
+        .allowlist_function("rb_iseq_reset_jit_func")
         .allowlist_type("robject_offsets")
 
         // from vm_sync.h
diff --git a/zjit/src/cruby.rs b/zjit/src/cruby.rs
index 899ed4d892..b82edc6633 100644
--- a/zjit/src/cruby.rs
+++ b/zjit/src/cruby.rs
@@ -88,7 +88,7 @@
 #![allow(unused_imports)]
 
 use std::convert::From;
-use std::ffi::{CString, CStr};
+use std::ffi::{c_void, CString, CStr};
 use std::fmt::{Debug, Formatter};
 use std::os::raw::{c_char, c_int, c_uint};
 use std::panic::{catch_unwind, UnwindSafe};
@@ -293,6 +293,17 @@ pub fn iseq_opcode_at_idx(iseq: IseqPtr, insn_idx: u32) -> u32 {
     unsafe { rb_iseq_opcode_at_pc(iseq, pc) as u32 }
 }
 
+/// Iterate over all existing ISEQs
+pub fn for_each_iseq<F: FnMut(IseqPtr)>(mut callback: F) {
+    unsafe extern "C" fn callback_wrapper(iseq: IseqPtr, data: *mut c_void) {
+        // SAFETY: points to the local below
+        let callback: &mut &mut dyn FnMut(IseqPtr) -> bool = unsafe { std::mem::transmute(&mut *data) };
+        callback(iseq);
+    }
+    let mut data: &mut dyn FnMut(IseqPtr) = &mut callback;
+    unsafe { rb_jit_for_each_iseq(Some(callback_wrapper), (&mut data) as *mut _ as *mut c_void) };
+}
+
 /// Return a poison value to be set above the stack top to verify leafness.
 #[cfg(not(test))]
 pub fn vm_stack_canary() -> u64 {
diff --git a/zjit/src/cruby_bindings.inc.rs b/zjit/src/cruby_bindings.inc.rs
index 88b9097697..9572756688 100644
--- a/zjit/src/cruby_bindings.inc.rs
+++ b/zjit/src/cruby_bindings.inc.rs
@@ -1029,4 +1029,6 @@ unsafe extern "C" {
         file: *const ::std::os::raw::c_char,
         line: ::std::os::raw::c_int,
     );
+    pub fn rb_iseq_reset_jit_func(iseq: *const rb_iseq_t);
+    pub fn rb_jit_for_each_iseq(callback: rb_iseq_callback, data: *mut ::std::os::raw::c_void);
 }
diff --git a/zjit/src/invariants.rs b/zjit/src/invariants.rs
index 14fea76d1b..9935336bc0 100644
--- a/zjit/src/invariants.rs
+++ b/zjit/src/invariants.rs
@@ -269,3 +269,25 @@ pub extern "C" fn rb_zjit_before_ractor_spawn() {
         cb.mark_all_executable();
     });
 }
+
+#[unsafe(no_mangle)]
+pub extern "C" fn rb_zjit_tracing_invalidate_all() {
+    use crate::gc::{get_or_create_iseq_payload, IseqStatus};
+    use crate::cruby::{for_each_iseq, rb_iseq_reset_jit_func};
+
+    if !zjit_enabled_p() {
+        return;
+    }
+
+    // Stop other ractors since we are going to patch machine code.
+    with_vm_lock(src_loc!(), || {
+        debug!("Invalidating all ZJIT compiled code due to TracePoint");
+
+        for_each_iseq(|iseq| {
+            let payload = get_or_create_iseq_payload(iseq);
+
+            payload.status = IseqStatus::NotCompiled;
+            unsafe { rb_iseq_reset_jit_func(iseq) };
+        });
+    });
+}