#include "internal.h" #include "internal/sanitizers.h" #include "internal/string.h" #include "internal/hash.h" #include "internal/variable.h" #include "internal/compile.h" #include "internal/class.h" #include "internal/fixnum.h" #include "internal/numeric.h" #include "internal/gc.h" #include "internal/vm.h" #include "vm_core.h" #include "vm_callinfo.h" #include "builtin.h" #include "insns.inc" #include "insns_info.inc" #include "vm_sync.h" #include "yjit.h" #include "vm_insnhelper.h" #include "probes.h" #include "probes_helper.h" #include "iseq.h" #include "ruby/debug.h" #include "internal/cont.h" // For mmapp(), sysconf() #ifndef _WIN32 #include #include #endif #include // Address space reservation. Memory pages are mapped on an as needed basis. // See the Rust mm module for details. uint8_t * rb_zjit_reserve_addr_space(uint32_t mem_size) { #ifndef _WIN32 uint8_t *mem_block; // On Linux #if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE) uint32_t const page_size = (uint32_t)sysconf(_SC_PAGESIZE); uint8_t *const cfunc_sample_addr = (void *)(uintptr_t)&rb_zjit_reserve_addr_space; uint8_t *const probe_region_end = cfunc_sample_addr + INT32_MAX; // Align the requested address to page size uint8_t *req_addr = align_ptr(cfunc_sample_addr, page_size); // Probe for addresses close to this function using MAP_FIXED_NOREPLACE // to improve odds of being in range for 32-bit relative call instructions. do { mem_block = mmap( req_addr, mem_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0 ); // If we succeeded, stop if (mem_block != MAP_FAILED) { ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_zjit_reserve_addr_space"); break; } // -4MiB. Downwards to probe away from the heap. (On x86/A64 Linux // main_code_addr < heap_addr, and in case we are in a shared // library mapped higher than the heap, downwards is still better // since it's towards the end of the heap rather than the stack.) req_addr -= 4 * 1024 * 1024; } while (req_addr < probe_region_end); // On MacOS and other platforms #else // Try to map a chunk of memory as executable mem_block = mmap( (void *)rb_zjit_reserve_addr_space, mem_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 ); #endif // Fallback if (mem_block == MAP_FAILED) { // Try again without the address hint (e.g., valgrind) mem_block = mmap( NULL, mem_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 ); if (mem_block != MAP_FAILED) { ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_zjit_reserve_addr_space:fallback"); } } // Check that the memory mapping was successful if (mem_block == MAP_FAILED) { perror("ruby: zjit: mmap:"); if(errno == ENOMEM) { // No crash report if it's only insufficient memory exit(EXIT_FAILURE); } rb_bug("mmap failed"); } return mem_block; #else // Windows not supported for now return NULL; #endif } void rb_zjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec, bool jit_exception) { RB_VM_LOCK_ENTER(); rb_vm_barrier(); // Compile a block version starting at the current instruction uint8_t *rb_zjit_iseq_gen_entry_point(const rb_iseq_t *iseq, rb_execution_context_t *ec); // defined in Rust uintptr_t code_ptr = (uintptr_t)rb_zjit_iseq_gen_entry_point(iseq, ec); // TODO: support jit_exception iseq->body->jit_entry = (rb_jit_func_t)code_ptr; RB_VM_LOCK_LEAVE(); } unsigned int rb_iseq_encoded_size(const rb_iseq_t *iseq) { return iseq->body->iseq_size; } // Get the opcode given a program counter. Can return trace opcode variants. 
// Get the opcode given a program counter. Can return trace opcode variants.
int
rb_iseq_opcode_at_pc(const rb_iseq_t *iseq, const VALUE *pc)
{
    // YJIT should only use iseqs after AST to bytecode compilation
    RUBY_ASSERT_ALWAYS(FL_TEST_RAW((VALUE)iseq, ISEQ_TRANSLATED));

    const VALUE at_pc = *pc;
    return rb_vm_insn_addr2opcode((const void *)at_pc);
}

// Get the PC for a given index in an iseq
VALUE *
rb_iseq_pc_at_idx(const rb_iseq_t *iseq, uint32_t insn_idx)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
    RUBY_ASSERT_ALWAYS(insn_idx < iseq->body->iseq_size);
    VALUE *encoded = iseq->body->iseq_encoded;
    VALUE *pc = &encoded[insn_idx];
    return pc;
}
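// Illustrative sketch only (kept out of the build with #if 0): how the two
// helpers above compose to read the opcode at a given instruction index of an
// iseq. The wrapper name is hypothetical; the calls match the signatures
// defined in this file.
#if 0
static int
zjit_example_opcode_at_idx(const rb_iseq_t *iseq, uint32_t insn_idx)
{
    // Translate the index into a PC, then decode the (possibly trace-variant) opcode.
    const VALUE *pc = rb_iseq_pc_at_idx(iseq, insn_idx);
    return rb_iseq_opcode_at_pc(iseq, pc);
}
#endif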