Mirror of https://github.com/ruby/ruby.git (synced 2025-09-15 16:44:01 +02:00)
YJIT: Fancier opt_getinlinecache
Make sure `opt_getinlinecache` is in a block all on its own, and invalidate it from the interpreter when `opt_setinlinecache` runs. It will recompile with a filled cache the second time around. This lets YJIT run well even when the IC for a constant starts out cold.
This commit is contained in:
parent e81d1f4ae3
commit b626dd7211
9 changed files with 146 additions and 73 deletions
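The diff below pairs each `opt_setinlinecache` with the `opt_getinlinecache` that precedes it, recording the get's instruction index on the inline cache. As a minimal standalone sketch of that one-pass pairing, using hypothetical opcode and struct names rather than CRuby's:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-ins for CRuby's opcodes and inline cache struct. */
enum insn_kind { INSN_OTHER, INSN_GET_IC, INSN_SET_IC };
struct inline_cache { unsigned get_insn_idx; };

int
main(void)
{
    enum insn_kind iseq[] = { INSN_OTHER, INSN_GET_IC, INSN_OTHER, INSN_SET_IC };
    struct inline_cache ic = { 0 };
    long getinlinecache_idx = -1; /* -1 means "no unmatched get pending" */

    for (long i = 0; i < (long)(sizeof(iseq) / sizeof(iseq[0])); i++) {
        if (iseq[i] == INSN_GET_IC) {
            assert(getinlinecache_idx < 0 && "one get per set, no nesting");
            getinlinecache_idx = i;
        }
        else if (iseq[i] == INSN_SET_IC) {
            assert(getinlinecache_idx >= 0);
            /* Record where the matching get lives so the JIT can find
             * and invalidate the code compiled for that instruction. */
            ic.get_insn_idx = (unsigned)getinlinecache_idx;
            getinlinecache_idx = -1;
        }
    }

    printf("set pairs with get at index %u\n", ic.get_insn_idx);
    return 0;
}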
compile.c (34 lines changed)
@@ -2259,6 +2259,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
     VALUE *generated_iseq;
     rb_event_flag_t events = 0;
     long data = 0;
+    long getinlinecache_idx = -1;
 
     int insn_num, code_index, insns_info_index, sp = 0;
     int stack_max = fix_sp_depth(iseq, anchor);
@@ -2362,6 +2363,11 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
                 types = insn_op_types(insn);
                 len = insn_len(insn);
 
+                if (insn == BIN(opt_getinlinecache)) {
+                    assert(getinlinecache_idx < 0 && "one get per set, no nesting");
+                    getinlinecache_idx = code_index;
+                }
+
                 for (j = 0; types[j]; j++) {
                     char type = types[j];
                     /* printf("--> [%c - (%d-%d)]\n", type, k, j); */
@@ -2419,6 +2425,13 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
                     }
                     generated_iseq[code_index + 1 + j] = (VALUE)ic;
                     FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
+
+                    if (insn == BIN(opt_setinlinecache) && type == TS_IC) {
+                        assert(getinlinecache_idx >= 0);
+                        // Store index to the matching opt_getinlinecache on the IC for YJIT
+                        ic->get_insn_idx = (unsigned)getinlinecache_idx;
+                        getinlinecache_idx = -1;
+                    }
                     break;
                 }
               case TS_CALLDATA:
@@ -11107,6 +11120,7 @@ ibf_load_code(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t bytecode_offset
     unsigned int code_index;
     ibf_offset_t reading_pos = bytecode_offset;
     VALUE *code = ALLOC_N(VALUE, iseq_size);
+    long getinlinecache_idx = -1;
 
     struct rb_iseq_constant_body *load_body = iseq->body;
     struct rb_call_data *cd_entries = load_body->call_data;
@@ -11114,13 +11128,22 @@ ibf_load_code(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t bytecode_offset
     for (code_index=0; code_index<iseq_size;) {
         /* opcode */
-        const VALUE insn = code[code_index++] = ibf_load_small_value(load, &reading_pos);
+        const VALUE insn = code[code_index] = ibf_load_small_value(load, &reading_pos);
         const char *types = insn_op_types(insn);
         int op_index;
 
+        if (insn == BIN(opt_getinlinecache)) {
+            assert(getinlinecache_idx < 0 && "one get per set, no nesting");
+            getinlinecache_idx = code_index;
+        }
+
+        code_index++;
+
         /* operands */
         for (op_index=0; types[op_index]; op_index++, code_index++) {
-            switch (types[op_index]) {
+            char type = types[op_index];
+            switch (type) {
               case TS_CDHASH:
               case TS_VALUE:
                 {
                     VALUE op = ibf_load_small_value(load, &reading_pos);
@@ -11168,6 +11191,13 @@ ibf_load_code(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t bytecode_offset
                 {
                     VALUE op = ibf_load_small_value(load, &reading_pos);
                     code[code_index] = (VALUE)&is_entries[op];
+
+                    if (insn == BIN(opt_setinlinecache) && type == TS_IC) {
+                        assert(getinlinecache_idx >= 0);
+                        // Store index to the matching opt_getinlinecache on the IC for YJIT
+                        is_entries[op].ic_cache.get_insn_idx = (unsigned)getinlinecache_idx;
+                        getinlinecache_idx = -1;
+                    }
                 }
                 FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
                 break;
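For intuition about the cold-then-recompile flow the commit message describes, here is a hedged standalone sketch (illustrative only, with made-up names; YJIT's actual invalidation machinery is not shown): the first, cold version of a compiled path fills the cache and swaps itself out, so the second call runs a version specialized on the now-filled cache.

#include <stdio.h>

/* Stand-in for the slow constant lookup the interpreter performs. */
static int slow_lookup(void) { return 42; }

static int cached_value;

static int cold_version(void);
static int hot_version(void);

/* "Compiled" entry point; starts as the cold version. */
static int (*compiled)(void) = cold_version;

static int
cold_version(void)
{
    /* Filling the cache corresponds to opt_setinlinecache: at this point
     * the old code is invalidated, and a version that uses the filled
     * cache can be generated for the next run. */
    cached_value = slow_lookup();
    compiled = hot_version;
    return cached_value;
}

static int
hot_version(void)
{
    /* Recompiled path: the cache is known to be filled. */
    return cached_value;
}

int
main(void)
{
    printf("first call (cold, fills cache): %d\n", compiled());
    printf("second call (recompiled, hot): %d\n", compiled());
    return 0;
}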