Register allocator and deoptimizer for tracing JIT.

Dmitry Stogov 2020-04-20 16:02:03 +03:00
parent d757be640f
commit b0b43e86ae
4 changed files with 850 additions and 43 deletions

View file

@@ -1370,7 +1370,7 @@ static uint32_t zend_interval_intersection(zend_lifetime_interval *ival1, zend_l
/* See "Optimized Interval Splitting in a Linear Scan Register Allocator",
Christian Wimmer VEE'05 (2005), Figure 4. Allocation without spilling */
static int zend_jit_try_allocate_free_reg(const zend_op_array *op_array, zend_ssa *ssa, zend_lifetime_interval *current, zend_regset available, zend_regset *hints, zend_lifetime_interval *active, zend_lifetime_interval *inactive, zend_lifetime_interval **list, zend_lifetime_interval **free)
static int zend_jit_try_allocate_free_reg(const zend_op_array *op_array, const zend_op **ssa_opcodes, zend_ssa *ssa, zend_lifetime_interval *current, zend_regset available, zend_regset *hints, zend_lifetime_interval *active, zend_lifetime_interval *inactive, zend_lifetime_interval **list, zend_lifetime_interval **free)
{
zend_lifetime_interval *it;
uint32_t freeUntilPos[ZREG_NUM];
@@ -1403,7 +1403,9 @@ static int zend_jit_try_allocate_free_reg(const zend_op_array *op_array, zend_ss
while (it) {
if (current->range.start != zend_interval_end(it)) {
freeUntilPos[it->reg] = 0;
} else if (zend_jit_may_reuse_reg(op_array->opcodes + current->range.start, ssa->ops + current->range.start, ssa, current->ssa_var, it->ssa_var)) {
} else if (zend_jit_may_reuse_reg(
ssa_opcodes ? ssa_opcodes[current->range.start] : op_array->opcodes + current->range.start,
ssa->ops + current->range.start, ssa, current->ssa_var, it->ssa_var)) {
if (!ZEND_REGSET_IN(*hints, it->reg) &&
/* TODO: Avoid most often scratch registers. Find a better way ??? */
(!current->used_as_hint ||
@@ -1468,7 +1470,7 @@ static int zend_jit_try_allocate_free_reg(const zend_op_array *op_array, zend_ss
}
while (line <= range->end) {
regset = zend_jit_get_scratch_regset(
op_array->opcodes + line,
ssa_opcodes ? ssa_opcodes[line] : op_array->opcodes + line,
ssa->ops + line,
op_array, ssa, current->ssa_var, line == last_use_line);
ZEND_REGSET_FOREACH(regset, reg) {
@@ -1600,7 +1602,7 @@ static int zend_jit_allocate_blocked_reg(void)
/* See "Optimized Interval Splitting in a Linear Scan Register Allocator",
Christian Wimmer VEE'10 (2005), Figure 2. */
static zend_lifetime_interval* zend_jit_linear_scan(const zend_op_array *op_array, zend_ssa *ssa, zend_lifetime_interval *list)
static zend_lifetime_interval* zend_jit_linear_scan(const zend_op_array *op_array, const zend_op **ssa_opcodes, zend_ssa *ssa, zend_lifetime_interval *list)
{
zend_lifetime_interval *unhandled, *active, *inactive, *handled, *free;
zend_lifetime_interval *current, **p, *q;
@@ -1659,7 +1661,7 @@ static zend_lifetime_interval* zend_jit_linear_scan(const zend_op_array *op_arra
}
}
if (zend_jit_try_allocate_free_reg(op_array, ssa, current, available, &hints, active, inactive, &unhandled, &free) ||
if (zend_jit_try_allocate_free_reg(op_array, ssa_opcodes, ssa, current, available, &hints, active, inactive, &unhandled, &free) ||
zend_jit_allocate_blocked_reg()) {
ZEND_REGSET_EXCL(available, current->reg);
current->list_next = active;
@@ -1792,7 +1794,7 @@ static zend_lifetime_interval** zend_jit_allocate_registers(const zend_op_array
}
/* Linear Scan Register Allocation */
list = zend_jit_linear_scan(op_array, ssa, list);
list = zend_jit_linear_scan(op_array, NULL, ssa, list);
if (list) {
intervals = zend_arena_calloc(&CG(arena), ssa->vars_count, sizeof(zend_lifetime_interval*));
@@ -2132,7 +2134,7 @@ static int zend_jit(const zend_op_array *op_array, zend_ssa *ssa, const zend_op
}
}
if (ssa->cfg.blocks[b].flags & ZEND_BB_LOOP_HEADER) {
if (!zend_jit_check_timeout(&dasm_state, op_array->opcodes + ssa->cfg.blocks[b].start)) {
if (!zend_jit_check_timeout(&dasm_state, op_array->opcodes + ssa->cfg.blocks[b].start, NULL)) {
goto jit_failure;
}
}
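
Note: the hunks above thread a new ssa_opcodes parameter through the linear-scan allocator. When registers are allocated for a trace, an SSA instruction index no longer maps to op_array->opcodes + line; the trace records the oplines it actually executed, so the allocator looks them up through ssa_opcodes. The whole-function path keeps passing NULL (see the zend_jit_allocate_registers hunk). Below is a minimal stand-alone sketch of that lookup pattern; the struct definitions and the resolve_opline() helper are simplified placeholders for illustration, not the real Zend types or API.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the Zend structures (illustration only). */
    typedef struct { uint8_t opcode; } zend_op;
    typedef struct { zend_op *opcodes; uint32_t last; } zend_op_array;

    /* Resolve an SSA instruction index to an opline.  For whole-function JIT,
     * ssa_opcodes is NULL and the index is an offset into op_array->opcodes;
     * for a trace, ssa_opcodes[line] points at the opline the trace recorded.
     * This mirrors the "ssa_opcodes ? ssa_opcodes[line] : op_array->opcodes + line"
     * expression used throughout the hunks above. */
    static const zend_op *resolve_opline(const zend_op_array *op_array,
                                         const zend_op **ssa_opcodes,
                                         uint32_t line)
    {
        return ssa_opcodes ? ssa_opcodes[line] : op_array->opcodes + line;
    }

    int main(void)
    {
        zend_op ops[3] = {{10}, {20}, {30}};
        zend_op_array op_array = {ops, 3};
        /* A trace may visit oplines out of their op_array order. */
        const zend_op *trace_oplines[2] = {&ops[2], &ops[0]};

        printf("%u\n", (unsigned)resolve_opline(&op_array, NULL, 1)->opcode);          /* 20 */
        printf("%u\n", (unsigned)resolve_opline(&op_array, trace_oplines, 1)->opcode); /* 10 */
        return 0;
    }

The tracing side that actually supplies a non-NULL opline array is presumably in the large suppressed diff below.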

View file

@@ -303,6 +303,7 @@ typedef struct _zend_jit_trace_start_rec {
typedef struct _zend_jit_trace_exit_info {
const zend_op *opline; /* opline where VM should continue execution */
const zend_op_array *op_array;
uint32_t stack_size;
uint32_t stack_offset;
} zend_jit_trace_exit_info;
@@ -430,6 +431,7 @@ struct _zend_jit_trace_stack_frame {
} while (0)
typedef struct _zend_jit_globals {
zend_jit_trace_rec *current_trace;
zend_jit_trace_stack_frame *current_frame;
const zend_op *bad_root_cache_opline[ZEND_JIT_TRACE_BAD_ROOT_SLOTS];
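
Note: the header additions above give each trace side exit a stack_size/stack_offset pair, apparently describing a slice of a shared array that records where the values of the frame's stack slots live at that exit, plus a JIT_G(current_frame) pointer that the compile-time code saves and restores around exit-point creation (see the zend_jit_leave_func hunk below). Roughly, this is the bookkeeping a deoptimizer needs: when a side exit fires, register-held values must be written back to the VM stack before the interpreter resumes. The sketch below shows that flush loop under assumed, simplified types; the trace_exit_stack_entry layout and the store_reg_to_var() helper are hypothetical, not the engine's actual structures.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical per-slot record: which CPU register (if any) holds the
     * value of VM stack slot i at this exit. */
    typedef struct {
        int8_t reg;    /* -1: value is already in the VM slot */
    } trace_exit_stack_entry;

    /* Mirrors the two fields added to _zend_jit_trace_exit_info above. */
    typedef struct {
        uint32_t stack_size;   /* number of stack slots described for this exit */
        uint32_t stack_offset; /* start of this exit's slice in the shared map */
    } trace_exit_info;

    /* Hypothetical helper: copy the saved contents of CPU register `reg`
     * into VM stack slot `slot` of the interrupted frame. */
    static void store_reg_to_var(uint32_t slot, int8_t reg)
    {
        printf("slot %u <- r%d\n", (unsigned)slot, (int)reg);
    }

    /* Deoptimization at a side exit: walk this exit's slice of the stack map
     * and flush every register-allocated value back to the VM stack so the
     * interpreter sees a consistent frame. */
    static void deoptimize_at_exit(const trace_exit_info *exit_info,
                                   const trace_exit_stack_entry *stack_map)
    {
        const trace_exit_stack_entry *e = stack_map + exit_info->stack_offset;
        for (uint32_t i = 0; i < exit_info->stack_size; i++) {
            if (e[i].reg >= 0) {
                store_reg_to_var(i, e[i].reg);
            }
        }
    }

    int main(void)
    {
        trace_exit_stack_entry map[] = {{-1}, {0}, {2}};   /* slots 0..2 of one exit */
        trace_exit_info info = {3, 0};
        deoptimize_at_exit(&info, map);
        return 0;
    }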

File diff suppressed because it is too large.

View file

@@ -2382,6 +2382,11 @@ static int zend_jit_trace_exit_stub(dasm_State **Dst)
|.else
| add r4, 8*4+8*8 /* CPU regs + SSE regs */
|.endif
| // check for interrupt (try to avoid this ???)
| MEM_OP2_1_ZTS cmp, byte, executor_globals, vm_interrupt, 0, r0
| jne ->interrupt_handler
| // execute_data = EG(current_execute_data)
| MEM_OP2_2_ZTS mov, FP, aword, executor_globals, current_execute_data, r0
| test eax, eax
@@ -2720,7 +2725,7 @@ static int zend_jit_set_valid_ip(dasm_State **Dst, const zend_op *opline)
return 1;
}
static int zend_jit_check_timeout(dasm_State **Dst, const zend_op *opline)
static int zend_jit_check_timeout(dasm_State **Dst, const zend_op *opline, const void *exit_addr)
{
#if 0
if (!zend_jit_set_valid_ip(Dst, opline)) {
@@ -2731,13 +2736,21 @@ static int zend_jit_check_timeout(dasm_State **Dst, const zend_op *opline)
#else
| MEM_OP2_1_ZTS cmp, byte, executor_globals, vm_interrupt, 0, r0
if (last_valid_opline == opline) {
if (exit_addr) {
| jne &exit_addr
} else {
| jne ->interrupt_handler
}
} else {
| jne >1
|.cold_code
|1:
| LOAD_IP_ADDR opline
if (exit_addr) {
| jmp &exit_addr
} else {
| jmp ->interrupt_handler
}
|.code
}
#endif
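
Note: the new exit_addr parameter of zend_jit_check_timeout() lets code generated inside a trace route a pending interrupt through a trace side exit instead of jumping straight into the VM's interrupt handler, so interpreter state can be restored first. The C function below only models the control flow that the DynASM snippet above emits; it is an illustration, not the emitter itself.

    #include <stdbool.h>
    #include <stddef.h>

    /* Control-flow model of the emitted code (illustrative):
     *   - fast path: EG(vm_interrupt) is 0, fall through to the next op;
     *   - slow path: if the stored IP is stale, reload EX(opline) first
     *     (LOAD_IP_ADDR), then either leave through the trace side exit
     *     (jmp &exit_addr) or enter the generic handler (jmp ->interrupt_handler). */
    static bool check_timeout_model(bool vm_interrupt,
                                    bool ip_is_valid,      /* last_valid_opline == opline */
                                    const void *exit_addr)
    {
        if (!vm_interrupt) {
            return true;                    /* keep executing JIT-ed code */
        }
        if (!ip_is_valid) {
            /* LOAD_IP_ADDR opline */
        }
        if (exit_addr != NULL) {
            /* jmp &exit_addr: deoptimize through the trace exit stub */
        } else {
            /* jmp ->interrupt_handler */
        }
        return false;                       /* execution left the trace */
    }

Existing callers keep passing NULL (the zend_jit_do_fcall hunk below and the loop-header check in zend_jit.c above); only trace compilation is expected to supply an exit address.
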
@@ -8472,7 +8485,7 @@ static int zend_jit_do_fcall(dasm_State **Dst, const zend_op *opline, const zend
|.code
// TODO: Can we avoid checking for interrupts after each call ???
if (!zend_jit_check_timeout(Dst, opline + 1)) {
if (!zend_jit_check_timeout(Dst, opline + 1, NULL)) {
return 0;
}
if (opline->opcode != ZEND_DO_ICALL) {
@@ -9448,13 +9461,17 @@ static int zend_jit_leave_func(dasm_State **Dst, const zend_op *opline, const ze
&& trace->op == ZEND_JIT_TRACE_BACK
&& trace->recursive) {
const zend_op *next_opline = trace->opline;
uint32_t exit_point = zend_jit_trace_get_exit_point(opline, NULL, trace);
const void *exit_addr = zend_jit_trace_get_exit_addr(exit_point);
uint32_t exit_point;
const void *exit_addr;
zend_jit_trace_stack_frame *current_frame;
trace++;
ZEND_ASSERT(trace->op == ZEND_JIT_TRACE_VM || trace->op == ZEND_JIT_TRACE_END);
next_opline = trace->opline;
current_frame = JIT_G(current_frame);
JIT_G(current_frame) = NULL;
exit_point = zend_jit_trace_get_exit_point(opline, NULL, trace);
JIT_G(current_frame) = current_frame;
exit_addr = zend_jit_trace_get_exit_addr(exit_point);
if (!exit_addr) {
return 0;
@@ -11385,10 +11402,23 @@ static zend_regset zend_jit_get_scratch_regset(const zend_op *opline, const zend
break;
}
if (zend_jit_trigger == ZEND_JIT_ON_HOT_TRACE) {
if (ssa_op == ssa->ops
&& JIT_G(current_trace)[ZEND_JIT_TRACE_START_REC_SIZE].op == ZEND_JIT_TRACE_INIT_CALL
&& JIT_G(current_trace)[ZEND_JIT_TRACE_START_REC_SIZE].fake) {
ZEND_REGSET_INCL(regset, ZREG_R0);
ZEND_REGSET_INCL(regset, ZREG_R1);
}
}
#if ZTS
/* %r0 is used to check EG(vm_interrupt) */
if (zend_jit_trigger == ZEND_JIT_ON_HOT_TRACE) {
// TODO: loop detection ???
if (ssa_op == ssa->ops
&& (JIT_G(current_trace)->stop == ZEND_JIT_TRACE_STOP_LOOP
|| JIT_G(current_trace)->stop == ZEND_JIT_TRACE_STOP_RECURSIVE_CALL)) {
ZEND_REGSET_INCL(regset, ZREG_R0);
}
} else {
uint32_t b = ssa->cfg.map[ssa_op - ssa->ops];
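
Note: the last hunk grows the scratch register set returned for the first instruction of certain traces: %r0/%r1 when the trace starts with a fake ZEND_JIT_TRACE_INIT_CALL record, and, in a ZTS build, %r0 for looping or recursively-calling traces because it is used while checking EG(vm_interrupt). The allocator consults this set in zend_jit_try_allocate_free_reg (second hunk of the first file) to keep values out of those registers around that instruction. A tiny stand-alone model of the regset bitmask follows; the real ZEND_REGSET_* macros and register numbering live in the JIT headers, so the names and values here are assumptions.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified model of the zend_regset bitmask: one bit per CPU register.
     * Register numbers and macro names are illustrative, not the real ones. */
    typedef uint32_t regset;

    #define REGSET_EMPTY      ((regset)0)
    #define REGSET_INCL(s, r) ((s) |= (regset)1 << (r))
    #define REGSET_IN(s, r)   (((s) >> (r)) & 1u)

    enum { REG_R0 = 0, REG_R1 = 1, REG_R2 = 2 };

    int main(void)
    {
        regset scratch = REGSET_EMPTY;

        /* As in the hunk above: reserve %r0/%r1 at the trace entry when the
         * trace begins with a fake INIT_CALL, and %r0 (ZTS) because it is
         * clobbered when loading EG(vm_interrupt). */
        REGSET_INCL(scratch, REG_R0);
        REGSET_INCL(scratch, REG_R1);

        printf("r0=%u r1=%u r2=%u\n",
               REGSET_IN(scratch, REG_R0),
               REGSET_IN(scratch, REG_R1),
               REGSET_IN(scratch, REG_R2));
        return 0;
    }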