Mirror of https://github.com/php/php-src.git (synced 2025-08-15 21:48:51 +02:00)

Commit 4c84ed4d98: Merge branch 'PHP-8.4'

* PHP-8.4:
  Update IR

3 changed files with 112 additions and 69 deletions
File 1 of 3:

@@ -1386,6 +1386,11 @@ bool ir_use_list_add(ir_ctx *ctx, ir_ref to, ir_ref ref)
 	if (old_size < new_size) {
 		/* Reallocate the whole edges buffer (this is inefficient) */
 		ctx->use_edges = ir_mem_realloc(ctx->use_edges, new_size);
+	} else if (n == ctx->use_edges_count) {
+		ctx->use_edges[n] = ref;
+		use_list->count++;
+		ctx->use_edges_count++;
+		return 0;
 	}
 	memcpy(ctx->use_edges + ctx->use_edges_count, ctx->use_edges + use_list->refs, use_list->count * sizeof(ir_ref));
 	use_list->refs = ctx->use_edges_count;
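Note: the new `else if` branch is a fast path. All use-lists share one `use_edges` buffer; when the list being grown is the last one in that buffer (`n == ctx->use_edges_count`), the new edge can be appended in place, skipping the relocation below that the code itself labels inefficient. A minimal sketch of this layout, with illustrative names rather than the real IR API (error handling omitted):

#include <stdlib.h>
#include <string.h>

typedef struct { int refs; int count; } list_t;   /* a slice of the shared buffer */
typedef struct { int *edges; int edges_count; int edges_cap; } pool_t;

/* Append `value` to `l`; returns 1 when `edges` may have moved. */
static int list_add(pool_t *pool, list_t *l, int value)
{
    int end = l->refs + l->count;
    int moved = 0;

    if (end == pool->edges_count && pool->edges_count < pool->edges_cap) {
        /* fast path: `l` is the tail of the buffer, grow it in place */
        pool->edges[end] = value;
        l->count++;
        pool->edges_count++;
        return 0;
    }
    if (pool->edges_count + l->count + 1 > pool->edges_cap) {
        /* grow the whole buffer (this may move it) */
        pool->edges_cap = (pool->edges_cap + l->count + 1) * 2;
        pool->edges = realloc(pool->edges, pool->edges_cap * sizeof(int));
        moved = 1;
    }
    /* slow path: relocate the list to the end of the buffer, then append */
    memcpy(pool->edges + pool->edges_count, pool->edges + l->refs, l->count * sizeof(int));
    l->refs = pool->edges_count;
    pool->edges_count += l->count;
    pool->edges[pool->edges_count++] = value;
    l->count++;
    return moved;
}

int main(void)
{
    pool_t pool = {0};
    list_t uses = {0, 0};
    for (int v = 1; v <= 4; v++) {
        list_add(&pool, &uses, v);    /* alternates slow and fast paths */
    }
    free(pool.edges);
    return 0;
}

The slow path abandons the list's old slots inside the buffer, and a grown buffer may move, which is why the function reports it: callers holding pointers into `edges` must refresh them, as the reworked `ir_replace` below does.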
@@ -1416,20 +1421,39 @@ void ir_use_list_sort(ir_ctx *ctx, ir_ref ref)
 
 void ir_replace(ir_ctx *ctx, ir_ref ref, ir_ref new_ref)
 {
-	int i, j, n, use;
+	int i, j, n, *p, use;
 	ir_insn *insn;
+	ir_use_list *use_list;
 
 	IR_ASSERT(ref != new_ref);
-	n = ctx->use_lists[ref].count;
-	for (i = 0; i < n; i++) {
-		use = ctx->use_edges[ctx->use_lists[ref].refs + i];
-		IR_ASSERT(use != ref);
-		insn = &ctx->ir_base[use];
-		j = ir_insn_find_op(insn, ref);
-		IR_ASSERT(j > 0);
-		ir_insn_set_op(insn, j, new_ref);
-		if (!IR_IS_CONST_REF(new_ref)) {
-			ir_use_list_add(ctx, new_ref, use);
+	use_list = &ctx->use_lists[ref];
+	n = use_list->count;
+	p = ctx->use_edges + use_list->refs;
+
+	if (new_ref < 0) {
+		/* constant or IR_UNUSED */
+		for (; n; p++, n--) {
+			use = *p;
+			IR_ASSERT(use != ref);
+			insn = &ctx->ir_base[use];
+			j = ir_insn_find_op(insn, ref);
+			IR_ASSERT(j > 0);
+			ir_insn_set_op(insn, j, new_ref);
+		}
+	} else {
+		for (i = 0; i < n; p++, i++) {
+			use = *p;
+			IR_ASSERT(use != ref);
+			insn = &ctx->ir_base[use];
+			j = ir_insn_find_op(insn, ref);
+			IR_ASSERT(j > 0);
+			ir_insn_set_op(insn, j, new_ref);
+			if (ir_use_list_add(ctx, new_ref, use)) {
+				/* restore after reallocation */
+				use_list = &ctx->use_lists[ref];
+				n = use_list->count;
+				p = &ctx->use_edges[use_list->refs + i];
+			}
 		}
 	}
 }
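Note: this rewrite exists because `ir_use_list_add` can now reallocate `ctx->use_edges` mid-loop and reports that through its return value; the cached cursor `p` would otherwise dangle, so it is recomputed from the surviving index `i` ("restore after reallocation"). The constant branch (`new_ref < 0`) never touches use-lists and can walk the array with a bare pointer. The invalidation pattern in isolation, as a compilable sketch with illustrative names:

#include <stdlib.h>

typedef struct { int *data; int count; int cap; } vec_t;

/* May realloc `v->data`; returns 1 when it did, so callers can re-derive pointers. */
static int vec_push(vec_t *v, int x)
{
    int moved = 0;
    if (v->count == v->cap) {
        v->cap  = v->cap ? v->cap * 2 : 4;
        v->data = realloc(v->data, v->cap * sizeof(int));
        moved = 1;
    }
    v->data[v->count++] = x;
    return moved;
}

int main(void)
{
    vec_t v = {0};
    vec_push(&v, 10);
    vec_push(&v, 20);
    vec_push(&v, 30);

    int *p = v.data;                  /* cached cursor, like `p` in ir_replace() */
    for (int i = 0; i < 3; i++, p++) {
        if (vec_push(&v, *p * 2)) {
            p = v.data + i;           /* restore after reallocation */
        }
    }
    free(v.data);
    return 0;
}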
File 2 of 3:

@@ -323,13 +323,14 @@ IR_FOLD(ADD(C_I16, C_I16))
 IR_FOLD(ADD(C_I32, C_I32))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type || (sizeof(void*) == 4 && IR_OPT_TYPE(opt) == IR_ADDR));
-	IR_FOLD_CONST_I(op1_insn->val.i32 + op2_insn->val.i32);
+	/* Here and below we use "unsigned math" to prevent undefined signed overflow behavior */
+	IR_FOLD_CONST_I((int32_t)(op1_insn->val.u32 + op2_insn->val.u32));
 }
 
 IR_FOLD(ADD(C_I64, C_I64))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type || (sizeof(void*) == 8 && IR_OPT_TYPE(opt) == IR_ADDR));
-	IR_FOLD_CONST_I(op1_insn->val.i64 + op2_insn->val.i64);
+	IR_FOLD_CONST_I(op1_insn->val.u64 + op2_insn->val.u64);
 }
 
 IR_FOLD(ADD(C_DOUBLE, C_DOUBLE))
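Note: the new comment in this hunk covers every fold change in the file. Signed overflow is undefined behavior in C, so folding `INT32_MAX + 1` through `val.i32 + val.i32` lets the optimizer assume it never happens; unsigned arithmetic wraps modulo 2^32 by definition, and converting the wrapped result back to `int32_t` yields the intended two's-complement value (strictly implementation-defined before C23, but defined this way by GCC, Clang, and MSVC). A standalone illustration:

#include <stdint.h>
#include <stdio.h>

/* Well-defined wrapping add: compute in uint32_t, reinterpret as int32_t. */
static int32_t add_wrap32(int32_t a, int32_t b)
{
    return (int32_t)((uint32_t)a + (uint32_t)b);
}

int main(void)
{
    /* INT32_MAX + 1 wraps to INT32_MIN instead of invoking UB. */
    printf("%d\n", add_wrap32(INT32_MAX, 1));
    return 0;
}

The 64-bit rules below get the same treatment via `val.u64`: the wrap happens in the unsigned operation, and the reinterpretation back to a signed value occurs on assignment or through the value union.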
@@ -393,13 +394,13 @@ IR_FOLD(SUB(C_I16, C_I16))
 IR_FOLD(SUB(C_I32, C_I32))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-	IR_FOLD_CONST_I(op1_insn->val.i32 - op2_insn->val.i32);
+	IR_FOLD_CONST_I((int32_t)(op1_insn->val.u32 - op2_insn->val.u32));
 }
 
 IR_FOLD(SUB(C_I64, C_I64))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-	IR_FOLD_CONST_I(op1_insn->val.i64 - op2_insn->val.i64);
+	IR_FOLD_CONST_I(op1_insn->val.u64 - op2_insn->val.u64);
 }
 
 IR_FOLD(SUB(C_DOUBLE, C_DOUBLE))
@@ -463,13 +464,13 @@ IR_FOLD(MUL(C_I16, C_I16))
 IR_FOLD(MUL(C_I32, C_I32))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-	IR_FOLD_CONST_I(op1_insn->val.i32 * op2_insn->val.i32);
+	IR_FOLD_CONST_I((int32_t)(op1_insn->val.u32 * op2_insn->val.u32));
 }
 
 IR_FOLD(MUL(C_I64, C_I64))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-	IR_FOLD_CONST_I(op1_insn->val.i64 * op2_insn->val.i64);
+	IR_FOLD_CONST_I(op1_insn->val.u64 * op2_insn->val.u64);
 }
 
 IR_FOLD(MUL(C_DOUBLE, C_DOUBLE))
@@ -556,7 +557,7 @@ IR_FOLD(NEG(C_I32))
 IR_FOLD(NEG(C_I64))
 {
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-	IR_FOLD_CONST_I(-op1_insn->val.i64);
+	IR_FOLD_CONST_I(-op1_insn->val.u64);
 }
 
 IR_FOLD(NEG(C_DOUBLE))
@@ -580,7 +581,7 @@ IR_FOLD(ABS(C_I64))
 	if (op1_insn->val.i64 >= 0) {
 		IR_FOLD_COPY(op1);
 	} else {
-		IR_FOLD_CONST_I(-op1_insn->val.i64);
+		IR_FOLD_CONST_I(-op1_insn->val.u64);
 	}
 }
 
@@ -680,7 +681,7 @@ IR_FOLD(MUL_OV(C_I64, C_I64))
 	int64_t min = - max - 1;
 	int64_t res;
 	IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-	res = op1_insn->val.i64 * op2_insn->val.i64;
+	res = op1_insn->val.u64 * op2_insn->val.u64;
 	if (op1_insn->val.i64 != 0 && res / op1_insn->val.i64 != op2_insn->val.i64 && res >= min && res <= max) {
 		IR_FOLD_NEXT;
 	}
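Note: the unchanged condition in this hunk shows how MUL_OV folding detects 64-bit overflow without UB in the check itself: compute the product with defined wrapping, then test whether dividing it by one operand recovers the other. The same idea as a standalone helper; the explicit `a == -1` guard is an addition of this sketch (not taken from the fold rule), needed because `INT64_MIN / -1` overflows too:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 and stores a*b if it fits in int64_t, else returns 0. */
static int mul_checked64(int64_t a, int64_t b, int64_t *out)
{
    int64_t res = (int64_t)((uint64_t)a * (uint64_t)b);  /* defined wrap */

    if (a == -1) {
        if (b == INT64_MIN) {
            return 0;                  /* -INT64_MIN is not representable */
        }
    } else if (a != 0 && res / a != b) {
        return 0;                      /* wrapped: division does not recover b */
    }
    *out = res;
    return 1;
}

int main(void)
{
    int64_t r;
    printf("%d\n", mul_checked64(INT64_MAX, 2, &r));  /* 0: overflow */
    printf("%d\n", mul_checked64(1000, 1000, &r));    /* 1: r == 1000000 */
    return 0;
}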
@@ -2518,7 +2519,7 @@ IR_FOLD(ADD(ADD, C_I64))
 {
 	if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
 		/* (x + c1) + c2 => x + (c1 + c2) */
-		val.i64 = ctx->ir_base[op1_insn->op2].val.i64 + op2_insn->val.i64;
+		val.i64 = ctx->ir_base[op1_insn->op2].val.u64 + op2_insn->val.u64;
 		op1 = op1_insn->op1;
 		op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
 		IR_FOLD_RESTART;
@@ -2556,8 +2557,8 @@ IR_FOLD(ADD(SUB, C_I64))
 {
 	if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
 		/* (x - c1) + c2 => x + (c2 - c1) */
-		val.i64 = op2_insn->val.i64 - ctx->ir_base[op1_insn->op2].val.i64;
-		if (val.i64 < 0 && val.i64 - 1 < 0) {
+		val.i64 = op2_insn->val.u64 - ctx->ir_base[op1_insn->op2].val.u64;
+		if (val.i64 < 0 && val.i64 != INT64_MIN) {
 			val.i64 = -val.i64;
 			opt++; /* ADD -> SUB */
 		}
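Note: the replaced guard `val.i64 < 0 && val.i64 - 1 < 0` only excluded INT64_MIN through the very signed-overflow UB this commit removes: `INT64_MIN - 1` has no defined value, and the `-val.i64` on the next line would overflow as well. The new `val.i64 != INT64_MIN` states the actual precondition, that the negation is representable. In isolation:

#include <stdint.h>
#include <stdio.h>

/* Negate only when representable: INT64_MIN has no positive counterpart. */
static int negate_checked64(int64_t v, int64_t *out)
{
    if (v == INT64_MIN) {
        return 0;          /* -INT64_MIN == INT64_MAX + 1 does not fit */
    }
    *out = -v;
    return 1;
}

int main(void)
{
    int64_t r;
    printf("%d\n", negate_checked64(INT64_MIN, &r));  /* 0 */
    printf("%d\n", negate_checked64(-42, &r));        /* 1: r == 42 */
    return 0;
}

The same `!= INT64_MIN` guard recurs in every reassociation hunk below that may flip the sign of a folded constant.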
@@ -2566,7 +2567,7 @@ IR_FOLD(ADD(SUB, C_I64))
 		IR_FOLD_RESTART;
 	} else if (IR_IS_CONST_REF(op1_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op1].op)) {
 		/* (c1 - x) + c2 => (c1 + c2) - x */
-		val.i64 = ctx->ir_base[op1_insn->op1].val.i64 + op2_insn->val.i64;
+		val.i64 = ctx->ir_base[op1_insn->op1].val.u64 + op2_insn->val.u64;
 		opt++; /* ADD -> SUB */
 		op2 = op1_insn->op2;
 		op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
@@ -2599,8 +2600,8 @@ IR_FOLD(SUB(ADD, C_I64))
 {
 	if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
 		/* (x + c1) - c2 => x + (c1 - c2) */
-		val.i64 = ctx->ir_base[op1_insn->op2].val.i64 - op2_insn->val.i64;
-		if (val.i64 < 0 && val.i64 - 1 < 0) {
+		val.i64 = ctx->ir_base[op1_insn->op2].val.u64 - op2_insn->val.u64;
+		if (val.i64 < 0 && val.i64 != INT64_MIN) {
 			val.i64 = -val.i64;
 		} else {
 			opt--; /* SUB -> ADD */
@@ -2635,7 +2636,7 @@ IR_FOLD(SUB(C_I64, ADD))
 {
 	if (IR_IS_CONST_REF(op2_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op2].op)) {
 		/* c1 - (x + c2) => (c1 - c2) - x */
-		val.i64 = op1_insn->val.i64 - ctx->ir_base[op2_insn->op2].val.i64;
+		val.i64 = op1_insn->val.u64 - ctx->ir_base[op2_insn->op2].val.u64;
 		op2 = op2_insn->op1;
 		op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
 		IR_FOLD_RESTART;
@@ -2652,7 +2653,7 @@ IR_FOLD(SUB(SUB, C_ADDR))
 	if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
 		/* (x - c1) - c2 => x - (c1 + c2) */
 		val.u64 = ctx->ir_base[op1_insn->op2].val.u64 + op2_insn->val.u64;
-		if (val.i64 < 0 && val.i64 - 1 < 0) {
+		if (val.i64 < 0 && val.i64 != INT64_MIN) {
 			val.i64 = -val.i64;
 			opt--; /* SUB -> ADD */
 		}
@@ -2676,8 +2677,8 @@ IR_FOLD(SUB(SUB, C_I64))
 {
 	if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
 		/* (x - c1) - c2 => x - (c1 + c2) */
-		val.i64 = ctx->ir_base[op1_insn->op2].val.i64 + op2_insn->val.i64;
-		if (val.i64 < 0 && val.i64 - 1 < 0) {
+		val.i64 = ctx->ir_base[op1_insn->op2].val.u64 + op2_insn->val.u64;
+		if (val.i64 < 0 && val.i64 != INT64_MIN) {
 			val.i64 = -val.i64;
 			opt--; /* SUB -> ADD */
 		}
@@ -2686,7 +2687,7 @@ IR_FOLD(SUB(SUB, C_I64))
 		IR_FOLD_RESTART;
 	} else if (IR_IS_CONST_REF(op1_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op1].op)) {
 		/* (c1 - x) - c2 => (c1 - c2) - x */
-		val.i64 = ctx->ir_base[op1_insn->op1].val.i64 - op2_insn->val.i64;
+		val.i64 = ctx->ir_base[op1_insn->op1].val.u64 - op2_insn->val.u64;
 		op2 = op1_insn->op2;
 		op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
 		IR_FOLD_RESTART;
@@ -2709,7 +2710,7 @@ IR_FOLD(SUB(C_ADDR, SUB))
 	} else if (IR_IS_CONST_REF(op2_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op1].op)) {
 		/* c1 - (c2 - x) => x + (c1 - c2) */
 		val.u64 = op1_insn->val.u64 - ctx->ir_base[op2_insn->op1].val.u64;
-		if (val.i64 < 0 && val.i64 - 1 < 0) {
+		if (val.i64 < 0 && val.i64 != INT64_MIN) {
 			val.i64 = -val.i64;
 			opt++; /* ADD -> SUB */
 		}
@@ -2727,14 +2728,14 @@ IR_FOLD(SUB(C_I64, SUB))
 {
 	if (IR_IS_CONST_REF(op2_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op2].op)) {
 		/* c1 - (x - c2) => (c1 + c2) - x */
-		val.i64 = op1_insn->val.i64 + ctx->ir_base[op2_insn->op2].val.i64;
+		val.i64 = op1_insn->val.u64 + ctx->ir_base[op2_insn->op2].val.u64;
 		op2 = op2_insn->op1;
 		op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
 		IR_FOLD_RESTART;
 	} else if (IR_IS_CONST_REF(op2_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op1].op)) {
 		/* c1 - (c2 - x) => x + (c1 - c2) */
-		val.i64 = op1_insn->val.i64 - ctx->ir_base[op2_insn->op1].val.i64;
-		if (val.i64 < 0 && val.i64 - 1 < 0) {
+		val.i64 = op1_insn->val.u64 - ctx->ir_base[op2_insn->op1].val.u64;
+		if (val.i64 < 0 && val.i64 != INT64_MIN) {
 			val.i64 = -val.i64;
 			opt++; /* ADD -> SUB */
 		}
@@ -2768,7 +2769,7 @@ IR_FOLD(MUL(MUL, C_I64))
 {
 	if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
 		/* (x * c1) * c2 => x * (c1 * c2) */
-		val.i64 = ctx->ir_base[op1_insn->op2].val.i64 * op2_insn->val.i64;
+		val.i64 = ctx->ir_base[op1_insn->op2].val.u64 * op2_insn->val.u64;
 		op1 = op1_insn->op1;
 		op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
 		IR_FOLD_RESTART;
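Note: throughout these reassociation rules, `opt++ /* ADD -> SUB */` and `opt-- /* SUB -> ADD */` switch the operation arithmetically; per those comments this relies on SUB being numbered directly after ADD in IR's opcode table (the opcode sits in the low bits of `opt`). The idiom under that assumption, with made-up enum values:

#include <stdio.h>

/* Assumption for illustration: SUB == ADD + 1, as the fold comments imply. */
enum { OP_ADD = 10, OP_SUB = 11 };

int main(void)
{
    int opt = OP_ADD;
    long long c = -5;

    if (c < 0) {           /* x + (-5)  =>  x - 5 */
        c = -c;
        opt++;             /* ADD -> SUB */
    }
    printf("%s x, %lld\n", opt == OP_SUB ? "SUB" : "ADD", c);
    return 0;
}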
File 3 of 3:

@@ -385,7 +385,7 @@ static void ir_sccp_remove_insn2(ir_ctx *ctx, ir_ref ref, ir_bitqueue *worklist)
 
 static void ir_sccp_replace_insn(ir_ctx *ctx, ir_insn *_values, ir_ref ref, ir_ref new_ref, ir_bitqueue *worklist)
 {
-	ir_ref j, n, *p, use, k, l;
+	ir_ref j, n, *p, use, i;
 	ir_insn *insn;
 	ir_use_list *use_list;
 
@@ -409,40 +409,48 @@ static void ir_sccp_replace_insn(ir_ctx *ctx, ir_insn *_values, ir_ref ref, ir_ref new_ref, ir_bitqueue *worklist)
 
 	use_list = &ctx->use_lists[ref];
 	n = use_list->count;
-	for (j = 0, p = &ctx->use_edges[use_list->refs]; j < n; j++, p++) {
-		use = *p;
-		if (IR_IS_FEASIBLE(use)) {
-			insn = &ctx->ir_base[use];
-			l = insn->inputs_count;
-			for (k = 1; k <= l; k++) {
-				if (ir_insn_op(insn, k) == ref) {
-					ir_insn_set_op(insn, k, new_ref);
-				}
-			}
-#if IR_COMBO_COPY_PROPAGATION
-			if (new_ref > 0 && IR_IS_BOTTOM(use)) {
-				if (ir_use_list_add(ctx, new_ref, use)) {
-					/* restore after reallocation */
-					use_list = &ctx->use_lists[ref];
-					n = use_list->count;
-					p = &ctx->use_edges[use_list->refs + j];
-				}
-			}
-#endif
-			/* we may skip nodes that are going to be removed by SCCP (TOP, CONST and COPY) */
-			if (worklist && _values[use].op > IR_COPY) {
-				/* schedule folding */
-				ir_bitqueue_add(worklist, use);
-			}
-		}
-	}
+	p = &ctx->use_edges[use_list->refs];
+	if (new_ref <= 0) {
+		/* constant or IR_UNUSED */
+		for (; n; p++, n--) {
+			use = *p;
+			/* we may skip nodes that are going to be removed by SCCP (TOP, CONST and COPY) */
+			if (_values[use].op > IR_COPY) {
+				insn = &ctx->ir_base[use];
+				i = ir_insn_find_op(insn, ref);
+				if (!i) continue;
+				IR_ASSERT(i > 0);
+				ir_insn_set_op(insn, i, new_ref);
+				/* schedule folding */
+				ir_bitqueue_add(worklist, use);
+			}
+		}
+	} else {
+		for (j = 0; j < n; j++, p++) {
+			use = *p;
+			/* we may skip nodes that are going to be removed by SCCP (TOP, CONST and COPY) */
+			if (_values[use].optx == IR_BOTTOM) {
+				insn = &ctx->ir_base[use];
+				i = ir_insn_find_op(insn, ref);
+				IR_ASSERT(i > 0);
+				ir_insn_set_op(insn, i, new_ref);
+				if (ir_use_list_add(ctx, new_ref, use)) {
+					/* restore after reallocation */
+					use_list = &ctx->use_lists[ref];
+					n = use_list->count;
+					p = &ctx->use_edges[use_list->refs + j];
+				}
+				/* schedule folding */
+				ir_bitqueue_add(worklist, use);
+			}
+		}
+	}
 
 	CLEAR_USES(ref);
 }
 
 static void ir_sccp_replace_insn2(ir_ctx *ctx, ir_ref ref, ir_ref new_ref, ir_bitqueue *worklist)
 {
-	ir_ref j, n, *p, use, k, l;
+	ir_ref i, j, n, *p, use;
 	ir_insn *insn;
 	ir_use_list *use_list;
 
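Note: the `new_ref <= 0` split (and `new_ref < 0` in `ir_replace`) relies on IR's reference encoding: positive refs index instructions, negative refs index constants, and 0 is IR_UNUSED, which is what the `/* constant or IR_UNUSED */` comments and the `IR_IS_CONST_REF` checks in the fold rules refer to. Use-lists are only maintained for instruction targets, so only the positive branch needs `ir_use_list_add` and the restore-after-reallocation step. A toy picture of such a sign-biased buffer (layout illustrative):

#include <stdio.h>

/* One buffer: constants below slot 0, instructions above it.
 * ref == 0 is "unused", ref < 0 is a constant, ref > 0 is an instruction. */
int main(void)
{
    int storage[7] = { 300, 200, 100, 0, 11, 22, 33 };  /* 3 consts, slot 0, 3 insns */
    int *base = storage + 3;                            /* base[0] plays IR_UNUSED */

    int const_ref = -2;
    int insn_ref  = 1;
    printf("const[-2] = %d, insn[1] = %d\n", base[const_ref], base[insn_ref]);
    return 0;
}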
@@ -468,29 +476,37 @@ static void ir_sccp_replace_insn2(ir_ctx *ctx, ir_ref ref, ir_ref new_ref, ir_bitqueue *worklist)
 
 	use_list = &ctx->use_lists[ref];
 	n = use_list->count;
-	for (j = 0, p = &ctx->use_edges[use_list->refs]; j < n; j++, p++) {
-		use = *p;
-		insn = &ctx->ir_base[use];
-		l = insn->inputs_count;
-		for (k = 1; k <= l; k++) {
-			if (ir_insn_op(insn, k) == ref) {
-				ir_insn_set_op(insn, k, new_ref);
-			}
-		}
-#if IR_COMBO_COPY_PROPAGATION
-		if (new_ref > 0) {
-			if (ir_use_list_add(ctx, new_ref, use)) {
-				/* restore after reallocation */
-				use_list = &ctx->use_lists[ref];
-				n = use_list->count;
-				p = &ctx->use_edges[use_list->refs + j];
-			}
-		}
-#endif
-		/* schedule folding */
-		ir_bitqueue_add(worklist, use);
-	}
+	p = &ctx->use_edges[use_list->refs];
+	if (new_ref <= 0) {
+		/* constant or IR_UNUSED */
+		for (; n; p++, n--) {
+			use = *p;
+			IR_ASSERT(use != ref);
+			insn = &ctx->ir_base[use];
+			i = ir_insn_find_op(insn, ref);
+			IR_ASSERT(i > 0);
+			ir_insn_set_op(insn, i, new_ref);
+			/* schedule folding */
+			ir_bitqueue_add(worklist, use);
+		}
+	} else {
+		for (j = 0; j < n; j++, p++) {
+			use = *p;
+			IR_ASSERT(use != ref);
+			insn = &ctx->ir_base[use];
+			i = ir_insn_find_op(insn, ref);
+			IR_ASSERT(i > 0);
+			ir_insn_set_op(insn, i, new_ref);
+			if (ir_use_list_add(ctx, new_ref, use)) {
+				/* restore after reallocation */
+				use_list = &ctx->use_lists[ref];
+				n = use_list->count;
+				p = &ctx->use_edges[use_list->refs + j];
+			}
+			/* schedule folding */
+			ir_bitqueue_add(worklist, use);
+		}
+	}
 	CLEAR_USES(ref);
 }
 
@@ -2483,7 +2499,9 @@ int ir_sccp(ir_ctx *ctx)
 		} else if (value->op == IR_TOP) {
 			/* remove unreachable instruction */
 			insn = &ctx->ir_base[i];
-			if (ir_op_flags[insn->op] & (IR_OP_FLAG_DATA|IR_OP_FLAG_MEM)) {
+			if (insn->op == IR_NOP) {
+				/* already removed */
+			} else if (ir_op_flags[insn->op] & (IR_OP_FLAG_DATA|IR_OP_FLAG_MEM)) {
 				if (insn->op != IR_PARAM && (insn->op != IR_VAR || _values[insn->op1].op == IR_TOP)) {
 					ir_sccp_remove_insn(ctx, _values, i, &worklist2);
 				}
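Note: the added `insn->op == IR_NOP` case, together with its `/* already removed */` comment, shows how removal works here: an instruction slot is rewritten to a NOP rather than deleted, because refs are indices into `ir_base` and must stay stable, so later passes have to skip such tombstones instead of processing them twice. The pattern in miniature (types illustrative):

#include <stdio.h>

enum op { OP_NOP, OP_ADD, OP_MUL };

struct insn { enum op op; int a, b; };

int main(void)
{
    struct insn code[3] = { {OP_ADD, 1, 2}, {OP_MUL, 3, 4}, {OP_ADD, 5, 6} };

    code[1].op = OP_NOP;            /* "remove" without shifting any indices */

    for (int i = 0; i < 3; i++) {
        if (code[i].op == OP_NOP) {
            continue;               /* already removed: skip the tombstone */
        }
        printf("insn %d: op=%d\n", i, code[i].op);
    }
    return 0;
}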