Mirror of https://github.com/openjdk/jdk.git (synced 2025-09-17 17:44:40 +02:00)
8285301: C2: assert(!requires_atomic_access) failed: can't ensure atomicity
Reviewed-by: kvn, dlong
commit 0a4a6403bb
parent 40f19c014f
10 changed files with 110 additions and 131 deletions
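The patch threads the requires_atomic_access flag through C2's polymorphic memory-node factories (LoadNode::make and StoreNode::make, plus the GraphKit and IdealKit wrappers) and retires the separate LoadLNode/LoadDNode/StoreLNode/StoreDNode make_atomic helpers. Loads and stores built outside of parsing, such as the ones BarrierSetC2 creates when clones and arraycopies are expanded, can then request atomic 64-bit accesses instead of tripping the assert quoted in the bug title. For background, the stand-alone Java sketch below (illustrative only, not part of the change; class and field names are invented) shows the word tearing that plain long accesses are allowed to exhibit and that -XX:+UnlockExperimentalVMOptions -XX:+AlwaysAtomicAccesses is meant to rule out; on most 64-bit JVMs it reports zero torn reads because the accesses happen to be atomic anyway.

// Illustrative only: not part of this commit. A plain (non-volatile) long
// store may legally be split into two 32-bit halves, so a concurrent reader
// could observe a "torn" mix of the two bit patterns written below.
// -XX:+AlwaysAtomicAccesses asks the VM to keep even plain 64-bit accesses
// atomic, which is the guarantee the removed asserts could not provide for
// optimizer-generated loads and stores.
public class LongTearingSketch {
    static long value; // deliberately not volatile

    public static void main(String[] args) throws InterruptedException {
        Thread writer = new Thread(() -> {
            for (long i = 0; i < 100_000_000L; i++) {
                // Alternate between two patterns whose 32-bit halves differ.
                value = ((i & 1) == 0) ? 0x00000000FFFFFFFFL : 0xFFFFFFFF00000000L;
            }
        });
        writer.start();
        long torn = 0;
        while (writer.isAlive()) {
            long v = value;
            if (v != 0L && v != 0x00000000FFFFFFFFL && v != 0xFFFFFFFF00000000L) {
                torn++; // only possible if a read or write was not atomic
            }
        }
        writer.join();
        System.out.println("torn reads observed: " + torn);
    }
}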
@@ -91,19 +91,19 @@ Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) cons
   MemNode::MemOrd mo = access.mem_node_mo();
 
   Node* store;
+  BasicType bt = access.type();
   if (access.is_parse_access()) {
     C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
 
     GraphKit* kit = parse_access.kit();
-    if (access.type() == T_DOUBLE) {
+    if (bt == T_DOUBLE) {
       Node* new_val = kit->dprecision_rounding(val.node());
       val.set_node(new_val);
     }
 
-    store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), access.type(),
+    store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), bt,
                                  access.addr().type(), mo, requires_atomic_access, unaligned, mismatched, unsafe);
   } else {
-    assert(!requires_atomic_access, "not yet supported");
     assert(access.is_opt_access(), "either parse or opt access");
     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
     Node* ctl = opt_access.ctl();
@@ -113,7 +113,7 @@ Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) cons
     int alias = gvn.C->get_alias_index(adr_type);
     Node* mem = mm->memory_at(alias);
 
-    StoreNode* st = StoreNode::make(gvn, ctl, mem, access.addr().node(), adr_type, val.node(), access.type(), mo);
+    StoreNode* st = StoreNode::make(gvn, ctl, mem, access.addr().node(), adr_type, val.node(), bt, mo, requires_atomic_access);
     if (unaligned) {
       st->set_unaligned_access();
     }
@@ -156,12 +156,11 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) con
     Node* control = control_dependent ? kit->control() : NULL;
 
     if (immutable) {
-      assert(!requires_atomic_access, "can't ensure atomicity");
       Compile* C = Compile::current();
       Node* mem = kit->immutable_memory();
       load = LoadNode::make(kit->gvn(), control, mem, adr,
-                            adr_type, val_type, access.type(), mo, dep, unaligned,
-                            mismatched, unsafe, access.barrier_data());
+                            adr_type, val_type, access.type(), mo, dep, requires_atomic_access,
+                            unaligned, mismatched, unsafe, access.barrier_data());
       load = kit->gvn().transform(load);
     } else {
       load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
@@ -169,15 +168,14 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) con
                             access.barrier_data());
     }
   } else {
-    assert(!requires_atomic_access, "not yet supported");
     assert(access.is_opt_access(), "either parse or opt access");
     C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
     Node* control = control_dependent ? opt_access.ctl() : NULL;
     MergeMemNode* mm = opt_access.mem();
     PhaseGVN& gvn = opt_access.gvn();
     Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));
-    load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo,
-                          dep, unaligned, mismatched, unsafe, access.barrier_data());
+    load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo, dep,
+                          requires_atomic_access, unaligned, mismatched, unsafe, access.barrier_data());
     load = gvn.transform(load);
   }
   access.set_raw_access(load);
@@ -1535,14 +1535,7 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
   const TypePtr* adr_type = NULL; // debug-mode-only argument
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node* mem = memory(adr_idx);
-  Node* ld;
-  if (require_atomic_access && bt == T_LONG) {
-    ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
-  } else if (require_atomic_access && bt == T_DOUBLE) {
-    ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
-  } else {
-    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
-  }
+  Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
   ld = _gvn.transform(ld);
   if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
     // Improve graph before escape analysis and boxing elimination.
@@ -1562,14 +1555,7 @@ Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
   const TypePtr* adr_type = NULL;
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node *mem = memory(adr_idx);
-  Node* st;
-  if (require_atomic_access && bt == T_LONG) {
-    st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
-  } else if (require_atomic_access && bt == T_DOUBLE) {
-    st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
-  } else {
-    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
-  }
+  Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access);
   if (unaligned) {
     st->as_Store()->set_unaligned_access();
   }
@@ -358,12 +358,7 @@ Node* IdealKit::load(Node* ctl,
   const TypePtr* adr_type = NULL; // debug-mode-only argument
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node* mem = memory(adr_idx);
-  Node* ld;
-  if (require_atomic_access && bt == T_LONG) {
-    ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo);
-  } else {
-    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
-  }
+  Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, LoadNode::DependsOnlyOnTest, require_atomic_access);
   return transform(ld);
 }
 
@@ -375,12 +370,7 @@ Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
   const TypePtr* adr_type = NULL;
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node *mem = memory(adr_idx);
-  Node* st;
-  if (require_atomic_access && bt == T_LONG) {
-    st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);
-  } else {
-    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
-  }
+  Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access);
   if (mismatched) {
     st->as_Store()->set_mismatched_access();
   }
@@ -1017,7 +1017,7 @@ bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem,
   Node* sval = transform_later(
     LoadNode::make(_igvn, *ctrl, (*mem)->memory_at(s_alias_idx), sptr, s_adr_type,
                    TypeInt::INT, T_INT, MemNode::unordered, LoadNode::DependsOnlyOnTest,
-                   false /*unaligned*/, is_mismatched));
+                   false /*require_atomic_access*/, false /*unaligned*/, is_mismatched));
   Node* st = transform_later(
     StoreNode::make(_igvn, *ctrl, (*mem)->memory_at(d_alias_idx), dptr, adr_type,
                     sval, T_INT, MemNode::unordered));
@@ -861,8 +861,8 @@ bool LoadNode::is_immutable_value(Node* adr) {
 
 //----------------------------LoadNode::make-----------------------------------
 // Polymorphic factory method:
-Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo,
-                     ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
+Node* LoadNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, BasicType bt, MemOrd mo,
+                     ControlDependency control_dependency, bool require_atomic_access, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
   Compile* C = gvn.C;
 
   // sanity check the alias category against the created node type
@@ -884,9 +884,9 @@ Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypeP
   case T_INT:     load = new LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
   case T_CHAR:    load = new LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
   case T_SHORT:   load = new LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo, control_dependency); break;
-  case T_LONG:    load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency); break;
+  case T_LONG:    load = new LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic_access); break;
   case T_FLOAT:   load = new LoadFNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
-  case T_DOUBLE:  load = new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency); break;
+  case T_DOUBLE:  load = new LoadDNode (ctl, mem, adr, adr_type, rt,            mo, control_dependency, require_atomic_access); break;
   case T_ADDRESS: load = new LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo, control_dependency); break;
   case T_OBJECT:
 #ifdef _LP64
@@ -922,42 +922,6 @@ Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypeP
   return load;
 }
 
-LoadLNode* LoadLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo,
-                                  ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
-  bool require_atomic = true;
-  LoadLNode* load = new LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic);
-  if (unaligned) {
-    load->set_unaligned_access();
-  }
-  if (mismatched) {
-    load->set_mismatched_access();
-  }
-  if (unsafe) {
-    load->set_unsafe_access();
-  }
-  load->set_barrier_data(barrier_data);
-  return load;
-}
-
-LoadDNode* LoadDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo,
-                                  ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
-  bool require_atomic = true;
-  LoadDNode* load = new LoadDNode(ctl, mem, adr, adr_type, rt, mo, control_dependency, require_atomic);
-  if (unaligned) {
-    load->set_unaligned_access();
-  }
-  if (mismatched) {
-    load->set_mismatched_access();
-  }
-  if (unsafe) {
-    load->set_unsafe_access();
-  }
-  load->set_barrier_data(barrier_data);
-  return load;
-}
-
-
-
 //------------------------------hash-------------------------------------------
 uint LoadNode::hash() const {
   // unroll addition of interesting fields
@@ -1289,7 +1253,7 @@ Node* LoadNode::convert_to_unsigned_load(PhaseGVN& gvn) {
   }
   return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
                         raw_adr_type(), rt, bt, _mo, _control_dependency,
-                        is_unaligned_access(), is_mismatched_access());
+                        false /*require_atomic_access*/, is_unaligned_access(), is_mismatched_access());
 }
 
 // Construct an equivalent signed load.
@@ -1309,7 +1273,7 @@ Node* LoadNode::convert_to_signed_load(PhaseGVN& gvn) {
   }
   return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
                         raw_adr_type(), rt, bt, _mo, _control_dependency,
-                        is_unaligned_access(), is_mismatched_access());
+                        false /*require_atomic_access*/, is_unaligned_access(), is_mismatched_access());
 }
 
 bool LoadNode::has_reinterpret_variant(const Type* rt) {
@@ -1332,9 +1296,12 @@ Node* LoadNode::convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt) {
   if (raw_type == NULL) {
     is_mismatched = true; // conservatively match all non-raw accesses as mismatched
   }
+  const int op = Opcode();
+  bool require_atomic_access = (op == Op_LoadL && ((LoadLNode*)this)->require_atomic_access()) ||
+                               (op == Op_LoadD && ((LoadDNode*)this)->require_atomic_access());
   return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
                         raw_adr_type(), rt, bt, _mo, _control_dependency,
-                        is_unaligned_access(), is_mismatched);
+                        require_atomic_access, is_unaligned_access(), is_mismatched);
 }
 
 bool StoreNode::has_reinterpret_variant(const Type* vt) {
@@ -1352,7 +1319,11 @@ bool StoreNode::has_reinterpret_variant(const Type* vt) {
 Node* StoreNode::convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Type* vt) {
   BasicType bt = vt->basic_type();
   assert(has_reinterpret_variant(vt), "no reinterpret variant: %s %s", Name(), type2name(bt));
-  StoreNode* st = StoreNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), raw_adr_type(), val, bt, _mo);
+  const int op = Opcode();
+  bool require_atomic_access = (op == Op_StoreL && ((StoreLNode*)this)->require_atomic_access()) ||
+                               (op == Op_StoreD && ((StoreDNode*)this)->require_atomic_access());
+  StoreNode* st = StoreNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address),
+                                  raw_adr_type(), val, bt, _mo, require_atomic_access);
 
   bool is_mismatched = is_mismatched_access();
   const TypeRawPtr* raw_type = gvn.type(in(MemNode::Memory))->isa_rawptr();
@@ -2582,7 +2553,7 @@ Node* LoadRangeNode::Identity(PhaseGVN* phase) {
 //=============================================================================
 //---------------------------StoreNode::make-----------------------------------
 // Polymorphic factory method:
-StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
+StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo, bool require_atomic_access) {
   assert((mo == unordered || mo == release), "unexpected");
   Compile* C = gvn.C;
   assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
@@ -2594,9 +2565,9 @@ StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const
   case T_INT:     return new StoreINode(ctl, mem, adr, adr_type, val, mo);
   case T_CHAR:
   case T_SHORT:   return new StoreCNode(ctl, mem, adr, adr_type, val, mo);
-  case T_LONG:    return new StoreLNode(ctl, mem, adr, adr_type, val, mo);
+  case T_LONG:    return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic_access);
   case T_FLOAT:   return new StoreFNode(ctl, mem, adr, adr_type, val, mo);
-  case T_DOUBLE:  return new StoreDNode(ctl, mem, adr, adr_type, val, mo);
+  case T_DOUBLE:  return new StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic_access);
   case T_METADATA:
   case T_ADDRESS:
   case T_OBJECT:
@@ -2620,17 +2591,6 @@ StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const
   }
 }
 
-StoreLNode* StoreLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
-  bool require_atomic = true;
-  return new StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
-}
-
-StoreDNode* StoreDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
-  bool require_atomic = true;
-  return new StoreDNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
-}
-
-
 //--------------------------bottom_type----------------------------------------
 const Type *StoreNode::bottom_type() const {
   return Type::MEMORY;
@@ -226,10 +226,10 @@ public:
   }
 
   // Polymorphic factory method:
-  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
-                    const TypePtr* at, const Type *rt, BasicType bt,
+  static Node* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
+                    const TypePtr* at, const Type* rt, BasicType bt,
                     MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
-                    bool unaligned = false, bool mismatched = false, bool unsafe = false,
+                    bool require_atomic_access = false, bool unaligned = false, bool mismatched = false, bool unsafe = false,
                     uint8_t barrier_data = 0);
 
   virtual uint hash() const; // Check the type
@@ -414,9 +414,7 @@ public:
   virtual int store_Opcode() const { return Op_StoreL; }
   virtual BasicType memory_type() const { return T_LONG; }
   bool require_atomic_access() const { return _require_atomic_access; }
-  static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
-                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
-                                bool unaligned = false, bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     LoadNode::dump_spec(st);
@@ -466,9 +464,7 @@ public:
   virtual int store_Opcode() const { return Op_StoreD; }
   virtual BasicType memory_type() const { return T_DOUBLE; }
   bool require_atomic_access() const { return _require_atomic_access; }
-  static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
-                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
-                                bool unaligned = false, bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     LoadNode::dump_spec(st);
@@ -609,8 +605,9 @@ public:
   // procedure must indicate that the store requires `release'
   // semantics, if the stored value is an object reference that might
   // point to a new object and may become externally visible.
-  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
-                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);
+  static StoreNode* make(PhaseGVN& gvn, Node* c, Node* mem, Node* adr,
+                         const TypePtr* at, Node* val, BasicType bt,
+                         MemOrd mo, bool require_atomic_access = false);
 
   virtual uint hash() const; // Check the type
 
@@ -691,7 +688,7 @@ public:
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_LONG; }
   bool require_atomic_access() const { return _require_atomic_access; }
-  static StoreLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     StoreNode::dump_spec(st);
@@ -727,7 +724,7 @@ public:
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_DOUBLE; }
   bool require_atomic_access() const { return _require_atomic_access; }
-  static StoreDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     StoreNode::dump_spec(st);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@ import java.util.Random;
 
 /**
  * @test
- * @bug 8251871
+ * @bug 8251871 8285301
  * @summary Optimize arrayCopy using AVX-512 masked instructions.
  *
  * @run main/othervm/timeout=600 -XX:-TieredCompilation -Xbatch -XX:+IgnoreUnrecognizedVMOptions
@@ -50,6 +50,8 @@ import java.util.Random;
  * @run main/othervm/timeout=600 -XX:-TieredCompilation -Xbatch -XX:+IgnoreUnrecognizedVMOptions
  *      -XX:UseAVX=3 -XX:+UnlockDiagnosticVMOptions -XX:ArrayOperationPartialInlineSize=64 -XX:MaxVectorSize=64 -XX:ArrayCopyLoadStoreMaxElem=16
  *      compiler.arraycopy.TestArrayCopyConjoint
+ * @run main/othervm/timeout=600 -XX:-TieredCompilation -Xbatch -XX:+UnlockExperimentalVMOptions -XX:+AlwaysAtomicAccesses
+ *      compiler.arraycopy.TestArrayCopyConjoint
  *
  */
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@ import java.util.Random;
 
 /**
  * @test
- * @bug 8251871
+ * @bug 8251871 8285301
  * @summary Optimize arrayCopy using AVX-512 masked instructions.
  *
  * @run main/othervm/timeout=600 -XX:-TieredCompilation -Xbatch -XX:+IgnoreUnrecognizedVMOptions
@@ -50,6 +50,8 @@ import java.util.Random;
  * @run main/othervm/timeout=600 -XX:-TieredCompilation -Xbatch -XX:+IgnoreUnrecognizedVMOptions
  *      -XX:UseAVX=3 -XX:+UnlockDiagnosticVMOptions -XX:ArrayOperationPartialInlineSize=64 -XX:MaxVectorSize=64 -XX:ArrayCopyLoadStoreMaxElem=16
  *      compiler.arraycopy.TestArrayCopyDisjoint
+ * @run main/othervm/timeout=600 -XX:-TieredCompilation -Xbatch -XX:+UnlockExperimentalVMOptions -XX:+AlwaysAtomicAccesses
+ *      compiler.arraycopy.TestArrayCopyDisjoint
  *
  */
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 6700100 8156760 8248226
+ * @bug 6700100 8156760 8248226 8285301
  * @summary small instance clone as loads/stores
  * @library /
  *
@@ -42,6 +42,10 @@
  *                   -XX:CompileCommand=dontinline,compiler.arraycopy.TestInstanceCloneAsLoadsStores::m*
  *                   -XX:+IgnoreUnrecognizedVMOptions -XX:-ReduceInitialCardMarks -XX:-ReduceBulkZeroing
  *                   compiler.arraycopy.TestInstanceCloneAsLoadsStores
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ *                   -XX:CompileCommand=dontinline,compiler.arraycopy.TestInstanceCloneAsLoadsStores::m*
+ *                   -XX:+UnlockExperimentalVMOptions -XX:+AlwaysAtomicAccesses
+ *                   compiler.arraycopy.TestInstanceCloneAsLoadsStores
  */
 
 package compiler.arraycopy;
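TestInstanceCloneAsLoadsStores gets an extra @run with -XX:+AlwaysAtomicAccesses because the clone-as-loads/stores optimization is exactly the kind of optimizer-generated access that previously could not be marked atomic. The sketch below is a hypothetical example, not taken from the test (names are invented): when C2 inlines clone() of a small instance as individual field loads and stores, the 64-bit long and double fields are the accesses that now carry require_atomic_access under that flag.

// Hypothetical illustration, not part of the test: a small Cloneable class
// whose 64-bit fields are the interesting case. When C2 expands clone() of
// such an instance into plain field loads and stores, those accesses must
// stay atomic under -XX:+AlwaysAtomicAccesses; this fix lets the expansion
// request that instead of hitting the "can't ensure atomicity" assert.
class Holder implements Cloneable {
    long id;        // 64-bit primitive field
    double weight;  // 64-bit primitive field
    int count;      // 32-bit field, atomic regardless

    @Override
    protected Holder clone() {
        try {
            return (Holder) super.clone();   // candidate for clone-as-loads/stores
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e);
        }
    }
}

Repeatedly cloning such objects in a hot loop with -XX:-BackgroundCompilation, as the test's dontinlined m* methods do for its own classes, is roughly what the added @run line exercises.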
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestAlwaysAtomicAccesses
+ * @bug 8285301
+ * @summary Test memory accesses from compiled code with AlwaysAtomicAccesses.
+ * @run main/othervm -Xcomp -XX:+UnlockExperimentalVMOptions -XX:+AlwaysAtomicAccesses
+ *      compiler.membars.TestAlwaysAtomicAccesses
+ */
+
+package compiler.membars;
+
+public class TestAlwaysAtomicAccesses {
+
+    public static void main(String[] args) {
+        // Nothing to do here. Compilations are triggered by -Xcomp.
+        System.out.println("Test passed");
+    }
+}