mirror of https://github.com/openjdk/jdk.git (synced 2025-09-17 17:44:40 +02:00)

8212243: More gc interface tweaks for arraycopy
Reviewed-by: kvn, eosterlund
parent: 0fade4897e
commit: 8ab8d45552
14 changed files with 259 additions and 122 deletions
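
The heart of the change: C2Access previously carried a GraphKit, so the BarrierSetC2 backends could only be called while parsing. This patch moves the GraphKit into a new C2ParseAccess subclass, renames C2AtomicAccess to C2AtomicParseAccess, and adds a GraphKit-free C2OptAccess (PhaseGVN + control + MergeMem) so that ArrayCopyNode can route its loads and stores through the GC barrier interface during optimization. Backend code that still requires parse time asserts is_parse_access() and downcasts. Below is a minimal standalone sketch of that dispatch pattern; all types here are stand-ins, not the HotSpot sources.

#include <cassert>
#include <cstdio>

struct GraphKit {};  // stand-ins for the real compiler types
struct PhaseGVN {};

class C2Access {
public:
  virtual PhaseGVN& gvn() const = 0;
  virtual bool is_parse_access() const { return false; }
  virtual bool is_opt_access() const { return false; }
  virtual ~C2Access() {}
};

// Parse-time flavor: owns a GraphKit, like C2ParseAccess in the patch.
class C2ParseAccess : public C2Access {
  GraphKit* _kit;
  PhaseGVN& _gvn;
public:
  C2ParseAccess(GraphKit* kit, PhaseGVN& gvn) : _kit(kit), _gvn(gvn) {}
  GraphKit* kit() const { return _kit; }
  virtual PhaseGVN& gvn() const { return _gvn; }
  virtual bool is_parse_access() const { return true; }
};

// Optimization-time flavor: no GraphKit, like C2OptAccess in the patch.
class C2OptAccess : public C2Access {
  PhaseGVN& _gvn;
public:
  explicit C2OptAccess(PhaseGVN& gvn) : _gvn(gvn) {}
  virtual PhaseGVN& gvn() const { return _gvn; }
  virtual bool is_opt_access() const { return true; }
};

// The dispatch idiom the patch applies throughout BarrierSetC2:
static void store_at_resolved(C2Access& access) {
  if (access.is_parse_access()) {
    // Parse time: emit IR through the GraphKit.
    GraphKit* kit = static_cast<C2ParseAccess&>(access).kit();
    (void)kit;
    std::puts("parse-time store");
  } else {
    assert(access.is_opt_access() && "either parse or opt access");
    // Optimization time: build nodes directly and transform them via GVN.
    PhaseGVN& gvn = access.gvn();
    (void)gvn;
    std::puts("optimization-time store");
  }
}

int main() {
  PhaseGVN gvn;
  GraphKit kit;
  C2ParseAccess parse(&kit, gvn);
  C2OptAccess opt(gvn);
  store_at_resolved(parse);
  store_at_resolved(opt);
  return 0;
}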
@@ -554,6 +554,12 @@ void ciInstanceKlass::compute_injected_fields() {
   _has_injected_fields = has_injected_fields;
 }
 
+bool ciInstanceKlass::has_object_fields() const {
+  GUARDED_VM_ENTRY(
+      return get_instanceKlass()->nonstatic_oop_map_size() > 0;
+    );
+}
+
 // ------------------------------------------------------------------
 // ciInstanceKlass::find_method
 //
@@ -202,6 +202,8 @@ public:
     return _has_injected_fields > 0 ? true : false;
   }
 
+  bool has_object_fields() const;
+
   // nth nonstatic field (presented by ascending address)
   ciField* nonstatic_field_at(int i) {
     assert(_nonstatic_fields != NULL, "");
@@ -594,8 +594,6 @@ void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* off
 
 Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
   DecoratorSet decorators = access.decorators();
-  GraphKit* kit = access.kit();
-
   Node* adr = access.addr().node();
   Node* obj = access.base();
 
@@ -606,7 +604,8 @@ Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) c
   bool is_unordered = (decorators & MO_UNORDERED) != 0;
   bool need_cpu_mem_bar = !is_unordered || mismatched || !in_heap;
 
-  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : kit->top();
+  Node* top = Compile::current()->top();
+  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
   Node* load = CardTableBarrierSetC2::load_at_resolved(access, val_type);
 
   // If we are reading the value of the referent field of a Reference
@@ -616,12 +615,16 @@ Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) c
   // Also we need to add memory barrier to prevent commoning reads
   // from this field across safepoint since GC can change its value.
   bool need_read_barrier = in_heap && (on_weak ||
-                                       (unknown && offset != kit->top() && obj != kit->top()));
+                                       (unknown && offset != top && obj != top));
 
   if (!access.is_oop() || !need_read_barrier) {
     return load;
   }
 
+  assert(access.is_parse_access(), "entry not supported at optimization time");
+  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+  GraphKit* kit = parse_access.kit();
+
   if (on_weak) {
     // Use the pre-barrier to record the value in the referent field
     pre_barrier(kit, false /* do_load */,
@@ -35,10 +35,12 @@
 // By default this is a no-op.
 void BarrierSetC2::resolve_address(C2Access& access) const { }
 
-void* C2Access::barrier_set_state() const {
+void* C2ParseAccess::barrier_set_state() const {
   return _kit->barrier_set_state();
 }
 
+PhaseGVN& C2ParseAccess::gvn() const { return _kit->gvn(); }
+
 bool C2Access::needs_cpu_membar() const {
   bool mismatched = (_decorators & C2_MISMATCHED) != 0;
   bool is_unordered = (_decorators & MO_UNORDERED) != 0;
@@ -70,7 +72,6 @@ bool C2Access::needs_cpu_membar() const {
 
 Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
   DecoratorSet decorators = access.decorators();
-  GraphKit* kit = access.kit();
 
   bool mismatched = (decorators & C2_MISMATCHED) != 0;
   bool unaligned = (decorators & C2_UNALIGNED) != 0;
@@ -79,22 +80,49 @@ Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) cons
   bool in_native = (decorators & IN_NATIVE) != 0;
   assert(!in_native, "not supported yet");
 
+  MemNode::MemOrd mo = access.mem_node_mo();
+
+  Node* store;
+  if (access.is_parse_access()) {
+    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+
+    GraphKit* kit = parse_access.kit();
     if (access.type() == T_DOUBLE) {
       Node* new_val = kit->dstore_rounding(val.node());
       val.set_node(new_val);
     }
 
-  MemNode::MemOrd mo = access.mem_node_mo();
-
-  Node* store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), access.type(),
+    store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), access.type(),
                                  access.addr().type(), mo, requires_atomic_access, unaligned, mismatched);
     access.set_raw_access(store);
+  } else {
+    assert(!requires_atomic_access, "not yet supported");
+    assert(access.is_opt_access(), "either parse or opt access");
+    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
+    Node* ctl = opt_access.ctl();
+    MergeMemNode* mm = opt_access.mem();
+    PhaseGVN& gvn = opt_access.gvn();
+    const TypePtr* adr_type = access.addr().type();
+    int alias = gvn.C->get_alias_index(adr_type);
+    Node* mem = mm->memory_at(alias);
+
+    StoreNode* st = StoreNode::make(gvn, ctl, mem, access.addr().node(), adr_type, val.node(), access.type(), mo);
+    if (unaligned) {
+      st->set_unaligned_access();
+    }
+    if (mismatched) {
+      st->set_mismatched_access();
+    }
+    store = gvn.transform(st);
+    if (store == st) {
+      mm->set_memory_at(alias, st);
+    }
+  }
   return store;
 }
 
 Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
   DecoratorSet decorators = access.decorators();
-  GraphKit* kit = access.kit();
 
   Node* adr = access.addr().node();
   const TypePtr* adr_type = access.addr().type();
@@ -109,9 +137,13 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) con
 
   MemNode::MemOrd mo = access.mem_node_mo();
   LoadNode::ControlDependency dep = pinned ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest;
-  Node* control = control_dependent ? kit->control() : NULL;
 
   Node* load;
+  if (access.is_parse_access()) {
+    C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+    GraphKit* kit = parse_access.kit();
+    Node* control = control_dependent ? kit->control() : NULL;
+
     if (in_native) {
       load = kit->make_load(control, adr, val_type, access.type(), mo);
     } else {
@@ -119,6 +151,17 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) con
                             dep, requires_atomic_access, unaligned, mismatched);
     }
     access.set_raw_access(load);
+  } else {
+    assert(!requires_atomic_access, "not yet supported");
+    assert(access.is_opt_access(), "either parse or opt access");
+    C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
+    Node* control = control_dependent ? opt_access.ctl() : NULL;
+    MergeMemNode* mm = opt_access.mem();
+    PhaseGVN& gvn = opt_access.gvn();
+    Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));
+    load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo, dep, unaligned, mismatched);
+    load = gvn.transform(load);
+  }
 
   return load;
 }
@@ -130,7 +173,11 @@ class C2AccessFence: public StackObj {
 public:
   C2AccessFence(C2Access& access) :
     _access(access), _leading_membar(NULL) {
-    GraphKit* kit = access.kit();
+    GraphKit* kit = NULL;
+    if (access.is_parse_access()) {
+      C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+      kit = parse_access.kit();
+    }
     DecoratorSet decorators = access.decorators();
 
     bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
@@ -141,6 +188,7 @@ public:
     bool is_release = (decorators & MO_RELEASE) != 0;
 
     if (is_atomic) {
+      assert(kit != NULL, "unsupported at optimization time");
      // Memory-model-wise, a LoadStore acts like a little synchronized
      // block, so needs barriers on each side. These don't translate
      // into actual barriers on most machines, but we still need rest of
@@ -159,6 +207,7 @@ public:
       // floating down past the volatile write. Also prevents commoning
       // another volatile read.
       if (is_volatile || is_release) {
+        assert(kit != NULL, "unsupported at optimization time");
         _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
       }
     } else {
@@ -168,11 +217,13 @@ public:
       // so there's no problems making a strong assert about mixing users
       // of safe & unsafe memory.
       if (is_volatile && support_IRIW_for_not_multiple_copy_atomic_cpu) {
+        assert(kit != NULL, "unsupported at optimization time");
         _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
       }
     }
 
     if (access.needs_cpu_membar()) {
+      assert(kit != NULL, "unsupported at optimization time");
       kit->insert_mem_bar(Op_MemBarCPUOrder);
     }
 
@@ -185,7 +236,11 @@ public:
   }
 
   ~C2AccessFence() {
-    GraphKit* kit = _access.kit();
+    GraphKit* kit = NULL;
+    if (_access.is_parse_access()) {
+      C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(_access);
+      kit = parse_access.kit();
+    }
     DecoratorSet decorators = _access.decorators();
 
     bool is_write = (decorators & C2_WRITE_ACCESS) != 0;
@@ -202,6 +257,7 @@ public:
     }
 
     if (is_atomic) {
+      assert(kit != NULL, "unsupported at optimization time");
       if (is_acquire || is_volatile) {
         Node* n = _access.raw_access();
         Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
@@ -212,6 +268,7 @@ public:
     } else if (is_write) {
       // If not multiple copy atomic, we do the MemBarVolatile before the load.
       if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
+        assert(kit != NULL, "unsupported at optimization time");
         Node* n = _access.raw_access();
         Node* mb = kit->insert_mem_bar(Op_MemBarVolatile, n); // Use fat membar
         if (_leading_membar != NULL) {
@@ -220,6 +277,7 @@ public:
       }
     } else {
       if (is_volatile || is_acquire) {
+        assert(kit != NULL, "unsupported at optimization time");
         Node* n = _access.raw_access();
         assert(_leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
         Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
@@ -295,7 +353,7 @@ void C2Access::fixup_decorators() {
   if (!needs_cpu_membar() && adr_type->isa_instptr()) {
     assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
     intptr_t offset = Type::OffsetBot;
-    AddPNode::Ideal_base_and_offset(adr, &_kit->gvn(), offset);
+    AddPNode::Ideal_base_and_offset(adr, &gvn(), offset);
     if (offset >= 0) {
       int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
       if (offset < s) {
@@ -310,26 +368,28 @@ void C2Access::fixup_decorators() {
 
 //--------------------------- atomic operations---------------------------------
 
-void BarrierSetC2::pin_atomic_op(C2AtomicAccess& access) const {
+void BarrierSetC2::pin_atomic_op(C2AtomicParseAccess& access) const {
   if (!access.needs_pinning()) {
     return;
   }
   // SCMemProjNodes represent the memory state of a LoadStore. Their
   // main role is to prevent LoadStore nodes from being optimized away
   // when their results aren't used.
-  GraphKit* kit = access.kit();
+  assert(access.is_parse_access(), "entry not supported at optimization time");
+  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+  GraphKit* kit = parse_access.kit();
   Node* load_store = access.raw_access();
   assert(load_store != NULL, "must pin atomic op");
   Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
   kit->set_memory(proj, access.alias_idx());
 }
 
-void C2AtomicAccess::set_memory() {
+void C2AtomicParseAccess::set_memory() {
   Node *mem = _kit->memory(_alias_idx);
   _memory = mem;
 }
 
-Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
+Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                    Node* new_val, const Type* value_type) const {
   GraphKit* kit = access.kit();
   MemNode::MemOrd mo = access.mem_node_mo();
@@ -386,7 +446,7 @@ Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node*
   return load_store;
 }
 
-Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
+Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* value_type) const {
   GraphKit* kit = access.kit();
   DecoratorSet decorators = access.decorators();
@@ -460,7 +520,7 @@ Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node
   return load_store;
 }
 
-Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
+Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
   GraphKit* kit = access.kit();
   Node* mem = access.memory();
   Node* adr = access.addr().node();
@@ -508,7 +568,7 @@ Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_va
   return load_store;
 }
 
-Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
+Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
   Node* load_store = NULL;
   GraphKit* kit = access.kit();
   Node* adr = access.addr().node();
@@ -538,27 +598,27 @@ Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicAccess& access, Node* new_val
   return load_store;
 }
 
-Node* BarrierSetC2::atomic_cmpxchg_val_at(C2AtomicAccess& access, Node* expected_val,
+Node* BarrierSetC2::atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,
                                           Node* new_val, const Type* value_type) const {
   C2AccessFence fence(access);
   resolve_address(access);
   return atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
 }
 
-Node* BarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicAccess& access, Node* expected_val,
+Node* BarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
                                            Node* new_val, const Type* value_type) const {
   C2AccessFence fence(access);
   resolve_address(access);
   return atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
 }
 
-Node* BarrierSetC2::atomic_xchg_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
+Node* BarrierSetC2::atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
   C2AccessFence fence(access);
   resolve_address(access);
   return atomic_xchg_at_resolved(access, new_val, value_type);
 }
 
-Node* BarrierSetC2::atomic_add_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
+Node* BarrierSetC2::atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
   C2AccessFence fence(access);
   resolve_address(access);
   return atomic_add_at_resolved(access, new_val, value_type);
@@ -594,7 +654,7 @@ void BarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool i
 
   const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
 
-  ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, NULL, dst_base, NULL, countx, false, false);
+  ArrayCopyNode* ac = ArrayCopyNode::make(kit, false, src_base, NULL, dst_base, NULL, countx, true, false);
   ac->set_clonebasic();
   Node* n = kit->gvn().transform(ac);
   if (n == ac) {
@@ -731,3 +791,8 @@ Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem,
   }
   return fast_oop;
 }
+
+void BarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const {
+  // no barrier
+  igvn.replace_node(ac, call);
+}
@@ -49,6 +49,10 @@ const DecoratorSet C2_UNSAFE_ACCESS = DECORATOR_LAST << 6;
 const DecoratorSet C2_WRITE_ACCESS = DECORATOR_LAST << 7;
 // This denotes that the access reads state.
 const DecoratorSet C2_READ_ACCESS = DECORATOR_LAST << 8;
+// A nearby allocation?
+const DecoratorSet C2_TIGHLY_COUPLED_ALLOC = DECORATOR_LAST << 9;
+// Loads and stores from an arraycopy being optimized
+const DecoratorSet C2_ARRAY_COPY = DECORATOR_LAST << 10;
 
 class GraphKit;
 class IdealKit;
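
The two new decorator bits simply claim the next free bits above DECORATOR_LAST, following the existing scheme. A runnable standalone sketch of that bit-allocation pattern (the concrete DECORATOR_LAST value below is a stand-in, and C2_TIGHLY_COUPLED_ALLOC keeps the patch's own spelling):

#include <cassert>
#include <cstdint>

// Stand-in for HotSpot's DecoratorSet machinery; only the bit-allocation
// pattern from the hunk above is illustrated here.
typedef uint64_t DecoratorSet;
const DecoratorSet DECORATOR_LAST = UINT64_C(1) << 10;  // stand-in value

const DecoratorSet C2_READ_ACCESS          = DECORATOR_LAST << 8;
const DecoratorSet C2_TIGHLY_COUPLED_ALLOC = DECORATOR_LAST << 9;   // spelling as in the patch
const DecoratorSet C2_ARRAY_COPY           = DECORATOR_LAST << 10;

int main() {
  // Flags occupy distinct bits, so they compose and test independently.
  DecoratorSet d = C2_READ_ACCESS | C2_ARRAY_COPY;
  assert((d & C2_ARRAY_COPY) != 0);
  assert((d & C2_TIGHLY_COUPLED_ALLOC) == 0);
  return 0;
}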
@@ -88,7 +92,6 @@ public:
 // BarrierSetC2 backend hierarchy, for loads and stores, to reduce boiler plate.
 class C2Access: public StackObj {
 protected:
-  GraphKit*    _kit;
   DecoratorSet _decorators;
   BasicType    _type;
   Node*        _base;
@@ -96,22 +99,17 @@ protected:
   Node*        _raw_access;
 
   void fixup_decorators();
-  void* barrier_set_state() const;
 
 public:
-  C2Access(GraphKit* kit, DecoratorSet decorators,
+  C2Access(DecoratorSet decorators,
            BasicType type, Node* base, C2AccessValuePtr& addr) :
-    _kit(kit),
     _decorators(decorators),
     _type(type),
     _base(base),
     _addr(addr),
     _raw_access(NULL)
-  {
-    fixup_decorators();
-  }
+  {}
 
-  GraphKit* kit() const { return _kit; }
   DecoratorSet decorators() const { return _decorators; }
   Node* base() const { return _base; }
   C2AccessValuePtr& addr() const { return _addr; }
@@ -126,23 +124,48 @@ public:
   MemNode::MemOrd mem_node_mo() const;
   bool needs_cpu_membar() const;
 
+  virtual PhaseGVN& gvn() const = 0;
+  virtual bool is_parse_access() const { return false; }
+  virtual bool is_opt_access() const { return false; }
+};
+
+// C2Access for parse time calls to the BarrierSetC2 backend.
+class C2ParseAccess: public C2Access {
+protected:
+  GraphKit* _kit;
+
+  void* barrier_set_state() const;
+
+public:
+  C2ParseAccess(GraphKit* kit, DecoratorSet decorators,
+                BasicType type, Node* base, C2AccessValuePtr& addr) :
+    C2Access(decorators, type, base, addr),
+    _kit(kit) {
+    fixup_decorators();
+  }
+
+  GraphKit* kit() const { return _kit; }
+
   template <typename T>
   T barrier_set_state_as() const {
     return reinterpret_cast<T>(barrier_set_state());
   }
+
+  virtual PhaseGVN& gvn() const;
+  virtual bool is_parse_access() const { return true; }
 };
 
 // This class wraps a bunch of context parameters thare are passed around in the
 // BarrierSetC2 backend hierarchy, for atomic accesses, to reduce boiler plate.
-class C2AtomicAccess: public C2Access {
+class C2AtomicParseAccess: public C2ParseAccess {
   Node* _memory;
   uint  _alias_idx;
   bool  _needs_pinning;
 
 public:
-  C2AtomicAccess(GraphKit* kit, DecoratorSet decorators, BasicType type,
+  C2AtomicParseAccess(GraphKit* kit, DecoratorSet decorators, BasicType type,
                  Node* base, C2AccessValuePtr& addr, uint alias_idx) :
-    C2Access(kit, decorators, type, base, addr),
+    C2ParseAccess(kit, decorators, type, base, addr),
     _memory(NULL),
     _alias_idx(alias_idx),
     _needs_pinning(true) {}
@@ -157,6 +180,31 @@ public:
   void set_needs_pinning(bool value) { _needs_pinning = value; }
 };
 
+// C2Access for optimization time calls to the BarrierSetC2 backend.
+class C2OptAccess: public C2Access {
+  PhaseGVN& _gvn;
+  MergeMemNode* _mem;
+  Node* _ctl;
+
+public:
+  C2OptAccess(PhaseGVN& gvn, Node* ctl, MergeMemNode* mem, DecoratorSet decorators,
+              BasicType type, Node* base, C2AccessValuePtr& addr) :
+    C2Access(decorators, type, base, addr),
+    _gvn(gvn), _mem(mem), _ctl(ctl) {
+    fixup_decorators();
+  }
+
+  MergeMemNode* mem() const { return _mem; }
+  Node* ctl() const { return _ctl; }
+  // void set_mem(Node* mem) { _mem = mem; }
+  void set_ctl(Node* ctl) { _ctl = ctl; }
+
+  virtual PhaseGVN& gvn() const { return _gvn; }
+  virtual bool is_opt_access() const { return true; }
+};
+
+
 // This is the top-level class for the backend of the Access API in C2.
 // The top-level class is responsible for performing raw accesses. The
 // various GC barrier sets inherit from the BarrierSetC2 class to sprinkle
@@ -167,25 +215,25 @@ protected:
   virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
   virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
 
-  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
+  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                Node* new_val, const Type* val_type) const;
-  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
+  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                 Node* new_val, const Type* value_type) const;
-  virtual Node* atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* val_type) const;
-  virtual Node* atomic_add_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* val_type) const;
-  void pin_atomic_op(C2AtomicAccess& access) const;
+  virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
+  virtual Node* atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
+  void pin_atomic_op(C2AtomicParseAccess& access) const;
 
 public:
   // This is the entry-point for the backend to perform accesses through the Access API.
   virtual Node* store_at(C2Access& access, C2AccessValue& val) const;
   virtual Node* load_at(C2Access& access, const Type* val_type) const;
 
-  virtual Node* atomic_cmpxchg_val_at(C2AtomicAccess& access, Node* expected_val,
+  virtual Node* atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,
                                       Node* new_val, const Type* val_type) const;
-  virtual Node* atomic_cmpxchg_bool_at(C2AtomicAccess& access, Node* expected_val,
+  virtual Node* atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
                                        Node* new_val, const Type* val_type) const;
-  virtual Node* atomic_xchg_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const;
-  virtual Node* atomic_add_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const;
+  virtual Node* atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
+  virtual Node* atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
 
   virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const;
 
@@ -203,6 +251,7 @@ public:
     Expansion
   };
   virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const { return false; }
+  virtual void clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const;
 
   // Support for GC barriers emitted during parsing
   virtual bool has_load_barriers() const { return false; }
@@ -32,7 +32,6 @@
 
 Node* ModRefBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
   DecoratorSet decorators = access.decorators();
-  GraphKit* kit = access.kit();
 
   const TypePtr* adr_type = access.addr().type();
   Node* adr = access.addr().node();
@@ -41,11 +40,16 @@ Node* ModRefBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val
   bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
   bool in_heap = (decorators & IN_HEAP) != 0;
   bool use_precise = is_array || anonymous;
+  bool tighly_coupled_alloc = (decorators & C2_TIGHLY_COUPLED_ALLOC) != 0;
 
-  if (!access.is_oop() || (!in_heap && !anonymous)) {
+  if (!access.is_oop() || tighly_coupled_alloc || (!in_heap && !anonymous)) {
     return BarrierSetC2::store_at_resolved(access, val);
   }
 
+  assert(access.is_parse_access(), "entry not supported at optimization time");
+  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+  GraphKit* kit = parse_access.kit();
+
   uint adr_idx = kit->C->get_alias_index(adr_type);
   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
 
@@ -58,7 +62,7 @@ Node* ModRefBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val
   return store;
 }
 
-Node* ModRefBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
+Node* ModRefBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                          Node* new_val, const Type* value_type) const {
   GraphKit* kit = access.kit();
 
@@ -78,7 +82,7 @@ Node* ModRefBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access,
   return result;
 }
 
-Node* ModRefBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
+Node* ModRefBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                           Node* new_val, const Type* value_type) const {
   GraphKit* kit = access.kit();
 
@@ -114,7 +118,7 @@ Node* ModRefBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access
   return load_store;
 }
 
-Node* ModRefBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
+Node* ModRefBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
   GraphKit* kit = access.kit();
 
   Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
@@ -54,11 +54,11 @@ protected:
 
   virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
 
-  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
+  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                Node* new_val, const Type* value_type) const;
-  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
+  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                 Node* new_val, const Type* value_type) const;
-  virtual Node* atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const;
+  virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
 };
 
 #endif // SHARE_GC_SHARED_C2_MODREFBARRIERSETC2_HPP
@@ -474,10 +474,10 @@ bool LoadBarrierNode::has_true_uses() const {
 
 // == Accesses ==
 
-Node* ZBarrierSetC2::make_cas_loadbarrier(C2AtomicAccess& access) const {
+Node* ZBarrierSetC2::make_cas_loadbarrier(C2AtomicParseAccess& access) const {
   assert(!UseCompressedOops, "Not allowed");
   CompareAndSwapNode* cas = (CompareAndSwapNode*)access.raw_access();
-  PhaseGVN& gvn = access.kit()->gvn();
+  PhaseGVN& gvn = access.gvn();
   Compile* C = Compile::current();
   GraphKit* kit = access.kit();
 
@@ -566,7 +566,7 @@ Node* ZBarrierSetC2::make_cas_loadbarrier(C2AtomicAccess& access) const {
   return phi;
 }
 
-Node* ZBarrierSetC2::make_cmpx_loadbarrier(C2AtomicAccess& access) const {
+Node* ZBarrierSetC2::make_cmpx_loadbarrier(C2AtomicParseAccess& access) const {
   CompareAndExchangePNode* cmpx = (CompareAndExchangePNode*)access.raw_access();
   GraphKit* kit = access.kit();
   PhaseGVN& gvn = kit->gvn();
@@ -665,7 +665,7 @@ Node* ZBarrierSetC2::load_barrier(GraphKit* kit, Node* val, Node* adr, bool weak
   }
 }
 
-static bool barrier_needed(C2Access access) {
+static bool barrier_needed(C2Access& access) {
   return ZBarrierSet::barrier_needed(access.decorators(), access.type());
 }
 
@@ -677,7 +677,9 @@ Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) co
 
   bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;
 
-  GraphKit* kit = access.kit();
+  assert(access.is_parse_access(), "entry not supported at optimization time");
+  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+  GraphKit* kit = parse_access.kit();
   PhaseGVN& gvn = kit->gvn();
   Node* adr = access.addr().node();
   Node* heap_base_oop = access.base();
@@ -707,11 +709,11 @@ Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) co
     }
     return p;
   } else {
-    return load_barrier(access.kit(), p, access.addr().node(), weak, true, true);
+    return load_barrier(parse_access.kit(), p, access.addr().node(), weak, true, true);
   }
 }
 
-Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
+Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* val_type) const {
   Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
   if (!barrier_needed(access)) {
@@ -722,7 +724,7 @@ Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node
   return make_cmpx_loadbarrier(access);
 }
 
-Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
+Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                      Node* new_val, const Type* value_type) const {
   Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
   if (!barrier_needed(access)) {
@@ -746,7 +748,7 @@ Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Nod
   return load_store;
 }
 
-Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* val_type) const {
+Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const {
   Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
   if (!barrier_needed(access)) {
     return result;
@@ -755,7 +757,9 @@ Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_v
   Node* load_store = access.raw_access();
   Node* adr = access.addr().node();
 
-  return load_barrier(access.kit(), load_store, adr, false, false, false);
+  assert(access.is_parse_access(), "entry not supported at optimization time");
+  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
+  return load_barrier(parse_access.kit(), load_store, adr, false, false, false);
 }
 
 // == Macro Expansion ==
@@ -156,8 +156,8 @@ public:
 class ZBarrierSetC2 : public BarrierSetC2 {
 private:
   ZBarrierSetC2State* state() const;
-  Node* make_cas_loadbarrier(C2AtomicAccess& access) const;
-  Node* make_cmpx_loadbarrier(C2AtomicAccess& access) const;
+  Node* make_cas_loadbarrier(C2AtomicParseAccess& access) const;
+  Node* make_cmpx_loadbarrier(C2AtomicParseAccess& access) const;
   void expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
   void expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const;
   void expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
@@ -165,15 +165,15 @@ private:
 
 protected:
   virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
-  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access,
+  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access,
                                                Node* expected_val,
                                                Node* new_val,
                                                const Type* val_type) const;
-  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access,
+  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access,
                                                 Node* expected_val,
                                                 Node* new_val,
                                                 const Type* value_type) const;
-  virtual Node* atomic_xchg_at_resolved(C2AtomicAccess& access,
+  virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access,
                                         Node* new_val,
                                         const Type* val_type) const;
 
@@ -148,6 +148,28 @@ int ArrayCopyNode::get_count(PhaseGVN *phase) const {
   return get_length_if_constant(phase);
 }
 
+Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
+  DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY;
+  C2AccessValuePtr addr(adr, adr_type);
+  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
+  Node* res = bs->load_at(access, type);
+  ctl = access.ctl();
+  return res;
+}
+
+void ArrayCopyNode::store(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, Node* val, const Type *type, BasicType bt) {
+  DecoratorSet decorators = C2_WRITE_ACCESS | IN_HEAP | C2_ARRAY_COPY;
+  if (is_alloc_tightly_coupled()) {
+    decorators |= C2_TIGHLY_COUPLED_ALLOC;
+  }
+  C2AccessValuePtr addr(adr, adr_type);
+  C2AccessValue value(val, type);
+  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
+  bs->store_at(access, value);
+  ctl = access.ctl();
+}
+
+
 Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
   if (!is_clonebasic()) {
     return NULL;
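
Note the Node*& ctl parameter in both helpers: a barrier backend may replace the control edge while emitting the access (for example when it adds control flow of its own), so the helpers hand the possibly-updated control back to the caller through the reference. A standalone sketch of that in/out convention, with stand-in types rather than the HotSpot sources:

#include <cassert>

struct Node {};  // stand-in for HotSpot's IR node type

// Like ArrayCopyNode::load/store above, the emitter takes control by reference
// and may substitute a new control node for the caller to continue under.
static Node* emit_access(Node*& ctl) {
  static Node new_ctl;   // pretend a barrier introduced control flow
  ctl = &new_ctl;
  static Node value;
  return &value;
}

int main() {
  Node start;
  Node* ctl = &start;
  Node* v = emit_access(ctl);
  (void)v;
  assert(ctl != &start);  // the caller sees the updated control edge
  return 0;
}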
@@ -182,6 +204,7 @@ Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int c
   ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
   assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");
 
+  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
   for (int i = 0; i < count; i++) {
     ciField* field = ik->nonstatic_field_at(i);
     int fieldidx = phase->C->alias_type(field)->index();
@@ -203,11 +226,8 @@ Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int c
       type = Type::get_const_basic_type(bt);
     }
 
-    Node* v = LoadNode::make(*phase, ctl, mem->memory_at(fieldidx), next_src, adr_type, type, bt, MemNode::unordered);
-    v = phase->transform(v);
-    Node* s = StoreNode::make(*phase, ctl, mem->memory_at(fieldidx), next_dest, adr_type, v, bt, MemNode::unordered);
-    s = phase->transform(s);
-    mem->set_memory_at(fieldidx, s);
+    Node* v = load(bs, phase, ctl, mem, next_src, adr_type, type, bt);
+    store(bs, phase, ctl, mem, next_dest, adr_type, v, type, bt);
   }
 
   if (!finish_transform(phase, can_reshape, ctl, mem)) {
@@ -368,28 +388,18 @@ Node* ArrayCopyNode::array_copy_forward(PhaseGVN *phase,
   if (!forward_ctl->is_top()) {
     // copy forward
     mm = mm->clone()->as_MergeMem();
-    uint alias_idx_src = phase->C->get_alias_index(atp_src);
-    uint alias_idx_dest = phase->C->get_alias_index(atp_dest);
-    Node *start_mem_src = mm->memory_at(alias_idx_src);
-    Node *start_mem_dest = mm->memory_at(alias_idx_dest);
-    Node* mem = start_mem_dest;
-    bool same_alias = (alias_idx_src == alias_idx_dest);
 
     if (count > 0) {
-      Node* v = LoadNode::make(*phase, forward_ctl, start_mem_src, adr_src, atp_src, value_type, copy_type, MemNode::unordered);
-      v = phase->transform(v);
-      mem = StoreNode::make(*phase, forward_ctl, mem, adr_dest, atp_dest, v, copy_type, MemNode::unordered);
-      mem = phase->transform(mem);
+      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+      Node* v = load(bs, phase, forward_ctl, mm, adr_src, atp_src, value_type, copy_type);
+      store(bs, phase, forward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
       for (int i = 1; i < count; i++) {
         Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
         Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off));
         Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off));
-        v = LoadNode::make(*phase, forward_ctl, same_alias ? mem : start_mem_src, next_src, atp_src, value_type, copy_type, MemNode::unordered);
-        v = phase->transform(v);
-        mem = StoreNode::make(*phase, forward_ctl,mem,next_dest,atp_dest,v, copy_type, MemNode::unordered);
-        mem = phase->transform(mem);
+        v = load(bs, phase, forward_ctl, mm, next_src, atp_src, value_type, copy_type);
+        store(bs, phase, forward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
       }
-      mm->set_memory_at(alias_idx_dest, mem);
     } else if(can_reshape) {
       PhaseIterGVN* igvn = phase->is_IterGVN();
       igvn->_worklist.push(adr_src);
@ -416,31 +426,20 @@ Node* ArrayCopyNode::array_copy_backward(PhaseGVN *phase,
|
||||||
if (!backward_ctl->is_top()) {
|
if (!backward_ctl->is_top()) {
|
||||||
// copy backward
|
// copy backward
|
||||||
mm = mm->clone()->as_MergeMem();
|
mm = mm->clone()->as_MergeMem();
|
||||||
uint alias_idx_src = phase->C->get_alias_index(atp_src);
|
|
||||||
uint alias_idx_dest = phase->C->get_alias_index(atp_dest);
|
|
||||||
Node *start_mem_src = mm->memory_at(alias_idx_src);
|
|
||||||
Node *start_mem_dest = mm->memory_at(alias_idx_dest);
|
|
||||||
Node* mem = start_mem_dest;
|
|
||||||
|
|
||||||
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
|
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
|
||||||
assert(copy_type != T_OBJECT || !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Optimization), "only tightly coupled allocations for object arrays");
|
assert(copy_type != T_OBJECT || !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Optimization), "only tightly coupled allocations for object arrays");
|
||||||
bool same_alias = (alias_idx_src == alias_idx_dest);
|
|
||||||
|
|
||||||
if (count > 0) {
|
if (count > 0) {
|
||||||
for (int i = count-1; i >= 1; i--) {
|
for (int i = count-1; i >= 1; i--) {
|
||||||
Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
|
Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
|
||||||
Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off));
|
Node* next_src = phase->transform(new AddPNode(base_src,adr_src,off));
|
||||||
Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off));
|
Node* next_dest = phase->transform(new AddPNode(base_dest,adr_dest,off));
|
||||||
Node* v = LoadNode::make(*phase, backward_ctl, same_alias ? mem : start_mem_src, next_src, atp_src, value_type, copy_type, MemNode::unordered);
|
Node* v = load(bs, phase, backward_ctl, mm, next_src, atp_src, value_type, copy_type);
|
||||||
v = phase->transform(v);
|
store(bs, phase, backward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
|
||||||
mem = StoreNode::make(*phase, backward_ctl,mem,next_dest,atp_dest,v, copy_type, MemNode::unordered);
|
|
||||||
mem = phase->transform(mem);
|
|
||||||
}
|
}
|
||||||
Node* v = LoadNode::make(*phase, backward_ctl, same_alias ? mem : start_mem_src, adr_src, atp_src, value_type, copy_type, MemNode::unordered);
|
Node* v = load(bs, phase, backward_ctl, mm, adr_src, atp_src, value_type, copy_type);
|
||||||
v = phase->transform(v);
|
store(bs, phase, backward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
|
||||||
mem = StoreNode::make(*phase, backward_ctl, mem, adr_dest, atp_dest, v, copy_type, MemNode::unordered);
|
|
||||||
mem = phase->transform(mem);
|
|
||||||
mm->set_memory_at(alias_idx_dest, mem);
|
|
||||||
} else if(can_reshape) {
|
} else if(can_reshape) {
|
||||||
PhaseIterGVN* igvn = phase->is_IterGVN();
|
PhaseIterGVN* igvn = phase->is_IterGVN();
|
||||||
igvn->_worklist.push(adr_src);
|
igvn->_worklist.push(adr_src);
|
||||||
|
|
|
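
The assert above states the new contract: by the time Ideal expands an oop-array
copy into discrete loads and stores, array_copy_requires_gc_barriers(...,
BarrierSetC2::Optimization) must have returned false, i.e. only copies the GC can
handle barrier-free (tightly coupled allocations) get this far. A sketch of how a
collector might answer the query; the override below is hypothetical, the
parameter names are guesses, and only the argument list is taken from the call
site in this diff:

    // Hypothetical override (sketch): a collector with store barriers on oop
    // fields refuses barrier-free expansion of an oop-array copy unless the
    // destination is a freshly allocated (tightly coupled) array.
    bool MyBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc,
                                                         BasicType type,
                                                         bool is_clone,
                                                         ArrayCopyPhase phase) const {
      return type == T_OBJECT && !tightly_coupled_alloc;
    }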
@@ -110,6 +110,9 @@ private:
                                  Node* ctl, Node *mem);
   static bool may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase, CallNode*& call);

+  static Node* load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* addr, const TypePtr* adr_type, const Type *type, BasicType bt);
+  void store(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* addr, const TypePtr* adr_type, Node* val, const Type *type, BasicType bt);
+
 public:

 enum {
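
The two declarations above are the pivot of the patch: they are what the
rewritten copy loops call instead of LoadNode::make/StoreNode::make. Their
bodies are not part of this excerpt; a sketch consistent with the signatures and
the call sites would route each access through the barrier set's resolved entry
points (load_at_resolved/store_at_resolved), using an optimization-time access
object that carries the PhaseGVN, control and MergeMemNode instead of a
GraphKit. The decorator choices and the C2OptAccess shape below are assumptions:

    // Sketch, not the committed body. Control may be updated by the barrier
    // set, which is why ctl is passed by reference.
    Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN* phase, Node*& ctl,
                              MergeMemNode* mem, Node* addr,
                              const TypePtr* adr_type, const Type* type,
                              BasicType bt) {
      DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP;
      C2AccessValuePtr addr_val(addr, adr_type);
      C2OptAccess access(*phase, ctl, mem, decorators, bt,
                         addr->in(AddPNode::Base), addr_val);
      Node* res = bs->load_at_resolved(access, type);  // GC may wrap the load
      ctl = access.ctl();                              // pick up new control
      return res;
    }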
@@ -1566,7 +1566,7 @@ Node* GraphKit::access_store_at(Node* obj,

   C2AccessValuePtr addr(adr, adr_type);
   C2AccessValue value(val, val_type);
-  C2Access access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
+  C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
   if (access.is_raw()) {
     return _barrier_set->BarrierSetC2::store_at(access, value);
   } else {
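
This hunk and the six that follow are one mechanical rename in opto/graphKit.cpp:
C2Access/C2AtomicAccess become C2ParseAccess/C2AtomicParseAccess wherever a
GraphKit (this) is passed in, freeing the unqualified names for accesses created
outside parsing, such as the ArrayCopyNode helpers above. The dispatch pattern is
unchanged; assembled from the hunk, with the else branch (cut off by the context
window) filled in the way it plausibly continues:

    C2AccessValuePtr addr(adr, adr_type);
    C2AccessValue value(val, val_type);
    C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
    if (access.is_raw()) {
      // Raw accesses skip GC-specific barriers: note the qualified,
      // non-virtual call to the BarrierSetC2 base implementation.
      return _barrier_set->BarrierSetC2::store_at(access, value);
    } else {
      // Otherwise dispatch virtually so the active GC can add barriers.
      return _barrier_set->store_at(access, value);
    }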
@@ -1585,7 +1585,7 @@ Node* GraphKit::access_load_at(Node* obj, // containing obj
   }

   C2AccessValuePtr addr(adr, adr_type);
-  C2Access access(this, decorators | C2_READ_ACCESS, bt, obj, addr);
+  C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr);
   if (access.is_raw()) {
     return _barrier_set->BarrierSetC2::load_at(access, val_type);
   } else {
@@ -1602,7 +1602,7 @@ Node* GraphKit::access_load(Node* adr, // actual adress to load val at
   }

   C2AccessValuePtr addr(adr, NULL);
-  C2Access access(this, decorators | C2_READ_ACCESS, bt, NULL, addr);
+  C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, NULL, addr);
   if (access.is_raw()) {
     return _barrier_set->BarrierSetC2::load_at(access, val_type);
   } else {
@@ -1620,7 +1620,7 @@ Node* GraphKit::access_atomic_cmpxchg_val_at(Node* obj,
                                              BasicType bt,
                                              DecoratorSet decorators) {
   C2AccessValuePtr addr(adr, adr_type);
-  C2AtomicAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
+  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
                         bt, obj, addr, alias_idx);
   if (access.is_raw()) {
     return _barrier_set->BarrierSetC2::atomic_cmpxchg_val_at(access, expected_val, new_val, value_type);
@@ -1639,7 +1639,7 @@ Node* GraphKit::access_atomic_cmpxchg_bool_at(Node* obj,
                                               BasicType bt,
                                               DecoratorSet decorators) {
   C2AccessValuePtr addr(adr, adr_type);
-  C2AtomicAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
+  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
                         bt, obj, addr, alias_idx);
   if (access.is_raw()) {
     return _barrier_set->BarrierSetC2::atomic_cmpxchg_bool_at(access, expected_val, new_val, value_type);
@@ -1657,7 +1657,7 @@ Node* GraphKit::access_atomic_xchg_at(Node* obj,
                                       BasicType bt,
                                       DecoratorSet decorators) {
   C2AccessValuePtr addr(adr, adr_type);
-  C2AtomicAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
+  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
                         bt, obj, addr, alias_idx);
   if (access.is_raw()) {
     return _barrier_set->BarrierSetC2::atomic_xchg_at(access, new_val, value_type);
@@ -1675,7 +1675,7 @@ Node* GraphKit::access_atomic_add_at(Node* obj,
                                      BasicType bt,
                                      DecoratorSet decorators) {
   C2AccessValuePtr addr(adr, adr_type);
-  C2AtomicAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
+  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
   if (access.is_raw()) {
     return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
   } else {
@@ -940,6 +940,7 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
         }
         k -= (oc2 - use->outcnt());
       }
+      _igvn.remove_dead_node(use);
     } else if (use->is_ArrayCopy()) {
       // Disconnect ArrayCopy node
       ArrayCopyNode* ac = use->as_ArrayCopy();
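
The single added line makes process_users_of_allocation (opto/macro.cpp) remove a
fully disconnected user from the graph right away via _igvn.remove_dead_node(use),
presumably so the detached node cannot be reached again while the remaining users
of the allocation are processed.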
@@ -72,7 +72,6 @@ Node* PhaseMacroExpand::make_leaf_call(Node* ctrl, Node* mem,
                                        Node* parm2, Node* parm3,
                                        Node* parm4, Node* parm5,
                                        Node* parm6, Node* parm7) {
-  int size = call_type->domain()->cnt();
   Node* call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
   call->init_req(TypeFunc::Control, ctrl);
   call->init_req(TypeFunc::I_O    , top());
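
Pure cleanup: size was assigned from call_type->domain()->cnt() but never read in
make_leaf_call (opto/macroArrayCopy.cpp), so the dead local is dropped.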
@@ -1107,7 +1106,9 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) {
     Node* call = make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, src, dest, length XTOP);
     transform_later(call);

-    _igvn.replace_node(ac, call);
+    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+    bs->clone_barrier_at_expansion(ac, call, _igvn);
+
     return;
   } else if (ac->is_copyof() || ac->is_copyofrange() || ac->is_cloneoop()) {
     Node* mem = ac->in(TypeFunc::Memory);
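
Instead of unconditionally replacing the ArrayCopy node with the leaf copy call,
macro expansion now hands both nodes to the barrier set, so a GC that needs a
barrier over a freshly cloned object can splice one in at this point. A sketch of
what the default hook would have to do; the body below is inferred from the call
it replaces, not shown in this diff:

    // Default hook (sketch): with no GC-specific work to do, expansion behaves
    // exactly as before and the raw copy call simply replaces the node.
    void BarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call,
                                                  PhaseIterGVN& igvn) const {
      igvn.replace_node(ac, call);
    }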