8208601: Introduce native oop barriers in C2 for OopHandle

Reviewed-by: neliasso, kvn
This commit is contained in:
Erik Österlund 2018-08-22 13:06:33 +02:00
parent d8ffa83e7a
commit ce61e39060
10 changed files with 134 additions and 21 deletions

View file

@ -104,14 +104,18 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) con
bool pinned = (decorators & C2_PINNED_LOAD) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
assert(!in_native, "not supported yet");
MemNode::MemOrd mo = access.mem_node_mo();
LoadNode::ControlDependency dep = pinned ? LoadNode::Pinned : LoadNode::DependsOnlyOnTest;
Node* control = control_dependent ? kit->control() : NULL;
Node* load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
dep, requires_atomic_access, unaligned, mismatched);
Node* load;
if (in_native) {
load = kit->make_load(control, adr, val_type, access.type(), mo);
} else {
load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
dep, requires_atomic_access, unaligned, mismatched);
}
access.set_raw_access(load);
return load;

View file

@ -194,6 +194,7 @@ public:
virtual bool array_copy_requires_gc_barriers(BasicType type) const { return false; }
// Support for GC barriers emitted during parsing
virtual bool has_load_barriers() const { return false; }
virtual bool is_gc_barrier_node(Node* node) const { return false; }
virtual Node* step_over_gc_barrier(Node* c) const { return c; }

View file

@ -68,7 +68,26 @@ ZBarrierSetC2State* ZBarrierSetC2::state() const {
}
// Returns true if 'node' is (part of) a ZGC load barrier: either an
// unexpanded LoadBarrier macro node (or one of its projections), or the
// result phi of an already-expanded (optimized) load barrier.
bool ZBarrierSetC2::is_gc_barrier_node(Node* node) const {
  // BUG FIX: the stale pre-patch body ("return node->is_LoadBarrier();")
  // preceded the steps below, making them unreachable; it has been removed.
  // 1. This step follows potential oop projections of a load barrier before expansion
  if (node->is_Proj()) {
    node = node->in(0);
  }
  // 2. This step checks for unexpanded load barriers
  if (node->is_LoadBarrier()) {
    return true;
  }
  // 3. This step checks for the phi corresponding to an optimized load barrier expansion
  if (node->is_Phi()) {
    PhiNode* phi = node->as_Phi();
    Node* n = phi->in(1);
    // in(1) is the slow-path result; its node class identifies the expansion.
    if (n != NULL && (n->is_LoadBarrierSlowReg() || n->is_LoadBarrierWeakSlowReg())) {
      return true;
    }
  }
  return false;
}
void ZBarrierSetC2::register_potential_barrier_node(Node* node) const {
@ -637,7 +656,10 @@ Node* ZBarrierSetC2::load_barrier(GraphKit* kit, Node* val, Node* adr, bool weak
if (barrier == transformed_barrier) {
kit->set_control(gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control)));
}
return gvn.transform(new ProjNode(transformed_barrier, LoadBarrierNode::Oop));
Node* result = gvn.transform(new ProjNode(transformed_barrier, LoadBarrierNode::Oop));
assert(is_gc_barrier_node(result), "sanity");
assert(step_over_gc_barrier(result) == val, "sanity");
return result;
} else {
return val;
}
@ -963,6 +985,9 @@ void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBa
traverse(preceding_barrier_node, result_region, result_phi, -1);
#endif
assert(is_gc_barrier_node(result_phi), "sanity");
assert(step_over_gc_barrier(result_phi) == in_val, "sanity");
return;
}
@ -1376,6 +1401,32 @@ void ZBarrierSetC2::loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node,
}
}
// Given a node produced by a ZGC load barrier, return the underlying
// (pre-barrier) oop value; any node that is not a barrier is returned
// unchanged. Mirrors the recognition logic of is_gc_barrier_node().
Node* ZBarrierSetC2::step_over_gc_barrier(Node* c) const {
  Node* current = c;

  // 1. This step follows potential oop projections of a load barrier before expansion
  if (current->is_Proj()) {
    current = current->in(0);
  }

  // 2. This step checks for unexpanded load barriers
  if (current->is_LoadBarrier()) {
    // The Oop input of the macro node is the original loaded value.
    return current->in(LoadBarrierNode::Oop);
  }

  // 3. This step checks for the phi corresponding to an optimized load barrier expansion
  if (current->is_Phi()) {
    PhiNode* phi = current->as_Phi();
    Node* slow_path_in = phi->in(1);
    if (slow_path_in != NULL &&
        (slow_path_in->is_LoadBarrierSlowReg() || slow_path_in->is_LoadBarrierWeakSlowReg())) {
      assert(c == current, "projections from step 1 should only be seen before macro expansion");
      // in(2) of the result phi is the fast-path (original) oop value.
      return phi->in(2);
    }
  }

  return c;
}
// == Verification ==
#ifdef ASSERT

View file

@ -101,7 +101,9 @@ public:
const TypePtr* t,
MemOrd mo,
ControlDependency control_dependency = DependsOnlyOnTest)
: LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
: LoadPNode(c, mem, adr, at, t, mo, control_dependency) {
init_class_id(Class_LoadBarrierSlowReg);
}
virtual const char * name() {
return "LoadBarrierSlowRegNode";
@ -123,7 +125,9 @@ public:
const TypePtr* t,
MemOrd mo,
ControlDependency control_dependency = DependsOnlyOnTest)
: LoadPNode(c, mem, adr, at, t, mo, control_dependency) {}
: LoadPNode(c, mem, adr, at, t, mo, control_dependency) {
init_class_id(Class_LoadBarrierWeakSlowReg);
}
virtual const char * name() {
return "LoadBarrierWeakSlowRegNode";
@ -182,6 +186,7 @@ public:
bool oop_reload_allowed = true) const;
virtual void* create_barrier_state(Arena* comp_arena) const;
virtual bool has_load_barriers() const { return true; }
virtual bool is_gc_barrier_node(Node* node) const;
virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful) const;
@ -190,7 +195,7 @@ public:
virtual void register_potential_barrier_node(Node* node) const;
virtual void unregister_potential_barrier_node(Node* node) const;
virtual bool array_copy_requires_gc_barriers(BasicType type) const { return true; }
virtual Node* step_over_gc_barrier(Node* c) const { return c; }
virtual Node* step_over_gc_barrier(Node* c) const;
// If the BarrierSetC2 state has kept macro nodes in its compilation unit state to be
// expanded later, then now is the time to do so.
virtual bool expand_macro_nodes(PhaseMacroExpand* macro) const;

View file

@ -1595,6 +1595,23 @@ Node* GraphKit::access_load_at(Node* obj, // containing obj
}
}
Node* GraphKit::access_load(Node* adr,        // actual address to load val at
                            const Type* val_type,
                            BasicType bt,
                            DecoratorSet decorators) {
  // Bail out early on a dead path.
  if (stopped()) {
    return top();
  }

  // Address-only access: no containing object (base is NULL), which is
  // what distinguishes this entry point from access_load_at().
  C2AccessValuePtr addr(adr, NULL);
  C2Access access(this, decorators | C2_READ_ACCESS, bt, NULL, addr);

  // Raw accesses bypass the GC-specific barrier set and go straight to
  // the shared BarrierSetC2 implementation.
  return access.is_raw()
      ? _barrier_set->BarrierSetC2::load_at(access, val_type)
      : _barrier_set->load_at(access, val_type);
}
Node* GraphKit::access_atomic_cmpxchg_val_at(Node* ctl,
Node* obj,
Node* adr,

View file

@ -582,12 +582,17 @@ class GraphKit : public Phase {
DecoratorSet decorators);
Node* access_load_at(Node* obj, // containing obj
Node* adr, // actual adress to store val at
Node* adr, // actual adress to load val at
const TypePtr* adr_type,
const Type* val_type,
BasicType bt,
DecoratorSet decorators);
Node* access_load(Node* adr, // actual adress to load val at
const Type* val_type,
BasicType bt,
DecoratorSet decorators);
Node* access_atomic_cmpxchg_val_at(Node* ctl,
Node* obj,
Node* adr,

View file

@ -3028,7 +3028,7 @@ Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
// mirror = ((OopHandle)mirror)->resolve();
return make_load(NULL, load, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
}
//-----------------------load_klass_from_mirror_common-------------------------

View file

@ -25,6 +25,8 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
@ -2209,6 +2211,12 @@ Node* LoadNode::klass_identity_common(PhaseGVN* phase) {
const TypeOopPtr* toop = phase->type(adr)->isa_oopptr();
if (toop == NULL) return this;
// Step over potential GC barrier for OopHandle resolve
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
if (bs->is_gc_barrier_node(base)) {
base = bs->step_over_gc_barrier(base);
}
// We can fetch the klass directly through an AllocateNode.
// This works even if the klass is not constant (clone or newArray).
if (offset == oopDesc::klass_offset_in_bytes()) {
@ -2226,10 +2234,6 @@ Node* LoadNode::klass_identity_common(PhaseGVN* phase) {
// mirror go completely dead. (Current exception: Class
// mirrors may appear in debug info, but we could clean them out by
// introducing a new debug info operator for Klass.java_mirror).
//
// If the code pattern requires a barrier for
// mirror = ((OopHandle)mirror)->resolve();
// this won't match.
if (toop->isa_instptr() && toop->klass() == phase->C->env()->Class_klass()
&& offset == java_lang_Class::klass_offset_in_bytes()) {

View file

@ -1639,14 +1639,24 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) {
}
// Loading the java mirror from a Klass requires two loads and the type
// of the mirror load depends on the type of 'n'. See LoadNode::Value().
// If the code pattern requires a barrier for
// mirror = ((OopHandle)mirror)->resolve();
// this won't match.
// LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
bool has_load_barriers = bs->has_load_barriers();
if (use_op == Op_LoadP && use->bottom_type()->isa_rawptr()) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
Node* u = use->fast_out(i2);
const Type* ut = u->bottom_type();
if (u->Opcode() == Op_LoadP && ut->isa_instptr()) {
if (has_load_barriers) {
// Search for load barriers behind the load
for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
Node* b = u->fast_out(i3);
if (bs->is_gc_barrier_node(b)) {
_worklist.push(b);
}
}
}
_worklist.push(u);
}
}
@ -1788,14 +1798,23 @@ void PhaseCCP::analyze() {
}
// Loading the java mirror from a Klass requires two loads and the type
// of the mirror load depends on the type of 'n'. See LoadNode::Value().
// If the code pattern requires a barrier for
// mirror = ((OopHandle)mirror)->resolve();
// this won't match.
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
bool has_load_barriers = bs->has_load_barriers();
if (m_op == Op_LoadP && m->bottom_type()->isa_rawptr()) {
for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
Node* u = m->fast_out(i2);
const Type* ut = u->bottom_type();
if (u->Opcode() == Op_LoadP && ut->isa_instptr() && ut != type(u)) {
if (has_load_barriers) {
// Search for load barriers behind the load
for (DUIterator_Fast i3max, i3 = u->fast_outs(i3max); i3 < i3max; i3++) {
Node* b = u->fast_out(i3);
if (bs->is_gc_barrier_node(b)) {
_worklist.push(b);
}
}
}
worklist.push(u);
}
}

View file

@ -24,6 +24,8 @@
#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
@ -878,8 +880,13 @@ const Type *CmpPNode::sub( const Type *t1, const Type *t2 ) const {
static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) {
// Return the klass node for (indirect load from OopHandle)
// LoadP(LoadP(AddP(foo:Klass, #java_mirror)))
// LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror))))
// or NULL if not matching.
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
if (bs->is_gc_barrier_node(n)) {
n = bs->step_over_gc_barrier(n);
}
if (n->Opcode() != Op_LoadP) return NULL;
const TypeInstPtr* tp = phase->type(n)->isa_instptr();