8024921: PPC64 (part 113): Extend Load and Store nodes to know about memory ordering

Add a field to the C2 LoadNode and StoreNode classes that indicates whether the load/store should perform an acquire/release on platforms that support it.

Reviewed-by: kvn
Goetz Lindenmaier 2013-11-15 11:05:32 -08:00
parent d8b9e9f681
commit 13b13f5259
17 changed files with 351 additions and 255 deletions
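
The hunks below thread the new MemOrd argument through the C2 load/store factory methods; existing call sites keep their old semantics by passing MemNode::unordered. A minimal sketch of the extended LoadNode::make signature in use, assuming an acquire member alongside the unordered/release values visible in the hunks (gvn, ctl, mem, adr and adr_type are placeholder inputs, not code from this commit):

    // Ordinary load: same behavior as before the change.
    Node* ld  = LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT, T_INT, MemNode::unordered);
    // Load of a volatile field: request acquire semantics so platforms such as PPC64
    // can emit the matching barrier (acquire is assumed from the description above).
    Node* vld = LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT, T_INT, MemNode::acquire);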

@@ -907,7 +907,7 @@ bool LoadNode::is_immutable_value(Node* adr) {
 //----------------------------LoadNode::make-----------------------------------
 // Polymorphic factory method:
-Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) {
+Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo) {
   Compile* C = gvn.C;
   // sanity check the alias category against the created node type
@@ -923,34 +923,34 @@ Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const Type
          rt->isa_oopptr() || is_immutable_value(adr),
          "raw memory operations should have control edge");
   switch (bt) {
-  case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int() );
-  case T_BYTE: return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int() );
-  case T_INT: return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int() );
-  case T_CHAR: return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int() );
-  case T_SHORT: return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int() );
-  case T_LONG: return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long() );
-  case T_FLOAT: return new (C) LoadFNode (ctl, mem, adr, adr_type, rt );
-  case T_DOUBLE: return new (C) LoadDNode (ctl, mem, adr, adr_type, rt );
-  case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr() );
+  case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(), mo);
+  case T_BYTE: return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(), mo);
+  case T_INT: return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(), mo);
+  case T_CHAR: return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(), mo);
+  case T_SHORT: return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(), mo);
+  case T_LONG: return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo);
+  case T_FLOAT: return new (C) LoadFNode (ctl, mem, adr, adr_type, rt, mo);
+  case T_DOUBLE: return new (C) LoadDNode (ctl, mem, adr, adr_type, rt, mo);
+  case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(), mo);
   case T_OBJECT:
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
-      Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop()));
+      Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo));
       return new (C) DecodeNNode(load, load->bottom_type()->make_ptr());
     } else
 #endif
     {
       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
-      return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
+      return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo);
     }
   }
   ShouldNotReachHere();
   return (LoadNode*)NULL;
 }
-LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt) {
+LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
   bool require_atomic = true;
-  return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), require_atomic);
+  return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic);
 }
@@ -2032,12 +2032,12 @@ Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* a
 #ifdef _LP64
   if (adr_type->is_ptr_to_narrowklass()) {
     assert(UseCompressedClassPointers, "no compressed klasses");
-    Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
+    Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
     return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
   }
 #endif
   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
-  return new (C) LoadKlassNode(ctl, mem, adr, at, tk);
+  return new (C) LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
 }
 //------------------------------Value------------------------------------------
@@ -2347,45 +2347,46 @@ Node* LoadRangeNode::Identity( PhaseTransform *phase ) {
 //=============================================================================
 //---------------------------StoreNode::make-----------------------------------
 // Polymorphic factory method:
-StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
+StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
+  assert((mo == unordered || mo == release), "unexpected");
   Compile* C = gvn.C;
-  assert( C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
-          ctl != NULL, "raw memory operations should have control edge");
+  assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
+         ctl != NULL, "raw memory operations should have control edge");
   switch (bt) {
   case T_BOOLEAN:
-  case T_BYTE: return new (C) StoreBNode(ctl, mem, adr, adr_type, val);
-  case T_INT: return new (C) StoreINode(ctl, mem, adr, adr_type, val);
+  case T_BYTE: return new (C) StoreBNode(ctl, mem, adr, adr_type, val, mo);
+  case T_INT: return new (C) StoreINode(ctl, mem, adr, adr_type, val, mo);
   case T_CHAR:
-  case T_SHORT: return new (C) StoreCNode(ctl, mem, adr, adr_type, val);
-  case T_LONG: return new (C) StoreLNode(ctl, mem, adr, adr_type, val);
-  case T_FLOAT: return new (C) StoreFNode(ctl, mem, adr, adr_type, val);
-  case T_DOUBLE: return new (C) StoreDNode(ctl, mem, adr, adr_type, val);
+  case T_SHORT: return new (C) StoreCNode(ctl, mem, adr, adr_type, val, mo);
+  case T_LONG: return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo);
+  case T_FLOAT: return new (C) StoreFNode(ctl, mem, adr, adr_type, val, mo);
+  case T_DOUBLE: return new (C) StoreDNode(ctl, mem, adr, adr_type, val, mo);
   case T_METADATA:
   case T_ADDRESS:
   case T_OBJECT:
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
-      return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
+      return new (C) StoreNNode(ctl, mem, adr, adr_type, val, mo);
     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
                (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                 adr->bottom_type()->isa_rawptr())) {
       val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
-      return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
+      return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
     }
 #endif
     {
-      return new (C) StorePNode(ctl, mem, adr, adr_type, val);
+      return new (C) StorePNode(ctl, mem, adr, adr_type, val, mo);
     }
   }
   ShouldNotReachHere();
   return (StoreNode*)NULL;
 }
-StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val) {
+StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
   bool require_atomic = true;
-  return new (C) StoreLNode(ctl, mem, adr, adr_type, val, require_atomic);
+  return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
 }
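
The new assert in StoreNode::make above limits stores to unordered or release ordering. A hedged sketch of the two accepted call shapes (gvn, ctl, mem, adr, adr_type and val are placeholders, not code from this commit):

    // Ordinary store: unchanged behavior.
    StoreNode* st  = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_INT, MemNode::unordered);
    // Store to a volatile field: release ordering, per the commit description.
    StoreNode* vst = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_INT, MemNode::release);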
@@ -2778,12 +2779,12 @@ Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){
   Node *zero = phase->makecon(TypeLong::ZERO);
   Node *off = phase->MakeConX(BytesPerLong);
-  mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero);
+  mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
   count--;
   while( count-- ) {
     mem = phase->transform(mem);
     adr = phase->transform(new (phase->C) AddPNode(base,adr,off));
-    mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero);
+    mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
   }
   return mem;
 }
@@ -2827,7 +2828,7 @@ Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
     Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(offset));
     adr = phase->transform(adr);
     const TypePtr* atp = TypeRawPtr::BOTTOM;
-    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
+    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
     mem = phase->transform(mem);
     offset += BytesPerInt;
   }
@@ -2888,7 +2889,7 @@ Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,
     Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(done_offset));
     adr = phase->transform(adr);
     const TypePtr* atp = TypeRawPtr::BOTTOM;
-    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
+    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
     mem = phase->transform(mem);
     done_offset += BytesPerInt;
   }
@@ -3762,14 +3763,14 @@ InitializeNode::coalesce_subword_stores(intptr_t header_size,
       ++new_long;
       off[nst] = offset;
       st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
-                                  phase->longcon(con), T_LONG);
+                                  phase->longcon(con), T_LONG, MemNode::unordered);
     } else {
       // Omit either if it is a zero.
       if (con0 != 0) {
         ++new_int;
         off[nst] = offset;
         st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
-                                    phase->intcon(con0), T_INT);
+                                    phase->intcon(con0), T_INT, MemNode::unordered);
       }
       if (con1 != 0) {
         ++new_int;
@@ -3777,7 +3778,7 @@ InitializeNode::coalesce_subword_stores(intptr_t header_size,
         adr = make_raw_address(offset, phase);
         off[nst] = offset;
         st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
-                                    phase->intcon(con1), T_INT);
+                                    phase->intcon(con1), T_INT, MemNode::unordered);
       }
     }