6667618: disable LoadL->ConvL2I ==> LoadI optimization

This optimization causes problems (the sizes of the Load and Store nodes do not match) in object initialization code and Escape Analysis.

Reviewed-by: jrose, never
Vladimir Kozlov 2008-02-29 19:57:41 -08:00
parent 09d7f4bbcf
commit dc6ad19d70
2 changed files with 10 additions and 41 deletions


@@ -982,34 +982,9 @@ Node *ConvL2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
     return new (phase->C, 3) AddINode(add1,add2);
   }
-  // Fold up with a prior LoadL: LoadL->ConvL2I ==> LoadI
-  // Requires we understand the 'endianess' of Longs.
-  if( andl_op == Op_LoadL ) {
-    Node *adr = andl->in(MemNode::Address);
-    // VM_LITTLE_ENDIAN is #defined appropriately in the Makefiles
-#ifndef VM_LITTLE_ENDIAN
-    // The transformation can cause problems on BIG_ENDIAN architectures
-    // where the jint is not the same address as the jlong. Specifically, we
-    // will fail to insert an anti-dependence in GCM between the LoadI and a
-    // subsequent StoreL because different memory offsets provoke
-    // flatten_alias_type() into indicating two different types. See bug
-    // 4755222.
-    // Node *base = adr->is_AddP() ? adr->in(AddPNode::Base) : adr;
-    // adr = phase->transform( new (phase->C, 4) AddPNode(base,adr,phase->MakeConX(sizeof(jint))));
-    return NULL;
-#else
-    if (phase->C->alias_type(andl->adr_type())->is_volatile()) {
-      // Picking up the low half by itself bypasses the atomic load and we could
-      // end up with more than one non-atomic load. See bugs 4432655 and 4526490.
-      // We could go to the trouble of iterating over andl's output edges and
-      // punting only if there's more than one real use, but we don't bother.
-      return NULL;
-    }
-    return new (phase->C, 3) LoadINode(andl->in(MemNode::Control),andl->in(MemNode::Memory),adr,((LoadLNode*)andl)->raw_adr_type());
-#endif
-  }
+  // Disable the optimization: LoadL->ConvL2I ==> LoadI.
+  // It causes problems (the sizes of the Load and Store nodes do not match)
+  // in object initialization code and Escape Analysis.
   return NULL;
 }
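Aside: the disabled folding is easier to see outside C2's IR. Below is a minimal, hypothetical C++ sketch (not HotSpot code; the function names and the main() driver are invented for illustration) of why fetching a long's low word with a narrow load is endianness-sensitive, keyed on the same VM_LITTLE_ENDIAN define the removed comment mentions. The removed code also declined the rewrite for volatile longs, since splitting out the low half would bypass the single atomic 64-bit load (bugs 4432655 and 4526490).

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical sketch: LoadL->ConvL2I ==> LoadI is only an
// address-preserving rewrite on little-endian targets.
static int32_t low_word_via_conv(const int64_t* p) {
  return (int32_t)*p;                    // LoadL followed by ConvL2I
}

static int32_t low_word_via_narrow_load(const int64_t* p) {
  int32_t lo;
  const unsigned char* b = (const unsigned char*)p;
#ifdef VM_LITTLE_ENDIAN                  // same Makefile switch the removed code used
  std::memcpy(&lo, b, sizeof(lo));       // low word at offset 0
#else
  std::memcpy(&lo, b + sizeof(int32_t), sizeof(lo));  // big-endian: low word at offset 4
#endif
  return lo;                             // the equivalent narrow LoadI
}

int main() {
  int64_t v = 0x1122334455667788LL;
  std::printf("%x %x\n", (unsigned)low_word_via_conv(&v),
              (unsigned)low_word_via_narrow_load(&v));
  return 0;
}

On a little-endian host, compile with -DVM_LITTLE_ENDIAN and both helpers print the same word; leave the define out on a big-endian host.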


@@ -108,19 +108,13 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
   // Avoid independent memory operations
   Node* old_mem = mem;
-  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
-    InitializeNode* init = mem->in(0)->as_Initialize();
-    if (init->is_complete()) {  // i.e., after macro expansion
-      const TypePtr* tp = t_adr->is_ptr();
-      uint alias_idx = phase->C->get_alias_index(tp);
-      // Free this slice from the init. It was hooked, temporarily,
-      // by GraphKit::set_output_for_allocation.
-      if (alias_idx > Compile::AliasIdxRaw) {
-        mem = init->memory(alias_idx);
-        // ...but not with the raw-pointer slice.
-      }
-    }
-  }
+  // The code which unhooks non-raw memories from complete (macro-expanded)
+  // initializations has been removed. After macro expansion, all stores caught
+  // by an Initialize node become raw stores, and there is no information about
+  // which memory slices they modify. It is therefore unsafe to move any memory
+  // operation above these stores. Also, in most cases hooked non-raw memories
+  // were already unhooked using information from detect_ptr_independence()
+  // and find_previous_store().
   if (mem->is_MergeMem()) {
     MergeMemNode* mmem = mem->as_MergeMem();
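Aside: a minimal, hypothetical C++ analogy (invented names; C2's Initialize and MergeMem machinery is far richer) of the new comment's point that no memory operation may be moved above a raw store whose modified slice is unknown, because the load might alias it:

#include <cstdint>
#include <cstdio>

// With the store's memory slice unknown, a later load that might alias it
// cannot legally float above the store.
static int32_t load_kept_below(int32_t* field) {
  *field = 42;             // raw initializing store; modified slice unknown
  return *field;           // load ordered after the store: sees 42
}

static int32_t load_hoisted(int32_t* field) {
  int32_t stale = *field;  // load wrongly moved above the store...
  *field = 42;             // ...so the initializing value is missed
  return stale;
}

int main() {
  int32_t a = -1, b = -1;  // -1 stands in for pre-initialization contents
  std::printf("%d %d\n", load_kept_below(&a), load_hoisted(&b));  // prints: 42 -1
  return 0;
}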