Mirror of https://github.com/openjdk/jdk.git, synced 2025-09-20 11:04:34 +02:00

Merge commit c78419253b

6 changed files with 131 additions and 23 deletions
@@ -1138,8 +1138,10 @@ IntervalUseKind LinearScan::use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr) {
       }
     }
   }
-  } else if (opr_type != T_LONG) {
+    // We want to sometimes use logical operations on pointers, in particular in GC barriers.
+    // Since 64-bit logical operations do not currently support operands on stack, we have to make sure
+    // T_OBJECT doesn't get spilled along with T_LONG.
+  } else if (opr_type != T_LONG LP64_ONLY(&& opr_type != T_OBJECT)) {
     // integer instruction (note: long operands must always be in register)
     switch (op->code()) {
       case lir_cmp:
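The LP64_ONLY(...) wrapper confines the new T_OBJECT clause to 64-bit builds. As a hedged illustration (this mirrors the conventional HotSpot macro from utilities/macros.hpp and is not part of this diff), the macro behaves roughly like this:

  #ifdef _LP64
  #define LP64_ONLY(code) code      // 64-bit build: the extra clause is compiled in
  #else
  #define LP64_ONLY(code)           // 32-bit build: the clause disappears
  #endif

So on 64-bit platforms the changed condition reads "opr_type != T_LONG && opr_type != T_OBJECT": object operands of these logical instructions are handled like longs and kept out of stack slots, which is what the comment about GC barriers asks for.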
@@ -848,6 +848,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
   }
 #endif

+  NOT_PRODUCT( verify_barriers(); )
   // Now that we know the size of all the monitors we can add a fixed slot
   // for the original deopt pc.

@@ -3373,6 +3374,72 @@ void Compile::verify_graph_edges(bool no_dead_code) {
     }
   }
 }
+
+// Verify GC barriers consistency
+// Currently supported:
+// - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
+void Compile::verify_barriers() {
+  if (UseG1GC) {
+    // Verify G1 pre-barriers
+    const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active());
+
+    ResourceArea *area = Thread::current()->resource_area();
+    Unique_Node_List visited(area);
+    Node_List worklist(area);
+    // We're going to walk control flow backwards starting from the Root
+    worklist.push(_root);
+    while (worklist.size() > 0) {
+      Node* x = worklist.pop();
+      if (x == NULL || x == top()) continue;
+      if (visited.member(x)) {
+        continue;
+      } else {
+        visited.push(x);
+      }
+
+      if (x->is_Region()) {
+        for (uint i = 1; i < x->req(); i++) {
+          worklist.push(x->in(i));
+        }
+      } else {
+        worklist.push(x->in(0));
+        // We are looking for the pattern:
+        //                              /->ThreadLocal
+        // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
+        //              \->ConI(0)
+        // We want to verify that the If and the LoadB have the same control
+        // See GraphKit::g1_write_barrier_pre()
+        if (x->is_If()) {
+          IfNode *iff = x->as_If();
+          if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
+            CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
+            if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
+                && cmp->in(1)->is_Load()) {
+              LoadNode* load = cmp->in(1)->as_Load();
+              if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
+                  && load->in(2)->in(3)->is_Con()
+                  && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {
+
+                Node* if_ctrl = iff->in(0);
+                Node* load_ctrl = load->in(0);
+
+                if (if_ctrl != load_ctrl) {
+                  // Skip possible CProj->NeverBranch in infinite loops
+                  if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
+                      && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
+                    if_ctrl = if_ctrl->in(0)->in(0);
+                  }
+                }
+                assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
 #endif

 // The Compile object keeps track of failure reasons separately from the ciEnv.
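Stripped of the HotSpot types, the verify_barriers() traversal above is an ordinary backward walk over control flow: pop a node from a worklist, skip nodes already seen, push the control inputs (all of them for a Region, otherwise in(0)), and pattern-match If nodes along the way. A minimal standalone sketch of just that traversal, using hypothetical stand-in types rather than HotSpot's Node, Unique_Node_List and Node_List:

  #include <cstddef>
  #include <unordered_set>
  #include <vector>

  // Hypothetical stand-in for an IR node: in[0] is the control input,
  // Region nodes merge several control inputs starting at in[1].
  struct Node {
    std::vector<Node*> in;
    bool is_region = false;
  };

  void walk_control_backwards(Node* root) {
    std::unordered_set<Node*> visited;   // plays the role of Unique_Node_List
    std::vector<Node*> worklist;         // plays the role of Node_List
    worklist.push_back(root);
    while (!worklist.empty()) {
      Node* x = worklist.back();
      worklist.pop_back();
      if (x == nullptr || !visited.insert(x).second) continue;  // skip null and repeats
      if (x->is_region) {
        for (std::size_t i = 1; i < x->in.size(); i++) worklist.push_back(x->in[i]);
      } else if (!x->in.empty()) {
        worklist.push_back(x->in[0]);
        // ... this is where verify_barriers() checks the If->Bool->CmpI->LoadB->AddP pattern ...
      }
    }
  }

The real code additionally stops at top() and asserts that the If and the LoadB of the marking-active byte share the same control input, skipping a CProj->NeverBranch pair that infinite loops can introduce.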
@@ -1148,6 +1148,9 @@ class Compile : public Phase {
   // graph is strongly connected from root in both directions.
   void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;

+  // Verify GC barrier patterns
+  void verify_barriers() PRODUCT_RETURN;
+
   // End-of-run dumps.
   static void print_statistics() PRODUCT_RETURN;

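Both additions lean on the standard HotSpot product/non-product macros: the call site added in the Compile constructor is wrapped in NOT_PRODUCT(...) and the declaration here ends in PRODUCT_RETURN, so the verification exists only in debug builds. A hedged sketch of how those macros usually behave (assumed from utilities/macros.hpp; they are not part of this diff):

  #ifdef PRODUCT
  #define PRODUCT_RETURN   {}      // declaration gets an empty inline body; calls compile to nothing
  #define NOT_PRODUCT(code)        // the wrapped statement is dropped
  #else
  #define PRODUCT_RETURN           // plain declaration; the definition in compile.cpp is used
  #define NOT_PRODUCT(code) code   // the wrapped statement is kept
  #endif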
@@ -1964,7 +1964,7 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
   // Find loads off the surviving projection; remove their control edge
   for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
     Node* cd = dp->fast_out(i); // Control-dependent node
-    if( cd->is_Load() ) {       // Loads can now float around in the loop
+    if (cd->is_Load() && cd->depends_only_on_test()) { // Loads can now float around in the loop
       // Allow the load to float around in the loop, or before it
       // but NOT before the pre-loop.
       _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
@@ -204,6 +204,17 @@
 protected:
   const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                      ciKlass* klass) const;
+  // depends_only_on_test is almost always true, and needs to be almost always
+  // true to enable key hoisting & commoning optimizations. However, for the
+  // special case of RawPtr loads from TLS top & end, and other loads performed by
+  // GC barriers, the control edge carries the dependence preventing hoisting past
+  // a Safepoint instead of the memory edge. (An unfortunate consequence of having
+  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
+  // which produce results (new raw memory state) inside of loops preventing all
+  // manner of other optimizations). Basically, it's ugly but so is the alternative.
+  // See comment in macro.cpp, around line 125 expand_allocate_common().
+  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
+
 };

 //------------------------------LoadBNode--------------------------------------
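Hoisting this override and its comment from the pointer loads (see the two removals below) into the LoadNode base class means every load subclass now reports the raw-pointer rule, including the LoadB of the SATB marking-active flag emitted by the G1 pre-barrier; together with the depends_only_on_test() guard added to do_range_check above, such loads keep their control edge instead of floating. A minimal standalone sketch of that idea, with hypothetical simplified classes in place of the HotSpot node hierarchy:

  #include <iostream>

  struct Node {
    virtual ~Node() {}
    virtual bool depends_only_on_test() const { return true; }  // default: free to float past the test
  };

  struct LoadNode : Node {
    bool raw_ptr;  // stand-in for adr_type() == TypeRawPtr::BOTTOM
    explicit LoadNode(bool raw) : raw_ptr(raw) {}
    bool depends_only_on_test() const override { return !raw_ptr; }
  };

  // Inherits the rule from the base class, like the barrier's byte load does after this change.
  struct LoadBNode : LoadNode {
    using LoadNode::LoadNode;
  };

  int main() {
    LoadBNode marking_flag(/*raw=*/true);    // e.g. the G1 pre-barrier flag load
    LoadBNode ordinary_byte(/*raw=*/false);
    // prints "0 1": the flag load must keep its control edge, the ordinary load may float
    std::cout << marking_flag.depends_only_on_test() << " "
              << ordinary_byte.depends_only_on_test() << "\n";
  }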
@@ -370,16 +381,6 @@
   virtual uint ideal_reg() const { return Op_RegP; }
   virtual int store_Opcode() const { return Op_StoreP; }
   virtual BasicType memory_type() const { return T_ADDRESS; }
-  // depends_only_on_test is almost always true, and needs to be almost always
-  // true to enable key hoisting & commoning optimizations. However, for the
-  // special case of RawPtr loads from TLS top & end, the control edge carries
-  // the dependence preventing hoisting past a Safepoint instead of the memory
-  // edge. (An unfortunate consequence of having Safepoints not set Raw
-  // Memory; itself an unfortunate consequence of having Nodes which produce
-  // results (new raw memory state) inside of loops preventing all manner of
-  // other optimizations). Basically, it's ugly but so is the alternative.
-  // See comment in macro.cpp, around line 125 expand_allocate_common().
-  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
 };

@@ -393,16 +394,6 @@
   virtual uint ideal_reg() const { return Op_RegN; }
   virtual int store_Opcode() const { return Op_StoreN; }
   virtual BasicType memory_type() const { return T_NARROWOOP; }
-  // depends_only_on_test is almost always true, and needs to be almost always
-  // true to enable key hoisting & commoning optimizations. However, for the
-  // special case of RawPtr loads from TLS top & end, the control edge carries
-  // the dependence preventing hoisting past a Safepoint instead of the memory
-  // edge. (An unfortunate consequence of having Safepoints not set Raw
-  // Memory; itself an unfortunate consequence of having Nodes which produce
-  // results (new raw memory state) inside of loops preventing all manner of
-  // other optimizations). Basically, it's ugly but so is the alternative.
-  // See comment in macro.cpp, around line 125 expand_allocate_common().
-  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
 };

 //------------------------------LoadKlassNode----------------------------------
hotspot/test/compiler/regalloc/C1ObjectSpillInLogicOp.java (new file, 45 lines)
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8027751
+ * @summary C1 crashes generating G1 post-barrier in Unsafe.getAndSetObject() intrinsic because of the new value spill
+ * @run main/othervm -XX:+UseG1GC C1ObjectSpillInLogicOp
+ *
+ * G1 barriers use logical operators (xor) on T_OBJECT mixed with T_LONG or T_INT.
+ * The current implementation of logical operations on x86 in C1 doesn't allow long operands to be on the stack.
+ * There is special code in the register allocator that forces long arguments into registers on x86. However, T_OBJECT
+ * can be spilled just fine, and in that case the xor emission will fail.
+ */
+
+import java.util.concurrent.atomic.*;
+
+class C1ObjectSpillInLogicOp {
+  static public void main(String[] args) {
+    AtomicReferenceArray<Integer> x = new AtomicReferenceArray(128);
+    Integer y = new Integer(0);
+    for (int i = 0; i < 50000; i++) {
+      x.getAndSet(i % x.length(), y);
+    }
+  }
+}
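For context, the xor mentioned in the test summary comes from G1's post-barrier, which compares the store address with the stored value to decide whether a cross-region reference was created. A hedged, standalone approximation of that filter (constants and names are illustrative; the real code is emitted by the C1/C2 barrier generators, which is where the spilled T_OBJECT operand broke the xor):

  #include <cstdint>

  // Rough sketch of the G1 post-barrier's cross-region filter, not HotSpot code.
  bool g1_post_barrier_needed(uintptr_t field_addr, uintptr_t new_val,
                              unsigned log_region_bytes /* e.g. 20 for 1 MB regions */) {
    if (new_val == 0) return false;                             // storing null: nothing to remember
    return ((field_addr ^ new_val) >> log_region_bytes) != 0;   // addresses lie in different regions
  }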