Commit 2f9e32256d by Nils Eliasson, 2015-11-13 13:31:48 +01:00
181 changed files with 8026 additions and 4055 deletions


@@ -1079,10 +1079,10 @@ source %{
// and for a volatile write we need
//
//   stlr<x>
//
// Alternatively, we can implement them by pairing a normal
// load/store with a memory barrier. For a volatile read we need
//
//   ldr<x>
//   dmb ishld
//
@@ -1240,7 +1240,7 @@ source %{
// Alternatively, we can elide generation of the dmb instructions
// and plant the alternative CompareAndSwap macro-instruction
// sequence (which uses ldaxr<x>).
//
// Of course, the above only applies when we see these signature
// configurations. We still want to plant dmb instructions in any
// other cases where we may see a MemBarAcquire, MemBarRelease or
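The elision works because the exclusive accesses carry the ordering themselves. A hedged sketch with C++11 atomics (again not HotSpot code) of the kind of CAS the ldaxr/stlxr macro-instruction sequence implements; on AArch64 without LSE this compiles to an ldaxr/stlxr retry loop, so no separate dmb is needed.

#include <atomic>

std::atomic<int> cell{0};

// Acquire/release CAS: the ordering is built into the exclusive
// load/store pair rather than provided by explicit barriers.
bool cas(int expected, int desired) {
  return cell.compare_exchange_strong(expected, desired,
                                      std::memory_order_acq_rel,
                                      std::memory_order_acquire);
}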
@@ -1367,7 +1367,7 @@ source %{
opcode = parent->Opcode();
return opcode == Op_MemBarRelease;
}

// 2) card mark detection helper
// helper predicate which can be used to detect a volatile membar

@@ -1383,7 +1383,7 @@ source %{
// true
//
// iii) the node's Mem projection feeds a StoreCM node.

bool is_card_mark_membar(const MemBarNode *barrier)
{
if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {

@@ -1402,7 +1402,7 @@ source %{
return true;
}
}
return false;
}

@@ -1430,7 +1430,7 @@ source %{
// where
//  || and \\ represent Ctl and Mem feeds via Proj nodes
//  | \ and / indicate further routing of the Ctl and Mem feeds
//
// this is the graph we see for non-object stores. however, for a
// volatile Object store (StoreN/P) we may see other nodes below the
// leading membar because of the need for a GC pre- or post-write
@@ -1592,7 +1592,7 @@ source %{
// ordering but neither will a releasing store (stlr). The latter
// guarantees that the object put is visible but does not guarantee
// that writes by other threads have also been observed.
//
// So, returning to the task of translating the object put and the
// leading/trailing membar nodes: what do the non-normal node graphs
// look like for these 2 special cases? and how can we determine the
@@ -1731,7 +1731,7 @@ source %{
// | | | |
// C | M | M | M |
// \ | | /
// . . .
// (post write subtree elided)
// . . .
// C \ M /

@@ -1812,12 +1812,12 @@ source %{
// | | | / /
// | Region . . . Phi[M] _____/
// | / | /
// | | /
// | . . . . . . | /
// | / | /
// Region | | Phi[M]
// | | | / Bot
// \ MergeMem
// \ /
// MemBarVolatile
//

@@ -1858,7 +1858,7 @@ source %{
// to a trailing barrier via a MergeMem. That feed is either direct
// (for CMS) or via 2 or 3 Phi nodes merging the leading barrier
// memory flow (for G1).
//
// The predicates controlling generation of instructions for store
// and barrier nodes employ a few simple helper functions (described
// below) which identify the presence or absence of all these

@@ -2112,8 +2112,8 @@ source %{
x = x->in(MemNode::Memory);
} else {
// the merge should get its Bottom mem feed from the leading membar
x = mm->in(Compile::AliasIdxBot);
}
// ensure this is a non control projection
if (!x->is_Proj() || x->is_CFG()) {

@@ -2190,12 +2190,12 @@ source %{
// . . .
// |
// MemBarVolatile (card mark)
// | |
// | StoreCM
// | |
// | . . .
// Bot | /
// MergeMem
// |
// |
// MemBarVolatile {trailing}

@@ -2203,10 +2203,10 @@ source %{
// 2)
// MemBarRelease/CPUOrder (leading)
// |
// |
// |\ . . .
// | \ |
// | \ MemBarVolatile (card mark)
// | \ | |
// \ \ | StoreCM . . .
// \ \ |

@@ -2231,7 +2231,7 @@ source %{
// | \ \ | StoreCM . . .
// | \ \ |
// \ \ Phi
// \ \ /
// \ Phi
// \ /
// Phi . . .

@@ -2506,7 +2506,7 @@ bool unnecessary_acquire(const Node *barrier)
return (x->is_Load() && x->as_Load()->is_acquire());
}

// now check for an unsafe volatile get
// need to check for
@@ -2644,7 +2644,7 @@ bool needs_acquiring_load(const Node *n)
}
membar = child_membar(membar);
if (!membar || membar->Opcode() != Op_MemBarCPUOrder) {
return false;
}
@@ -2703,7 +2703,7 @@ bool unnecessary_volatile(const Node *n)
// first we check if this is part of a card mark. if so then we have
// to generate a StoreLoad barrier
if (is_card_mark_membar(mbvol)) {
return false;
}

@@ -2769,7 +2769,7 @@ bool needs_releasing_store(const Node *n)
if (!is_card_mark_membar(mbvol)) {
return true;
}
// we found a card mark -- just make sure we have a trailing barrier
return (card_mark_to_trailing(mbvol) != NULL);

@@ -2808,7 +2808,7 @@ bool needs_acquiring_load_exclusive(const Node *n)
assert(barrier->Opcode() == Op_MemBarCPUOrder,
"CAS not fed by cpuorder membar!");
MemBarNode *b = parent_membar(barrier);
assert((b != NULL && b->Opcode() == Op_MemBarRelease),
"CAS not fed by cpuorder+release membar pair!");
@@ -3463,6 +3463,17 @@ const bool Matcher::match_rule_supported(int opcode) {
return true; // Per default match rules are supported.
}

+const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
+  // TODO
+  // identify extra cases that we might want to provide match rules for
+  // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
+  bool ret_value = match_rule_supported(opcode);
+  // Add rules here.
+  return ret_value; // Per default match rules are supported.
+}

const int Matcher::float_pressure(int default_pressure_threshold) {
return default_pressure_threshold;
}
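The new match_rule_supported_vector hook is added identically for ppc and sparc in later hunks; for now it only delegates to match_rule_supported. To make the TODO concrete, a hypothetical example of the vlen-guarded rule a port could add here later (the specific opcode and length check are invented for illustration, not part of this commit):

const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
  if (!match_rule_supported(opcode)) {
    return false;
  }
  switch (opcode) {
    case Op_MulVL:        // hypothetical: port lacks a 2-element long vector multiply
      if (vlen == 2) {
        return false;
      }
      break;
  }
  return true;
}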
@@ -4663,7 +4674,7 @@ encode %{
call = __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
}
if (call == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}

@@ -4671,7 +4682,7 @@ encode %{
// Emit stub for static call
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
if (stub == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
}

@@ -4681,7 +4692,7 @@ encode %{
MacroAssembler _masm(&cbuf);
address call = __ ic_call((address)$meth$$method);
if (call == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}

@@ -4706,7 +4717,7 @@ encode %{
if (cb) {
address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
if (call == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
} else {


@@ -73,6 +73,7 @@ define_pd_global(bool, UseCISCSpill, true);
define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, false);
+define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);


@@ -29,16 +29,16 @@
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"

-jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method) {
+jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
Unimplemented();
return 0;
}

-void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
+void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
Unimplemented();
}

-void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle& constant) {
+void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
Unimplemented();
}

@@ -46,20 +46,20 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
Unimplemented();
}

-void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
+void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
Unimplemented();
}

-void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
+void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
Unimplemented();
}

-void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
+void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
Unimplemented();
}

// convert JVMCI register indices (as used in oop maps) to HotSpot registers
-VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
+VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
return NULL;
}


@@ -2384,6 +2384,7 @@ void SharedRuntime::generate_deopt_blob() {
}
#endif // ASSERT
__ mov(c_rarg0, rthread);
+__ mov(c_rarg1, rcpool);
__ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
__ blrt(rscratch1, 1, 0, 1);
__ bind(retaddr);

@@ -2397,6 +2398,7 @@ void SharedRuntime::generate_deopt_blob() {
// Load UnrollBlock* into rdi
__ mov(r5, r0);
+__ ldrw(rcpool, Address(r5, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
Label noException;
__ cmpw(rcpool, Deoptimization::Unpack_exception); // Was exception pending?
__ br(Assembler::NE, noException);
@@ -2609,6 +2611,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// n.b. 2 gp args, 0 fp args, integral return type
__ mov(c_rarg0, rthread);
+__ movw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
__ lea(rscratch1,
RuntimeAddress(CAST_FROM_FN_PTR(address,
Deoptimization::uncommon_trap)));
@@ -2628,6 +2631,16 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// move UnrollBlock* into r4
__ mov(r4, r0);

+#ifdef ASSERT
+  { Label L;
+    __ ldrw(rscratch1, Address(r4, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
+    __ cmpw(rscratch1, (unsigned)Deoptimization::Unpack_uncommon_trap);
+    __ br(Assembler::EQ, L);
+    __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
+    __ bind(L);
+  }
+#endif

// Pop all the frames we must move/replace.
//
// Frame picture (youngest to oldest)
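The aarch64 hunks above, like the ppc and sparc hunks further down, all implement the same protocol: the blob passes its current Unpack_* kind to the runtime call as an extra argument and then re-reads the kind from the returned UnrollBlock, because the runtime may change it (the ppc hunk below notes "it might have changed"). A self-contained sketch of that calling pattern with stand-in types, not the real HotSpot declarations:

// Stand-ins for illustration; the real code lives in deoptimization.cpp.
enum UnpackKind { Unpack_deopt, Unpack_exception, Unpack_uncommon_trap, Unpack_reexecute };

struct UnrollBlockInfo {
  UnpackKind unpack_kind;  // the runtime may rewrite this field
};

UnrollBlockInfo fetch_unroll_info_stub(UnpackKind exec_mode, bool pending_exception) {
  UnrollBlockInfo info;
  // e.g. a pending exception turns an ordinary deopt into Unpack_exception
  info.unpack_kind = pending_exception ? Unpack_exception : exec_mode;
  return info;
}

UnpackKind deopt_blob(UnpackKind exec_mode, bool pending_exception) {
  UnrollBlockInfo info = fetch_unroll_info_stub(exec_mode, pending_exception);
  return info.unpack_kind;  // reload the kind instead of trusting the value passed in
}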


@@ -61,6 +61,7 @@ define_pd_global(bool, OptoPeephole, false);
define_pd_global(bool, UseCISCSpill, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, false);
+define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);
// GL:
// Detected a problem with unscaled compressed oops and
// narrow_oop_use_complex_address() == false.


@@ -2697,7 +2697,7 @@ address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
// Provide a debugger breakpoint in the frame manager if breakpoints
// in osr'd methods are requested.
#ifdef COMPILER2
-NOT_PRODUCT( if (OptoBreakpointOSR) { __ illtrap(); } )
+if (OptoBreakpointOSR) { __ illtrap(); }
#endif

// Load callee's pointer to locals array from callee's state.


@@ -29,16 +29,16 @@
#include "runtime/sharedRuntime.hpp"
#include "vmreg_ppc.inline.hpp"

-jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method) {
+jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
Unimplemented();
return 0;
}

-void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
+void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
Unimplemented();
}

-void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle& constant) {
+void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
Unimplemented();
}

@@ -46,20 +46,20 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
Unimplemented();
}

-void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
+void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
Unimplemented();
}

-void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
+void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
Unimplemented();
}

-void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
+void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
Unimplemented();
}

// convert JVMCI register indices (as used in oop maps) to HotSpot registers
-VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
+VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
return NULL;
}


@@ -2064,6 +2064,17 @@ const bool Matcher::match_rule_supported(int opcode) {
return true; // Per default match rules are supported.
}

+const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
+  // TODO
+  // identify extra cases that we might want to provide match rules for
+  // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
+  bool ret_value = match_rule_supported(opcode);
+  // Add rules here.
+  return ret_value; // Per default match rules are supported.
+}

const int Matcher::float_pressure(int default_pressure_threshold) {
return default_pressure_threshold;
}

@@ -3416,7 +3427,7 @@ encode %{
// The stub for call to interpreter.
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
if (stub == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
}

@@ -3465,7 +3476,7 @@ encode %{
// The stub for call to interpreter.
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
if (stub == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}

@@ -6911,7 +6922,7 @@ instruct decodeN_Disjoint_isel_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
n_compare->_opnds[0] = op_crx;
n_compare->_opnds[1] = op_src;
n_compare->_opnds[2] = new immN_0Oper(TypeNarrowOop::NULL_PTR);

decodeN_mergeDisjointNode *n2 = new decodeN_mergeDisjointNode();
n2->add_req(n_region, n_src, n1);
n2->_opnds[0] = op_dst;

@@ -10588,7 +10599,7 @@ instruct cmpP_reg_imm16(flagsReg crx, iRegPsrc src1, immL16 src2) %{
instruct cmpFUnordered_reg_reg(flagsReg crx, regF src1, regF src2) %{
// Needs matchrule, see cmpDUnordered.
match(Set crx (CmpF src1 src2));
// no match-rule, false predicate
predicate(false);
%}

instruct cmpDUnordered_reg_reg(flagsReg crx, regD src1, regD src2) %{
// Needs matchrule so that ideal opcode is Cmp. This ensures that GCM
// places the node right before the conditional move that uses it.
// In jck test api/java_awt/geom/QuadCurve2DFloat/index.html#SetCurveTesttestCase7,
// compilation of java.awt.geom.RectangularShape::getBounds()Ljava/awt/Rectangle
// crashed in register allocation where the flagsReg between cmpDUnordered and a
// conditional move was supposed to be spilled.
match(Set crx (CmpD src1 src2));
// False predicate, shall not be matched.
predicate(false);


@@ -2802,7 +2802,7 @@ void SharedRuntime::generate_deopt_blob() {
__ set_last_Java_frame(R1_SP, noreg);

// With EscapeAnalysis turned on, this call may safepoint!
-__ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread);
+__ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread, exec_mode_reg);
address calls_return_pc = __ last_calls_return_pc();
// Set an oopmap for the call site that describes all our saved registers.
oop_maps->add_gc_map(calls_return_pc - start, map);

@@ -2815,6 +2815,8 @@ void SharedRuntime::generate_deopt_blob() {
// by save_volatile_registers(...).
RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes);

+// reload the exec mode from the UnrollBlock (it might have changed)
+__ lwz(exec_mode_reg, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg);
// In excp_deopt_mode, restore and clear exception oop which we
// stored in the thread during exception entry above. The exception
// oop will be the return value of this stub.

@@ -2945,8 +2947,9 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);

__ mr(klass_index_reg, R3);
+__ li(R5, Deoptimization::Unpack_exception);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap),
-               R16_thread, klass_index_reg);
+               R16_thread, klass_index_reg, R5);

// Set an oopmap for the call site.
oop_maps->add_gc_map(gc_map_pc - start, map);
@@ -2966,6 +2969,12 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// stack: (caller_of_deoptee, ...).

+#ifdef ASSERT
+__ lwz(R22_tmp2, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg);
+__ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap);
+__ asm_assert_eq("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap", 0);
+#endif

// Allocate new interpreter frame(s) and possibly a c2i adapter
// frame.
push_skeleton_frames(masm, false/*deopt*/,


@@ -65,6 +65,7 @@ define_pd_global(bool, UseCISCSpill, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoScheduling, true);
define_pd_global(bool, OptoRegScheduling, false);
+define_pd_global(bool, SuperWordLoopUnrollAnalysis, false);

#ifdef _LP64
// We need to make sure that all generated code is within


@@ -29,7 +29,7 @@
#include "runtime/sharedRuntime.hpp"
#include "vmreg_sparc.inline.hpp"

-jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method) {
+jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
if (inst->is_call() || inst->is_jump()) {
return pc_offset + NativeCall::instruction_size;
} else if (inst->is_call_reg()) {

@@ -37,12 +37,12 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop
} else if (inst->is_sethi()) {
return pc_offset + NativeFarCall::instruction_size;
} else {
-fatal("unsupported type of instruction for call site");
+JVMCI_ERROR_0("unsupported type of instruction for call site");
return 0;
}
}

-void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
+void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
Handle obj = HotSpotObjectConstantImpl::object(constant);
jobject value = JNIHandles::make_local(obj());

@@ -52,7 +52,7 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
RelocationHolder rspec = oop_Relocation::spec(oop_index);
_instructions->relocate(pc, rspec, 1);
#else
-fatal("compressed oop on 32bit");
+JVMCI_ERROR("compressed oop on 32bit");
#endif
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);

@@ -66,20 +66,20 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
}
}

-void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle& constant) {
+void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
#ifdef _LP64
NativeMovConstReg32* move = nativeMovConstReg32_at(pc);
-narrowKlass narrowOop = record_narrow_metadata_reference(constant);
+narrowKlass narrowOop = record_narrow_metadata_reference(constant, CHECK);
move->set_data((intptr_t)narrowOop);
TRACE_jvmci_3("relocating (narrow metaspace constant) at %p/%p", pc, narrowOop);
#else
-fatal("compressed Klass* on 32bit");
+JVMCI_ERROR("compressed Klass* on 32bit");
#endif
} else {
NativeMovConstReg* move = nativeMovConstReg_at(pc);
-Metadata* reference = record_metadata_reference(constant);
+Metadata* reference = record_metadata_reference(constant, CHECK);
move->set_data((intptr_t)reference);
TRACE_jvmci_3("relocating (metaspace constant) at %p/%p", pc, reference);
}

@@ -106,7 +106,7 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
}
}

-void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
+void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
address pc = (address) inst;
if (inst->is_call()) {
NativeCall* call = nativeCall_at(pc);

@@ -117,17 +117,17 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
jump->set_jump_destination((address) foreign_call_destination);
_instructions->relocate(jump->instruction_address(), runtime_call_Relocation::spec());
} else {
-fatal(err_msg("unknown call or jump instruction at " PTR_FORMAT, p2i(pc)));
+JVMCI_ERROR("unknown call or jump instruction at " PTR_FORMAT, p2i(pc));
}
TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
}

-void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
+void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
#ifdef ASSERT
Method* method = NULL;
// we need to check, this might also be an unresolved method
if (hotspot_method->is_a(HotSpotResolvedJavaMethodImpl::klass())) {
-method = getMethodFromHotSpotMethod(hotspot_method);
+method = getMethodFromHotSpotMethod(hotspot_method());
}
#endif
switch (_next_call_type) {

@@ -156,33 +156,33 @@ void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
break;
}
default:
-fatal("invalid _next_call_type value");
+JVMCI_ERROR("invalid _next_call_type value");
break;
}
}

-void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
+void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
switch (mark) {
case POLL_NEAR:
-fatal("unimplemented");
+JVMCI_ERROR("unimplemented");
break;
case POLL_FAR:
_instructions->relocate(pc, relocInfo::poll_type);
break;
case POLL_RETURN_NEAR:
-fatal("unimplemented");
+JVMCI_ERROR("unimplemented");
break;
case POLL_RETURN_FAR:
_instructions->relocate(pc, relocInfo::poll_return_type);
break;
default:
-fatal("invalid mark value");
+JVMCI_ERROR("invalid mark value");
break;
}
}

// convert JVMCI register indices (as used in oop maps) to HotSpot registers
-VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
+VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
// JVMCI Registers are numbered as follows:
// 0..31: Thirty-two General Purpose registers (CPU Registers)
// 32..63: Thirty-two single precision float registers

@@ -199,7 +199,7 @@ VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
} else if(jvmci_reg < 112) {
floatRegisterNumber = 4 * (jvmci_reg - 96);
} else {
-fatal("Unknown jvmci register");
+JVMCI_ERROR_NULL("invalid register number: %d", jvmci_reg);
}
return as_FloatRegister(floatRegisterNumber)->as_VMReg();
}
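The fatal(...) to JVMCI_ERROR(...) rewrites in this file change hard VM aborts into exceptions raised through the newly added TRAPS parameter, so malformed input from a JVMCI compiler no longer brings down the VM; the _0 and _NULL variants exist because the enclosing function still has to return a value after throwing. A hypothetical sketch of the macro shapes, for illustration only (report_jvmci_error is an invented helper; the real definitions live in the JVMCI sources and differ in detail):

// Illustrative only: each variant raises a JVMCIError through the
// implicit thread argument, then returns the value its suffix names.
#define JVMCI_ERROR(...)      { report_jvmci_error(THREAD, __VA_ARGS__); return; }
#define JVMCI_ERROR_0(...)    { report_jvmci_error(THREAD, __VA_ARGS__); return 0; }
#define JVMCI_ERROR_NULL(...) { report_jvmci_error(THREAD, __VA_ARGS__); return NULL; }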


@@ -3036,6 +3036,7 @@ void SharedRuntime::generate_deopt_blob() {
__ mov((int32_t)Deoptimization::Unpack_reexecute, L0deopt_mode);
__ mov(G2_thread, O0);
+__ mov(L0deopt_mode, O2);
__ call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
__ delayed()->nop();

oop_maps->add_gc_map( __ offset()-start, map->deep_copy());

@@ -3121,6 +3122,7 @@ void SharedRuntime::generate_deopt_blob() {
// do the call by hand so we can get the oopmap
__ mov(G2_thread, L7_thread_cache);
+__ mov(L0deopt_mode, O1);
__ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
__ delayed()->mov(G2_thread, O0);

@@ -3146,6 +3148,7 @@ void SharedRuntime::generate_deopt_blob() {
RegisterSaver::restore_result_registers(masm);

+__ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), G4deopt_mode);
Label noException;
__ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);

@@ -3269,7 +3272,8 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ save_frame(0);
__ set_last_Java_frame(SP, noreg);
__ mov(I0, O2klass_index);
-__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
+__ mov(Deoptimization::Unpack_uncommon_trap, O3); // exec mode
+__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index, O3);
__ reset_last_Java_frame();
__ mov(O0, O2UnrollBlock->after_save());
__ restore();
@@ -3278,6 +3282,15 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ mov(O2UnrollBlock, O2UnrollBlock->after_save());
__ restore();

+#ifdef ASSERT
+  { Label L;
+    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), O1);
+    __ cmp_and_br_short(O1, Deoptimization::Unpack_uncommon_trap, Assembler::equal, Assembler::pt, L);
+    __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
+    __ bind(L);
+  }
+#endif

// Allocate new interpreter frame(s) and possible c2i adapter frame
make_new_frames(masm, false);


@@ -1860,6 +1860,17 @@ const bool Matcher::match_rule_supported(int opcode) {
return true; // Per default match rules are supported.
}

+const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
+  // TODO
+  // identify extra cases that we might want to provide match rules for
+  // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
+  bool ret_value = match_rule_supported(opcode);
+  // Add rules here.
+  return ret_value; // Per default match rules are supported.
+}

const int Matcher::float_pressure(int default_pressure_threshold) {
return default_pressure_threshold;
}

@@ -1905,7 +1916,7 @@ const bool Matcher::misaligned_vectors_ok() {
}

// Current (2013) SPARC platforms need to read original key
// to construct decryption expanded key
const bool Matcher::pass_original_key_for_aes() {
return true;
}

@@ -2612,7 +2623,7 @@ encode %{
if (stub == NULL && !(TraceJumps && Compile::current()->in_scratch_emit_size())) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
}
%}

@@ -3132,10 +3143,10 @@ ins_attrib ins_size(32); // Required size attribute (in bits)
// AVOID_NONE - instruction can be placed anywhere
// AVOID_BEFORE - instruction cannot be placed after an
//   instruction with MachNode::AVOID_AFTER
// AVOID_AFTER - the next instruction cannot be the one
//   with MachNode::AVOID_BEFORE
// AVOID_BEFORE_AND_AFTER - BEFORE and AFTER attributes at
//   the same time
ins_attrib ins_avoid_back_to_back(MachNode::AVOID_NONE);

ins_attrib ins_short_branch(0); // Required flag: is this instruction a


@@ -358,7 +358,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
}

-#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
tty->print_cr("L2 data cache line size: %u", L2_data_cache_line_size());

@@ -391,7 +390,6 @@ void VM_Version::initialize() {
tty->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
}
}
-#endif // PRODUCT
}

void VM_Version::print_features() {

@@ -400,7 +398,7 @@ void VM_Version::print_features() {
int VM_Version::determine_features() {
if (UseV8InstrsOnly) {
-NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Version is Forced-V8");)
+if (PrintMiscellaneous && Verbose) { tty->print_cr("Version is Forced-V8"); }
return generic_v8_m;
}
@@ -416,12 +414,12 @@ int VM_Version::determine_features() {
if (is_T_family(features)) {
// Happy to accommodate...
} else {
-NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Version is Forced-Niagara");)
+if (PrintMiscellaneous && Verbose) { tty->print_cr("Version is Forced-Niagara"); }
features |= T_family_m;
}
} else {
if (is_T_family(features) && !FLAG_IS_DEFAULT(UseNiagaraInstrs)) {
-NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Version is Forced-Not-Niagara");)
+if (PrintMiscellaneous && Verbose) { tty->print_cr("Version is Forced-Not-Niagara"); }
features &= ~(T_family_m | T1_model_m);
} else {
// Happy to accommodate...

File diff suppressed because it is too large.


@@ -438,6 +438,8 @@ class ArrayAddress VALUE_OBJ_CLASS_SPEC {
};

+class InstructionAttr;

// 64-bit reflects the fxsave size which is 512 bytes and the new xsave area on EVEX which is another 2176 bytes
// See fxsave and xsave(EVEX enabled) documentation for layout
const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY(2688 / wordSize);
@@ -568,7 +570,8 @@ class Assembler : public AbstractAssembler {
EVEX_8bit = 0,
EVEX_16bit = 1,
EVEX_32bit = 2,
-EVEX_64bit = 3
+EVEX_64bit = 3,
+EVEX_NObit = 4
};

enum WhichOperand {
@@ -598,16 +601,12 @@ class Assembler : public AbstractAssembler {
private:

-int _evex_encoding;
-int _input_size_in_bits;
-int _avx_vector_len;
-int _tuple_type;
-bool _is_evex_instruction;
bool _legacy_mode_bw;
bool _legacy_mode_dq;
bool _legacy_mode_vl;
bool _legacy_mode_vlbw;
-bool _instruction_uses_vl;
+class InstructionAttr *_attributes;

// 64bit prefixes
int prefix_and_encode(int reg_enc, bool byteinst = false);
@@ -637,181 +636,30 @@ private:
int rex_prefix_and_encode(int dst_enc, int src_enc,
VexSimdPrefix pre, VexOpcode opc, bool rex_w);

-void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w,
-                int nds_enc, VexSimdPrefix pre, VexOpcode opc,
-                int vector_len);
+void vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc);

-void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, bool evex_r, bool evex_v,
-                 int nds_enc, VexSimdPrefix pre, VexOpcode opc,
-                 bool is_extended_context, bool is_merge_context,
-                 int vector_len, bool no_mask_reg );
+void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v,
+                 int nds_enc, VexSimdPrefix pre, VexOpcode opc);

void vex_prefix(Address adr, int nds_enc, int xreg_enc,
VexSimdPrefix pre, VexOpcode opc,
-               bool vex_w, int vector_len,
-               bool legacy_mode = false, bool no_mask_reg = false);
+               InstructionAttr *attributes);

-void vex_prefix(XMMRegister dst, XMMRegister nds, Address src,
-                VexSimdPrefix pre, int vector_len = AVX_128bit,
-                bool no_mask_reg = false, bool legacy_mode = false) {
-  int dst_enc = dst->encoding();
-  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
-  vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector_len, legacy_mode, no_mask_reg);
-}
-void vex_prefix_q(XMMRegister dst, XMMRegister nds, Address src,
-                  VexSimdPrefix pre, int vector_len = AVX_128bit,
-                  bool no_mask_reg = false) {
-  int dst_enc = dst->encoding();
-  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
-  vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, true, vector_len, false, no_mask_reg);
-}
-void vex_prefix_0F38(Register dst, Register nds, Address src, bool no_mask_reg = false) {
-  bool vex_w = false;
-  int vector_len = AVX_128bit;
-  vex_prefix(src, nds->encoding(), dst->encoding(),
-             VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w,
-             vector_len, no_mask_reg);
-}
-void vex_prefix_0F38_legacy(Register dst, Register nds, Address src, bool no_mask_reg = false) {
-  bool vex_w = false;
-  int vector_len = AVX_128bit;
-  vex_prefix(src, nds->encoding(), dst->encoding(),
-             VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w,
-             vector_len, true, no_mask_reg);
-}
-void vex_prefix_0F38_q(Register dst, Register nds, Address src, bool no_mask_reg = false) {
-  bool vex_w = true;
-  int vector_len = AVX_128bit;
-  vex_prefix(src, nds->encoding(), dst->encoding(),
-             VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w,
-             vector_len, no_mask_reg);
-}
-void vex_prefix_0F38_q_legacy(Register dst, Register nds, Address src, bool no_mask_reg = false) {
-  bool vex_w = true;
-  int vector_len = AVX_128bit;
-  vex_prefix(src, nds->encoding(), dst->encoding(),
-             VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w,
-             vector_len, true, no_mask_reg);
-}

int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
VexSimdPrefix pre, VexOpcode opc,
-                         bool vex_w, int vector_len,
-                         bool legacy_mode, bool no_mask_reg);
+                         InstructionAttr *attributes);

-int vex_prefix_0F38_and_encode(Register dst, Register nds, Register src, bool no_mask_reg = false) {
-  bool vex_w = false;
-  int vector_len = AVX_128bit;
-  return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
-                               VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len,
-                               false, no_mask_reg);
-}
-int vex_prefix_0F38_and_encode_legacy(Register dst, Register nds, Register src, bool no_mask_reg = false) {
-  bool vex_w = false;
-  int vector_len = AVX_128bit;
-  return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
-                               VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len,
-                               true, no_mask_reg);
-}
-int vex_prefix_0F38_and_encode_q(Register dst, Register nds, Register src, bool no_mask_reg = false) {
-  bool vex_w = true;
-  int vector_len = AVX_128bit;
-  return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
-                               VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len,
-                               false, no_mask_reg);
-}
-int vex_prefix_0F38_and_encode_q_legacy(Register dst, Register nds, Register src, bool no_mask_reg = false) {
-  bool vex_w = true;
-  int vector_len = AVX_128bit;
-  return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
-                               VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector_len,
-                               true, no_mask_reg);
-}
-int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
-                          VexSimdPrefix pre, int vector_len = AVX_128bit,
-                          VexOpcode opc = VEX_OPCODE_0F, bool legacy_mode = false,
-                          bool no_mask_reg = false) {
-  int src_enc = src->encoding();
-  int dst_enc = dst->encoding();
-  int nds_enc = nds->is_valid() ? nds->encoding() : 0;
-  return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector_len, legacy_mode, no_mask_reg);
-}
-void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr,
-                 VexSimdPrefix pre, bool no_mask_reg, VexOpcode opc = VEX_OPCODE_0F,
-                 bool rex_w = false, int vector_len = AVX_128bit, bool legacy_mode = false);
-void simd_prefix(XMMRegister dst, Address src, VexSimdPrefix pre,
-                 bool no_mask_reg, VexOpcode opc = VEX_OPCODE_0F) {
-  simd_prefix(dst, xnoreg, src, pre, no_mask_reg, opc);
-}
-void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg) {
-  simd_prefix(src, dst, pre, no_mask_reg);
-}
-void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src,
-                   VexSimdPrefix pre, bool no_mask_reg = false) {
-  bool rex_w = true;
-  simd_prefix(dst, nds, src, pre, no_mask_reg, VEX_OPCODE_0F, rex_w);
-}
-int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
-                           VexSimdPrefix pre, bool no_mask_reg,
-                           VexOpcode opc = VEX_OPCODE_0F,
-                           bool rex_w = false, int vector_len = AVX_128bit,
-                           bool legacy_mode = false);
-int kreg_prefix_and_encode(KRegister dst, KRegister nds, KRegister src,
-                           VexSimdPrefix pre, bool no_mask_reg,
-                           VexOpcode opc = VEX_OPCODE_0F,
-                           bool rex_w = false, int vector_len = AVX_128bit);
-int kreg_prefix_and_encode(KRegister dst, KRegister nds, Register src,
-                           VexSimdPrefix pre, bool no_mask_reg,
-                           VexOpcode opc = VEX_OPCODE_0F,
-                           bool rex_w = false, int vector_len = AVX_128bit);
-// Move/convert 32-bit integer value.
-int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src,
-                           VexSimdPrefix pre, bool no_mask_reg) {
-  // It is OK to cast from Register to XMMRegister to pass argument here
-  // since only encoding is used in simd_prefix_and_encode() and number of
-  // Gen and Xmm registers are the same.
-  return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, no_mask_reg, VEX_OPCODE_0F);
-}
-int simd_prefix_and_encode(XMMRegister dst, Register src, VexSimdPrefix pre, bool no_mask_reg) {
-  return simd_prefix_and_encode(dst, xnoreg, src, pre, no_mask_reg);
-}
-int simd_prefix_and_encode(Register dst, XMMRegister src,
-                           VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
-                           bool no_mask_reg = false) {
-  return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, no_mask_reg, opc);
-}
-// Move/convert 64-bit integer value.
-int simd_prefix_and_encode_q(XMMRegister dst, XMMRegister nds, Register src,
-                             VexSimdPrefix pre, bool no_mask_reg = false) {
-  bool rex_w = true;
-  return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, no_mask_reg, VEX_OPCODE_0F, rex_w);
-}
-int simd_prefix_and_encode_q(XMMRegister dst, Register src, VexSimdPrefix pre, bool no_mask_reg) {
-  return simd_prefix_and_encode_q(dst, xnoreg, src, pre, no_mask_reg);
-}
-int simd_prefix_and_encode_q(Register dst, XMMRegister src,
-                             VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F,
-                             bool no_mask_reg = false) {
-  bool rex_w = true;
-  return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, no_mask_reg, opc, rex_w);
-}

+void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
+                 VexOpcode opc, InstructionAttr *attributes);
+int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
+                           VexOpcode opc, InstructionAttr *attributes);
+int kreg_prefix_and_encode(KRegister dst, KRegister nds, KRegister src, VexSimdPrefix pre,
+                           VexOpcode opc, InstructionAttr *attributes);
+int kreg_prefix_and_encode(KRegister dst, KRegister nds, Register src, VexSimdPrefix pre,
+                           VexOpcode opc, InstructionAttr *attributes);

// Helper functions for groups of instructions
void emit_arith_b(int op1, int op2, Register dst, int imm8);
@@ -821,27 +669,6 @@ private:
void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32);
void emit_arith(int op1, int op2, Register dst, Register src);

-void emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg = false, bool legacy_mode = false);
-void emit_simd_arith_q(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg = false);
-void emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg = false, bool legacy_mode = false);
-void emit_simd_arith_q(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg = false);
-void emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg = false);
-void emit_simd_arith_nonds_q(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre, bool no_mask_reg = false);
-void emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg = false, bool legacy_mode = false);
-void emit_simd_arith_nonds_q(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre, bool no_mask_reg = false);
-void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
-                    Address src, VexSimdPrefix pre, int vector_len,
-                    bool no_mask_reg = false, bool legacy_mode = false);
-void emit_vex_arith_q(int opcode, XMMRegister dst, XMMRegister nds,
-                      Address src, VexSimdPrefix pre, int vector_len,
-                      bool no_mask_reg = false);
-void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
-                    XMMRegister src, VexSimdPrefix pre, int vector_len,
-                    bool no_mask_reg = false, bool legacy_mode = false);
-void emit_vex_arith_q(int opcode, XMMRegister dst, XMMRegister nds,
-                      XMMRegister src, VexSimdPrefix pre, int vector_len,
-                      bool no_mask_reg = false);

bool emit_compressed_disp_byte(int &disp);

void emit_operand(Register reg,
@@ -986,18 +813,16 @@ private:
// belong in macro assembler but there is no need for both varieties to exist

void init_attributes(void) {
-  _evex_encoding = 0;
-  _input_size_in_bits = 0;
-  _avx_vector_len = AVX_NoVec;
-  _tuple_type = EVEX_ETUP;
-  _is_evex_instruction = false;
  _legacy_mode_bw = (VM_Version::supports_avx512bw() == false);
  _legacy_mode_dq = (VM_Version::supports_avx512dq() == false);
  _legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
  _legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
-  _instruction_uses_vl = false;
+  _attributes = NULL;
}

+void set_attributes(InstructionAttr *attributes) { _attributes = attributes; }
+void clear_attributes(void) { _attributes = NULL; }

void lea(Register dst, Address src);
void mov(Register dst, Register src);
@@ -2106,12 +1931,12 @@ private:
void vextracti128h(Address dst, XMMRegister src);

// Copy low 256bit into high 256bit of ZMM registers.
-void vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src);
-void vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src);
-void vextracti64x4h(XMMRegister dst, XMMRegister src);
-void vextractf64x4h(XMMRegister dst, XMMRegister src);
-void vextractf64x4h(Address dst, XMMRegister src);
-void vinsertf64x4h(XMMRegister dst, Address src);
+void vinserti64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value);
+void vinsertf64x4h(XMMRegister dst, XMMRegister nds, XMMRegister src, int value);
+void vextracti64x4h(XMMRegister dst, XMMRegister src, int value);
+void vextractf64x4h(XMMRegister dst, XMMRegister src, int value);
+void vextractf64x4h(Address dst, XMMRegister src, int value);
+void vinsertf64x4h(XMMRegister dst, Address src, int value);

// Copy targeted 128bit segments of the ZMM registers
void vextracti64x2h(XMMRegister dst, XMMRegister src, int value);
@ -2173,4 +1998,95 @@ private:
}; };
+// The Intel x86/AMD64 Assembler attributes: all fields enclosed here guide
+// encoding-level decisions. The specific set functions are for specialized
+// use; otherwise the defaults supplied at object construction apply.
+class InstructionAttr {
+public:
+  InstructionAttr(
+    int vector_len,
+    bool rex_vex_w,
+    bool legacy_mode,
+    bool no_reg_mask,
+    bool uses_vl)
+    :
+    _avx_vector_len(vector_len),
+    _rex_vex_w(rex_vex_w),
+    _legacy_mode(legacy_mode),
+    _no_reg_mask(no_reg_mask),
+    _uses_vl(uses_vl),
+    _tuple_type(Assembler::EVEX_ETUP),
+    _input_size_in_bits(Assembler::EVEX_NObit),
+    _is_evex_instruction(false),
+    _evex_encoding(0),
+    _is_clear_context(false),
+    _is_extended_context(false),
+    _current_assembler(NULL) {
+    if (UseAVX < 3) _legacy_mode = true;
+  }
+  ~InstructionAttr() {
+    if (_current_assembler != NULL) {
+      _current_assembler->clear_attributes();
+    }
+    _current_assembler = NULL;
+  }
+private:
+  int _avx_vector_len;
+  bool _rex_vex_w;
+  bool _legacy_mode;
+  bool _no_reg_mask;
+  bool _uses_vl;
+  int _tuple_type;
+  int _input_size_in_bits;
+  bool _is_evex_instruction;
+  int _evex_encoding;
+  bool _is_clear_context;
+  bool _is_extended_context;
+  Assembler *_current_assembler;
+public:
+  // query functions for field accessors
+  int get_vector_len(void) const { return _avx_vector_len; }
+  bool is_rex_vex_w(void) const { return _rex_vex_w; }
+  bool is_legacy_mode(void) const { return _legacy_mode; }
+  bool is_no_reg_mask(void) const { return _no_reg_mask; }
+  bool uses_vl(void) const { return _uses_vl; }
+  int get_tuple_type(void) const { return _tuple_type; }
+  int get_input_size(void) const { return _input_size_in_bits; }
+  int is_evex_instruction(void) const { return _is_evex_instruction; }
+  int get_evex_encoding(void) const { return _evex_encoding; }
+  bool is_clear_context(void) const { return _is_clear_context; }
+  bool is_extended_context(void) const { return _is_extended_context; }
+  // Set the vector len manually
+  void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }
+  // Set the instruction to be encoded in AVX mode
+  void set_is_legacy_mode(void) { _legacy_mode = true; }
+  // Set the current instruction to be encoded as an EVEX instruction
+  void set_is_evex_instruction(void) { _is_evex_instruction = true; }
+  // Internal encoding data used in compressed immediate offset programming
+  void set_evex_encoding(int value) { _evex_encoding = value; }
+  // Set the EVEX.Z field so that all non-directed XMM/YMM/ZMM components are cleared
+  void set_is_clear_context(void) { _is_clear_context = true; }
+  // Map back to the current assembler so that we can manage the object-level association
+  void set_current_assembler(Assembler *current_assembler) { _current_assembler = current_assembler; }
+  // Address modifiers used for compressed displacement calculation
+  void set_address_attributes(int tuple_type, int input_size_in_bits) {
+    if (VM_Version::supports_evex()) {
+      _tuple_type = tuple_type;
+      _input_size_in_bits = input_size_in_bits;
+    }
+  }
+};
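For orientation with the class just added, here is a minimal usage sketch. It is not part of the patch: the emitter body is elided, and EVEX_FV/EVEX_32bit are assumed to be members of the Assembler tuple-type and input-size enums alongside the EVEX_ETUP/EVEX_NObit defaults referenced above. An emitter builds an InstructionAttr on the stack, attaches it to the assembler, and the destructor detaches it again.

// Hypothetical sketch, assuming the surrounding assembler_x86.hpp declarations.
void emit_with_attributes_sketch(Assembler* as, int vector_len) {
  InstructionAttr attributes(vector_len, /* rex_vex_w */ false,
                             /* legacy_mode */ false, /* no_reg_mask */ false,
                             /* uses_vl */ true);
  // Compressed displacements need the tuple type and the input size.
  attributes.set_address_attributes(Assembler::EVEX_FV, Assembler::EVEX_32bit);
  attributes.set_current_assembler(as);
  as->set_attributes(&attributes);
  // ... emit the instruction bytes through the usual prefix helpers ...
}   // ~InstructionAttr() runs here and calls as->clear_attributes()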
#endif // CPU_X86_VM_ASSEMBLER_X86_HPP
@@ -3711,7 +3711,7 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
__ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
}
-if (UseAVX > 1) {
+if (UseAVX > 0) {
__ vnegatess(dest->as_xmm_float_reg(), dest->as_xmm_float_reg(),
ExternalAddress((address)float_signflip_pool));
} else {
@@ -3722,7 +3722,7 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
__ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
}
-if (UseAVX > 1) {
+if (UseAVX > 0) {
__ vnegatesd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg(),
ExternalAddress((address)double_signflip_pool));
} else {
@@ -84,6 +84,7 @@ define_pd_global(bool, UseCISCSpill, true);
define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, true);
+define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
@@ -58,6 +58,4 @@ void Compile::pd_compiler2_init() {
OptoReg::invalidate(i);
}
}
-SuperWordLoopUnrollAnalysis = true;
}
@@ -36,7 +36,7 @@
#include "code/vmreg.hpp"
#include "vmreg_x86.inline.hpp"
-jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method) {
+jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
if (inst->is_call() || inst->is_jump()) {
assert(NativeCall::instruction_size == (int)NativeJump::instruction_size, "unexpected size");
return (pc_offset + NativeCall::instruction_size);
@@ -53,18 +53,17 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, oop
return (offset);
} else if (inst->is_call_reg()) {
// the inlined vtable stub contains a "call register" instruction
-assert(method != NULL, "only valid for virtual calls");
+assert(method.not_null(), "only valid for virtual calls");
return (pc_offset + ((NativeCallReg *) inst)->next_instruction_offset());
} else if (inst->is_cond_jump()) {
address pc = (address) (inst);
return pc_offset + (jint) (Assembler::locate_next_instruction(pc) - pc);
} else {
-fatal("unsupported type of instruction for call site");
-return 0;
+JVMCI_ERROR_0("unsupported type of instruction for call site");
}
}
-void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
+void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
Handle obj = HotSpotObjectConstantImpl::object(constant);
jobject value = JNIHandles::make_local(obj());
@@ -75,7 +74,7 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
_instructions->relocate(pc, oop_Relocation::spec(oop_index), Assembler::narrow_oop_operand);
TRACE_jvmci_3("relocating (narrow oop constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
#else
-fatal("compressed oop on 32bit");
+JVMCI_ERROR("compressed oop on 32bit");
#endif
} else {
address operand = Assembler::locate_operand(pc, Assembler::imm_operand);
@@ -85,19 +84,19 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& constant) {
}
}
-void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle& constant) {
+void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
address pc = _instructions->start() + pc_offset;
if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
#ifdef _LP64
address operand = Assembler::locate_operand(pc, Assembler::narrow_oop_operand);
-*((narrowKlass*) operand) = record_narrow_metadata_reference(constant);
+*((narrowKlass*) operand) = record_narrow_metadata_reference(constant, CHECK);
TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
#else
-fatal("compressed Klass* on 32bit");
+JVMCI_ERROR("compressed Klass* on 32bit");
#endif
} else {
address operand = Assembler::locate_operand(pc, Assembler::imm_operand);
-*((Metadata**) operand) = record_metadata_reference(constant);
+*((Metadata**) operand) = record_metadata_reference(constant, CHECK);
TRACE_jvmci_3("relocating (metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
}
}
@@ -117,7 +116,7 @@ void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset
TRACE_jvmci_3("relocating at " PTR_FORMAT "/" PTR_FORMAT " with destination at " PTR_FORMAT " (%d)", p2i(pc), p2i(operand), p2i(dest), data_offset);
}
-void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
+void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
address pc = (address) inst;
if (inst->is_call()) {
// NOTE: for call without a mov, the offset must fit a 32-bit immediate
@@ -139,18 +138,18 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
*(jint*) disp += ((address) foreign_call_destination) - old_dest;
_instructions->relocate(pc, runtime_call_Relocation::spec(), Assembler::call32_operand);
} else {
-fatal("unsupported relocation for foreign call");
+JVMCI_ERROR("unsupported relocation for foreign call");
}
TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
}
-void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
+void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
#ifdef ASSERT
Method* method = NULL;
// we need to check, this might also be an unresolved method
if (hotspot_method->is_a(HotSpotResolvedJavaMethodImpl::klass())) {
-method = getMethodFromHotSpotMethod(hotspot_method);
+method = getMethodFromHotSpotMethod(hotspot_method());
}
#endif
switch (_next_call_type) {
@@ -185,6 +184,7 @@ void CodeInstaller::pd_relocate_JavaMethod(oop hotspot_method, jint pc_offset) {
break;
}
default:
+JVMCI_ERROR("invalid _next_call_type value");
break;
}
}
@@ -198,7 +198,7 @@ static void relocate_poll_near(address pc) {
}
-void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
+void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
switch (mark) {
case POLL_NEAR: {
relocate_poll_near(pc);
@@ -222,13 +222,13 @@ void CodeInstaller::pd_relocate_poll(address pc, jint mark) {
_instructions->relocate(pc, relocInfo::poll_return_type, Assembler::imm_operand);
break;
default:
-fatal("invalid mark value");
+JVMCI_ERROR("invalid mark value: %d", mark);
break;
}
}
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
-VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
+VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
if (jvmci_reg < RegisterImpl::number_of_registers) {
return as_Register(jvmci_reg)->as_VMReg();
} else {
@@ -236,8 +236,7 @@ VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg) {
if (floatRegisterNumber < XMMRegisterImpl::number_of_registers) {
return as_XMMRegister(floatRegisterNumber)->as_VMReg();
}
-ShouldNotReachHere();
-return NULL;
+JVMCI_ERROR_NULL("invalid register number: %d", jvmci_reg);
}
}
File diff suppressed because it is too large.
@@ -962,10 +962,15 @@ public:
void divss(XMMRegister dst, AddressLiteral src);
// Move Unaligned Double Quadword
-void movdqu(Address dst, XMMRegister src) { Assembler::movdqu(dst, src); }
-void movdqu(XMMRegister dst, Address src) { Assembler::movdqu(dst, src); }
-void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); }
+void movdqu(Address dst, XMMRegister src);
+void movdqu(XMMRegister dst, Address src);
+void movdqu(XMMRegister dst, XMMRegister src);
void movdqu(XMMRegister dst, AddressLiteral src);
+// AVX Unaligned forms
+void vmovdqu(Address dst, XMMRegister src);
+void vmovdqu(XMMRegister dst, Address src);
+void vmovdqu(XMMRegister dst, XMMRegister src);
+void vmovdqu(XMMRegister dst, AddressLiteral src);
// Move Aligned Double Quadword
void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); }
@@ -1024,12 +1029,12 @@ public:
void ucomisd(XMMRegister dst, AddressLiteral src);
// Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
-void xorpd(XMMRegister dst, XMMRegister src) { Assembler::xorpd(dst, src); }
+void xorpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); }
void xorpd(XMMRegister dst, AddressLiteral src);
// Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
-void xorps(XMMRegister dst, XMMRegister src) { Assembler::xorps(dst, src); }
+void xorps(XMMRegister dst, XMMRegister src);
void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); }
void xorps(XMMRegister dst, AddressLiteral src);
@@ -1047,6 +1052,39 @@ public:
void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); }
void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);
+void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
+void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
+void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
+void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
+void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
+void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
+void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
+void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);
+void punpcklbw(XMMRegister dst, XMMRegister src);
+void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }
+void pshuflw(XMMRegister dst, XMMRegister src, int mode);
+void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }
void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
@@ -192,31 +192,22 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
}
} else if(UseSSE >= 2) {
// Save whole 128bit (16 bytes) XMM registers
-if (VM_Version::supports_avx512novl()) {
-  for (int n = 0; n < num_xmm_regs; n++) {
-    __ vextractf32x4h(Address(rsp, off*wordSize), as_XMMRegister(n), 0);
-    off += delta;
-  }
-} else {
-  for (int n = 0; n < num_xmm_regs; n++) {
-    __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
-    off += delta;
-  }
-}
+for (int n = 0; n < num_xmm_regs; n++) {
+  __ movdqu(Address(rsp, off*wordSize), as_XMMRegister(n));
+  off += delta;
+}
}
if (vect_words > 0) {
assert(vect_words*wordSize == 128, "");
__ subptr(rsp, 128); // Save upper half of YMM registers
-off = 0;
for (int n = 0; n < num_xmm_regs; n++) {
-  __ vextractf128h(Address(rsp, off++*16), as_XMMRegister(n));
+  __ vextractf128h(Address(rsp, n*16), as_XMMRegister(n));
}
if (UseAVX > 2) {
__ subptr(rsp, 256); // Save upper half of ZMM registers
-off = 0;
for (int n = 0; n < num_xmm_regs; n++) {
-  __ vextractf64x4h(Address(rsp, off++*32), as_XMMRegister(n));
+  __ vextractf64x4h(Address(rsp, n*32), as_XMMRegister(n), 1);
}
}
}
@@ -285,31 +276,23 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_ve
off += delta;
}
} else if (UseSSE >= 2) {
-if (VM_Version::supports_avx512novl()) {
-  for (int n = 0; n < num_xmm_regs; n++) {
-    __ vinsertf32x4h(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes), 0);
-    off += delta;
-  }
-} else {
-  for (int n = 0; n < num_xmm_regs; n++) {
-    __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
-    off += delta;
-  }
-}
+for (int n = 0; n < num_xmm_regs; n++) {
+  __ movdqu(as_XMMRegister(n), Address(rsp, off*wordSize+additional_frame_bytes));
+  off += delta;
+}
}
if (restore_vectors) {
+assert(additional_frame_bytes == 128, "");
if (UseAVX > 2) {
-off = 0;
+// Restore upper half of ZMM registers.
for (int n = 0; n < num_xmm_regs; n++) {
-  __ vinsertf64x4h(as_XMMRegister(n), Address(rsp, off++*32));
+  __ vinsertf64x4h(as_XMMRegister(n), Address(rsp, n*32), 1);
}
__ addptr(rsp, additional_frame_bytes*2); // Save upper half of ZMM registers
}
// Restore upper half of YMM registers.
-assert(additional_frame_bytes == 128, "");
-off = 0;
for (int n = 0; n < num_xmm_regs; n++) {
-  __ vinsertf128h(as_XMMRegister(n), Address(rsp, off++*16));
+  __ vinsertf128h(as_XMMRegister(n), Address(rsp, n*16));
}
__ addptr(rsp, additional_frame_bytes); // Save upper half of YMM registers
}
@@ -2562,7 +2545,8 @@ void SharedRuntime::generate_deopt_blob() {
oop_maps->add_gc_map( __ pc()-start, map);
-// Discard arg to fetch_unroll_info
+// Discard args to fetch_unroll_info
+__ pop(rcx);
__ pop(rcx);
__ get_thread(rcx);
@@ -2575,9 +2559,8 @@ void SharedRuntime::generate_deopt_blob() {
// we are very short of registers
Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
-// retrieve the deopt kind from where we left it.
-__ pop(rax);
-__ movl(unpack_kind, rax); // save the unpack_kind value
+// retrieve the deopt kind from the UnrollBlock.
+__ movl(rax, unpack_kind);
Label noException;
__ cmpl(rax, Deoptimization::Unpack_exception); // Was exception pending?
@@ -2787,11 +2770,12 @@ void SharedRuntime::generate_uncommon_trap_blob() {
enum frame_layout {
arg0_off, // thread sp + 0 // Arg location for
arg1_off, // unloaded_class_index sp + 1 // calling C
+arg2_off, // exec_mode sp + 2
// The frame sender code expects that rbp will be in the "natural" place and
// will override any oopMap setting for it. We must therefore force the layout
// so that it agrees with the frame sender code.
-rbp_off, // callee saved register sp + 2
-return_off, // slot for return address sp + 3
+rbp_off, // callee saved register sp + 3
+return_off, // slot for return address sp + 4
framesize
};
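Since the enum assigns consecutive stack slots implicitly, a standalone compile-time sketch (illustrative only, C++11, the *_sketch names are hypothetical) makes the new slot assignments explicit:

// Mirrors the enum above: the new exec_mode argument shifts rbp and the
// return address down by one word each.
enum frame_layout_sketch {
  arg0_sketch,      // thread                sp + 0
  arg1_sketch,      // unloaded_class_index  sp + 1
  arg2_sketch,      // exec_mode             sp + 2  (new)
  rbp_sketch,       // callee-saved rbp      sp + 3  (was sp + 2)
  return_sketch,    // return address        sp + 4  (was sp + 3)
  framesize_sketch
};
static_assert(framesize_sketch == 5, "dummy frame is now five words");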
@@ -2823,6 +2807,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ movptr(Address(rsp, arg0_off*wordSize), rdx);
// argument already in ECX
__ movl(Address(rsp, arg1_off*wordSize),rcx);
+__ movl(Address(rsp, arg2_off*wordSize), Deoptimization::Unpack_uncommon_trap);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
// Set an oopmap for the call site
@@ -2839,6 +2824,16 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Load UnrollBlock into EDI
__ movptr(rdi, rax);
+#ifdef ASSERT
+{ Label L;
+  __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
+            (int32_t)Deoptimization::Unpack_uncommon_trap);
+  __ jcc(Assembler::equal, L);
+  __ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
+  __ bind(L);
+}
+#endif
// Pop all the frames we must move/replace.
//
// Frame picture (youngest to oldest)
@@ -72,45 +72,28 @@ class SimpleRuntimeFrame {
class RegisterSaver {
// Capture info about frame layout. Layout offsets are in jint
// units because compiler frame slots are jints.
-#define HALF_ZMM_BANK_WORDS 128
+#define XSAVE_AREA_BEGIN 160
+#define XSAVE_AREA_YMM_BEGIN 576
+#define XSAVE_AREA_ZMM_BEGIN 1152
+#define XSAVE_AREA_UPPERBANK 1664
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
+#define DEF_YMM_OFFS(regnum) ymm ## regnum ## _off = ymm_off + (regnum)*16/BytesPerInt, ymm ## regnum ## H_off
#define DEF_ZMM_OFFS(regnum) zmm ## regnum ## _off = zmm_off + (regnum-16)*64/BytesPerInt, zmm ## regnum ## H_off
enum layout {
fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
-xmm_off = fpu_state_off + 160/BytesPerInt, // offset in fxsave save area
+xmm_off = fpu_state_off + XSAVE_AREA_BEGIN/BytesPerInt, // offset in fxsave save area
DEF_XMM_OFFS(0),
DEF_XMM_OFFS(1),
-DEF_XMM_OFFS(2),
-DEF_XMM_OFFS(3),
-DEF_XMM_OFFS(4),
-DEF_XMM_OFFS(5),
-DEF_XMM_OFFS(6),
-DEF_XMM_OFFS(7),
-DEF_XMM_OFFS(8),
-DEF_XMM_OFFS(9),
-DEF_XMM_OFFS(10),
-DEF_XMM_OFFS(11),
-DEF_XMM_OFFS(12),
-DEF_XMM_OFFS(13),
-DEF_XMM_OFFS(14),
-DEF_XMM_OFFS(15),
-zmm_off = fpu_state_off + ((FPUStateSizeInWords - (HALF_ZMM_BANK_WORDS + 1))*wordSize / BytesPerInt),
+// 2..15 are implied in range usage
+ymm_off = xmm_off + (XSAVE_AREA_YMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
+DEF_YMM_OFFS(0),
+DEF_YMM_OFFS(1),
+// 2..15 are implied in range usage
+zmm_high = xmm_off + (XSAVE_AREA_ZMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
+zmm_off = xmm_off + (XSAVE_AREA_UPPERBANK - XSAVE_AREA_BEGIN)/BytesPerInt,
DEF_ZMM_OFFS(16),
DEF_ZMM_OFFS(17),
-DEF_ZMM_OFFS(18),
-DEF_ZMM_OFFS(19),
-DEF_ZMM_OFFS(20),
-DEF_ZMM_OFFS(21),
-DEF_ZMM_OFFS(22),
-DEF_ZMM_OFFS(23),
-DEF_ZMM_OFFS(24),
-DEF_ZMM_OFFS(25),
-DEF_ZMM_OFFS(26),
-DEF_ZMM_OFFS(27),
-DEF_ZMM_OFFS(28),
-DEF_ZMM_OFFS(29),
-DEF_ZMM_OFFS(30),
-DEF_ZMM_OFFS(31),
+// 18..31 are implied in range usage
fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
fpu_stateH_end,
r15_off, r15H_off,
@@ -160,8 +143,6 @@ class RegisterSaver {
};
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
-int vect_words = 0;
-int ymmhi_offset = -1;
int off = 0;
int num_xmm_regs = XMMRegisterImpl::number_of_registers;
if (UseAVX < 3) {
@@ -171,24 +152,15 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
if (save_vectors) {
assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
-// Save upper half of YMM registers
-vect_words = 16 * num_xmm_regs / wordSize;
-if (UseAVX < 3) {
-  ymmhi_offset = additional_frame_words;
-  additional_frame_words += vect_words;
-}
}
#else
assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif
-// Always make the frame size 16-byte aligned
-int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
-                                   reg_save_size*BytesPerInt, num_xmm_regs);
+// Always make the frame size 16-byte aligned, both vector and non vector stacks are always allocated
+int frame_size_in_bytes = round_to(reg_save_size*BytesPerInt, num_xmm_regs);
// OopMap frame size is in compiler stack slots (jint's) not bytes or words
int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
-// The caller will allocate additional_frame_words
-int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
// CodeBlob frame size is in words.
int frame_size_in_words = frame_size_in_bytes / wordSize;
*total_frame_words = frame_size_in_words;
@@ -203,12 +175,34 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
__ push_CPU_state(); // Push a multiple of 16 bytes
// push cpu state handles this on EVEX enabled targets
-if ((vect_words > 0) && (UseAVX < 3)) {
-  assert(vect_words*wordSize >= 256, "");
-  // Save upper half of YMM registers(0..num_xmm_regs)
-  __ subptr(rsp, num_xmm_regs*16);
-  for (int n = 0; n < num_xmm_regs; n++) {
-    __ vextractf128h(Address(rsp, off++*16), as_XMMRegister(n));
-  }
-}
+if (save_vectors) {
+  // Save upper half of YMM registers(0..15)
+  int base_addr = XSAVE_AREA_YMM_BEGIN;
+  for (int n = 0; n < 16; n++) {
+    __ vextractf128h(Address(rsp, base_addr+n*16), as_XMMRegister(n));
+  }
+  if (VM_Version::supports_evex()) {
+    // Save upper half of ZMM registers(0..15)
+    base_addr = XSAVE_AREA_ZMM_BEGIN;
+    for (int n = 0; n < 16; n++) {
+      __ vextractf64x4h(Address(rsp, base_addr+n*32), as_XMMRegister(n), 1);
+    }
+    // Save full ZMM registers(16..num_xmm_regs)
+    base_addr = XSAVE_AREA_UPPERBANK;
+    int off = 0;
+    int vector_len = Assembler::AVX_512bit;
+    for (int n = 16; n < num_xmm_regs; n++) {
+      __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
+    }
+  }
+} else {
+  if (VM_Version::supports_evex()) {
+    // Save upper bank of ZMM registers(16..31) for double/float usage
+    int base_addr = XSAVE_AREA_UPPERBANK;
+    int off = 0;
+    for (int n = 16; n < num_xmm_regs; n++) {
+      __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
+    }
+  }
+}
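The new XSAVE_AREA_* constants match the standard (non-compacted) Intel XSAVE memory layout, where the legacy FXSAVE XMM storage begins at byte 160, the YMM high halves at 576, the ZMM high halves at 1152, and the upper-bank registers zmm16..zmm31 at 1664. A small standalone check of that arithmetic (illustrative only, not HotSpot code):

#include <cassert>
int main() {
  const int xmm = 160, ymm_hi = 576, zmm_hi = 1152, upper_bank = 1664;
  assert(xmm + 16 * 16 <= ymm_hi);        // 16 XMM slots fit in the legacy area
  assert(ymm_hi + 16 * 16 <= zmm_hi);     // 16 YMM high halves, 16 bytes each
  assert(zmm_hi + 16 * 32 == upper_bank); // 16 ZMM high halves, 32 bytes each
  return 0;                               // zmm16..zmm31 follow, 64 bytes each
}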
if (frame::arg_reg_save_area_bytes != 0) {
@@ -224,8 +218,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
OopMapSet *oop_maps = new OopMapSet();
OopMap* map = new OopMap(frame_size_in_slots, 0);
-#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots)
-#define YMMHI_STACK_OFFSET(x) VMRegImpl::stack2reg((x / VMRegImpl::stack_slot_size) + ymmhi_offset)
+#define STACK_OFFSET(x) VMRegImpl::stack2reg((x))
map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
@@ -257,31 +250,21 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
off = zmm16_off;
delta = zmm17_off - off;
for (int n = 16; n < num_xmm_regs; n++) {
-  XMMRegister xmm_name = as_XMMRegister(n);
-  map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
+  XMMRegister zmm_name = as_XMMRegister(n);
+  map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg());
  off += delta;
}
}
#if defined(COMPILER2) || INCLUDE_JVMCI
if (save_vectors) {
-  assert(ymmhi_offset != -1, "save area must exist");
-  map->set_callee_saved(YMMHI_STACK_OFFSET(  0), xmm0->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET( 16), xmm1->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET( 32), xmm2->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET( 48), xmm3->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET( 64), xmm4->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET( 80), xmm5->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET( 96), xmm6->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET(112), xmm7->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET(128), xmm8->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET(144), xmm9->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET(160), xmm10->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET(176), xmm11->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET(192), xmm12->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET(208), xmm13->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET(224), xmm14->as_VMReg()->next(4));
-  map->set_callee_saved(YMMHI_STACK_OFFSET(240), xmm15->as_VMReg()->next(4));
+  off = ymm0_off;
+  int delta = ymm1_off - off;
+  for (int n = 0; n < 16; n++) {
+    XMMRegister ymm_name = as_XMMRegister(n);
+    map->set_callee_saved(STACK_OFFSET(off), ymm_name->as_VMReg()->next(4));
+    off += delta;
+  }
}
#endif // COMPILER2 || INCLUDE_JVMCI
@@ -316,8 +299,8 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
off = zmm16H_off;
delta = zmm17H_off - off;
for (int n = 16; n < num_xmm_regs; n++) {
-  XMMRegister xmm_name = as_XMMRegister(n);
-  map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg()->next());
+  XMMRegister zmm_name = as_XMMRegister(n);
+  map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next());
  off += delta;
}
}
@@ -335,21 +318,48 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_ve
// Pop arg register save area
__ addptr(rsp, frame::arg_reg_save_area_bytes);
}
#if defined(COMPILER2) || INCLUDE_JVMCI
-// On EVEX enabled targets everything is handled in pop fpu state
-if ((restore_vectors) && (UseAVX < 3)) {
-  assert(UseAVX > 0, "256/512-bit vectors are supported only with AVX");
-  assert(MaxVectorSize == 64, "up to 512bit vectors are supported now");
-  int off = 0;
-  // Restore upper half of YMM registers (0..num_xmm_regs)
-  for (int n = 0; n < num_xmm_regs; n++) {
-    __ vinsertf128h(as_XMMRegister(n), Address(rsp, off++*16));
-  }
-  __ addptr(rsp, num_xmm_regs*16);
+if (restore_vectors) {
+  assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
+  assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
}
#else
-assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
+assert(!save_vectors, "vectors are generated only by C2");
#endif
+// On EVEX enabled targets everything is handled in pop fpu state
+if (restore_vectors) {
+  // Restore upper half of YMM registers (0..15)
+  int base_addr = XSAVE_AREA_YMM_BEGIN;
+  for (int n = 0; n < 16; n++) {
+    __ vinsertf128h(as_XMMRegister(n), Address(rsp, base_addr+n*16));
+  }
+  if (VM_Version::supports_evex()) {
+    // Restore upper half of ZMM registers (0..15)
+    base_addr = XSAVE_AREA_ZMM_BEGIN;
+    for (int n = 0; n < 16; n++) {
+      __ vinsertf64x4h(as_XMMRegister(n), Address(rsp, base_addr+n*32), 1);
+    }
+    // Restore full ZMM registers(16..num_xmm_regs)
+    base_addr = XSAVE_AREA_UPPERBANK;
+    int vector_len = Assembler::AVX_512bit;
+    int off = 0;
+    for (int n = 16; n < num_xmm_regs; n++) {
+      __ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
+    }
+  }
+} else {
+  if (VM_Version::supports_evex()) {
+    // Restore upper bank of ZMM registers(16..31) for double/float usage
+    int base_addr = XSAVE_AREA_UPPERBANK;
+    int off = 0;
+    for (int n = 16; n < num_xmm_regs; n++) {
+      __ movsd(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)));
+    }
+  }
+}
// Recover CPU state
__ pop_CPU_state();
// Get the rbp described implicitly by the calling convention (no oopMap)
@@ -2819,6 +2829,7 @@ void SharedRuntime::generate_deopt_blob() {
__ movl(r14, (int32_t)Deoptimization::Unpack_reexecute);
__ mov(c_rarg0, r15_thread);
+__ movl(c_rarg2, r14); // exec mode
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
@@ -2905,6 +2916,7 @@ void SharedRuntime::generate_deopt_blob() {
}
#endif // ASSERT
__ mov(c_rarg0, r15_thread);
+__ movl(c_rarg1, r14); // exec_mode
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
// Need to have an oopmap that tells fetch_unroll_info where to
@@ -2922,6 +2934,7 @@ void SharedRuntime::generate_deopt_blob() {
// Load UnrollBlock* into rdi
__ mov(rdi, rax);
+__ movl(r14, Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
Label noException;
__ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
__ jcc(Assembler::notEqual, noException);
@@ -3140,6 +3153,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
__ mov(c_rarg0, r15_thread);
+__ movl(c_rarg2, Deoptimization::Unpack_uncommon_trap);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
// Set an oopmap for the call site
@@ -3155,6 +3169,16 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Load UnrollBlock* into rdi
__ mov(rdi, rax);
+#ifdef ASSERT
+{ Label L;
+  __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
+            (int32_t)Deoptimization::Unpack_uncommon_trap);
+  __ jcc(Assembler::equal, L);
+  __ stop("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
+  __ bind(L);
+}
+#endif
// Pop all the frames we must move/replace.
//
// Frame picture (youngest to oldest)
@@ -273,7 +273,7 @@ class StubGenerator: public StubCodeGenerator {
if (UseAVX > 2) {
last_reg = 31;
}
-if (VM_Version::supports_avx512novl()) {
+if (VM_Version::supports_evex()) {
for (int i = xmm_save_first; i <= last_reg; i++) {
__ vextractf32x4h(xmm_save(i), as_XMMRegister(i), 0);
}
@@ -391,7 +391,7 @@ class StubGenerator: public StubCodeGenerator {
// restore regs belonging to calling function
#ifdef _WIN64
// emit the restores for xmm regs
-if (VM_Version::supports_avx512novl()) {
+if (VM_Version::supports_evex()) {
for (int i = xmm_save_first; i <= last_reg; i++) {
__ vinsertf32x4h(as_XMMRegister(i), xmm_save(i), 0);
}
@@ -891,7 +891,7 @@ void VM_Version::get_processor_features() {
UseNewLongLShift = true;
}
if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
-if( supports_sse4a() ) {
+if (supports_sse4a()) {
UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
} else {
UseXmmLoadAndClearUpper = false;
@@ -552,6 +552,19 @@ protected:
break;
}
}
+// zmm_save will be set on an EVEX enabled machine even if we choose AVX code gen
+if (retVal == false) {
+  // Verify that OS save/restore all bits of EVEX registers
+  // during signal processing.
+  int nreg = 2 LP64_ONLY(+2);
+  retVal = true;
+  for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
+    if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
+      retVal = false;
+      break;
+    }
+  }
+}
}
return retVal;
}
@@ -706,6 +719,9 @@ public:
static bool supports_avx512vl() { return (_cpuFeatures & CPU_AVX512VL) != 0; }
static bool supports_avx512vlbw() { return (supports_avx512bw() && supports_avx512vl()); }
static bool supports_avx512novl() { return (supports_evex() && !supports_avx512vl()); }
+static bool supports_avx512nobw() { return (supports_evex() && !supports_avx512bw()); }
+static bool supports_avx256only() { return (supports_avx2() && !supports_evex()); }
+static bool supports_avxonly() { return ((supports_avx2() || supports_avx()) && !supports_evex()); }
// Intel features
static bool is_intel_family_core() { return is_intel() &&
extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }
File diff suppressed because it is too large.
@@ -291,9 +291,7 @@ static int pre_call_resets_size() {
size += 6; // fldcw
}
if (C->max_vector_size() > 16) {
-if(UseAVX <= 2) {
-  size += 3; // vzeroupper
-}
+size += 3; // vzeroupper
}
return size;
}
@@ -1915,7 +1913,7 @@ encode %{
if (stub == NULL) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
}
%}
@@ -536,11 +536,7 @@ source %{
#define __ _masm.
static int clear_avx_size() {
-if(UseAVX > 2) {
-  return 0; // vzeroupper is ignored
-} else {
-  return (Compile::current()->max_vector_size() > 16) ? 3 : 0; // vzeroupper
-}
+return (Compile::current()->max_vector_size() > 16) ? 3 : 0; // vzeroupper
}
// !!!!! Special hack to get all types of calls to specify the byte offset
@@ -871,7 +867,7 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
if (framesize > 0) {
st->print("\n\t");
st->print("addq rbp, #%d", framesize);
}
}
}
@@ -497,12 +497,15 @@ int CppInterpreter::accessor_entry(Method* method, intptr_t UNUSED, TRAPS) {
// 1: getfield
// 2: index
// 3: index
-// 4: ireturn/areturn
+// 4: ireturn/areturn/freturn/lreturn/dreturn
// NB this is not raw bytecode: index is in machine order
u1 *code = method->code_base();
assert(code[0] == Bytecodes::_aload_0 &&
       code[1] == Bytecodes::_getfield &&
       (code[4] == Bytecodes::_ireturn ||
+       code[4] == Bytecodes::_freturn ||
+       code[4] == Bytecodes::_lreturn ||
+       code[4] == Bytecodes::_dreturn ||
        code[4] == Bytecodes::_areturn), "should do");
u2 index = Bytes::get_native_u2(&code[2]);
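For reference, the matched accessor corresponds to the five raw bytes sketched below (illustrative only, using the standard JVM opcode values; the two index bytes are rewritten to native byte order at link time, which is why the code reads them with Bytes::get_native_u2):

// Illustrative byte values of the accessor pattern matched above.
unsigned char accessor_pattern[5] = {
  0x2a,       // aload_0
  0xb4,       // getfield
  0x00, 0x00, // 2-byte constant pool cache index, stored in machine order
  0xac        // ireturn; lreturn/freturn/dreturn/areturn are 0xad-0xb0
};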
@@ -32,6 +32,7 @@ import java.lang.reflect.Method;
import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.code.InvalidInstalledCodeException;
import jdk.vm.ci.code.TargetDescription;
+import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.hotspotvmconfig.HotSpotVMField;
import jdk.vm.ci.inittimer.InitTimer;
import jdk.vm.ci.meta.JavaType;
@@ -308,6 +309,8 @@ final class CompilerToVM {
* {@link HotSpotVMConfig#codeInstallResultCodeTooLarge},
* {@link HotSpotVMConfig#codeInstallResultDependenciesFailed} or
* {@link HotSpotVMConfig#codeInstallResultDependenciesInvalid}.
+* @throws JVMCIError if there is something wrong with the compiled code or the associated
+*         metadata.
*/
native int installCode(TargetDescription target, HotSpotCompiledCode compiledCode, InstalledCode code, HotSpotSpeculationLog speculationLog);
@@ -1677,6 +1677,7 @@ public class HotSpotVMConfig {
@HotSpotVMField(name = "Deoptimization::UnrollBlock::_caller_adjustment", type = "int", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockCallerAdjustmentOffset;
@HotSpotVMField(name = "Deoptimization::UnrollBlock::_number_of_frames", type = "int", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockNumberOfFramesOffset;
@HotSpotVMField(name = "Deoptimization::UnrollBlock::_total_frame_sizes", type = "int", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockTotalFrameSizesOffset;
+@HotSpotVMField(name = "Deoptimization::UnrollBlock::_unpack_kind", type = "int", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockUnpackKindOffset;
@HotSpotVMField(name = "Deoptimization::UnrollBlock::_frame_sizes", type = "intptr_t*", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockFrameSizesOffset;
@HotSpotVMField(name = "Deoptimization::UnrollBlock::_frame_pcs", type = "address*", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockFramePcsOffset;
@HotSpotVMField(name = "Deoptimization::UnrollBlock::_initial_info", type = "intptr_t", get = HotSpotVMField.Type.OFFSET) @Stable public int deoptimizationUnrollBlockInitialInfoOffset;
@@ -66,12 +66,12 @@ int VM_Version::platform_features(int features) {
features = generic_v9_m;
if (detect_niagara()) {
-NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on Niagara");)
+if (PrintMiscellaneous && Verbose) { tty->print_cr("Detected Linux on Niagara"); }
features = niagara1_m | T_family_m;
}
if (detect_M_family()) {
-NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on M family");)
+if (PrintMiscellaneous && Verbose) { tty->print_cr("Detected Linux on M family"); }
features = sun4v_m | generic_v9_m | M_family_m | T_family_m;
}
@@ -707,12 +707,10 @@ BlockBegin* GraphBuilder::ScopeData::block_at(int bci) {
BlockBegin* block = bci2block()->at(bci);
if (block != NULL && block == parent()->bci2block()->at(bci)) {
BlockBegin* new_block = new BlockBegin(block->bci());
-#ifndef PRODUCT
if (PrintInitialBlockList) {
tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr",
block->block_id(), block->bci(), new_block->block_id());
}
-#endif
// copy data from cloned block
new_block->set_depth_first_number(block->depth_first_number());
if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
@@ -1438,7 +1436,9 @@ void GraphBuilder::method_return(Value x) {
bool need_mem_bar = false;
if (method()->name() == ciSymbol::object_initializer_name() &&
-    (scope()->wrote_final() || (AlwaysSafeConstructors && scope()->wrote_fields()))) {
+    (scope()->wrote_final() || (AlwaysSafeConstructors && scope()->wrote_fields())
+     || (support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile())
+    )){
need_mem_bar = true;
}
@@ -1554,6 +1554,9 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
if (code == Bytecodes::_putfield) {
scope()->set_wrote_fields();
+if (field->is_volatile()) {
+  scope()->set_wrote_volatile();
+}
}
const int offset = !needs_patching ? field->offset() : -1;
@@ -3785,12 +3788,10 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, Bytecode
cont = new BlockBegin(next_bci());
// low number so that continuation gets parsed as early as possible
cont->set_depth_first_number(0);
-#ifndef PRODUCT
if (PrintInitialBlockList) {
tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d",
cont->block_id(), cont->bci(), bci());
}
-#endif
continuation_existed = false;
}
// Record number of predecessors of continuation block before
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -143,6 +143,7 @@ IRScope::IRScope(Compilation* compilation, IRScope* caller, int caller_bci, ciMe
_monitor_pairing_ok = method->has_balanced_monitors();
_wrote_final = false;
_wrote_fields = false;
+_wrote_volatile = false;
_start = NULL;
if (osr_bci == -1) {
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -151,6 +151,7 @@ class IRScope: public CompilationResourceObj {
bool _monitor_pairing_ok; // the monitor pairing info
bool _wrote_final; // has written final field
bool _wrote_fields; // has written fields
+bool _wrote_volatile; // has written volatile field
BlockBegin* _start; // the start block, successors are method entries
BitMap _requires_phi_function; // bit is set if phi functions at loop headers are necessary for a local variable
@@ -187,7 +188,8 @@ class IRScope: public CompilationResourceObj {
bool wrote_final () const { return _wrote_final; }
void set_wrote_fields() { _wrote_fields = true; }
bool wrote_fields () const { return _wrote_fields; }
+void set_wrote_volatile() { _wrote_volatile = true; }
+bool wrote_volatile () const { return _wrote_volatile; }
};
@@ -2004,7 +2004,7 @@ void LIR_OpRoundFP::print_instr(outputStream* out) const {
// LIR_Op2
void LIR_Op2::print_instr(outputStream* out) const {
-if (code() == lir_cmove) {
+if (code() == lir_cmove || code() == lir_cmp) {
print_condition(out, condition()); out->print(" ");
}
in_opr1()->print(out); out->print(" ");
@@ -1761,7 +1761,7 @@ void LIRGenerator::do_StoreField(StoreField* x) {
post_barrier(object.result(), value.result());
}
-if (is_volatile && os::is_MP()) {
+if (!support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
__ membar();
}
}
@@ -1822,6 +1822,10 @@ void LIRGenerator::do_LoadField(LoadField* x) {
address = generate_address(object.result(), x->offset(), field_type);
}
+if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
+  __ membar();
+}
bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
if (needs_atomic_access && !needs_patching) {
volatile_field_load(address, reg, info);
@@ -2238,6 +2242,10 @@ void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
LIR_Opr value = rlock_result(x, x->basic_type());
+if (support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) {
+  __ membar();
+}
get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
#if INCLUDE_ALL_GCS
@ -2395,7 +2403,7 @@ void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
if (x->is_volatile() && os::is_MP()) __ membar_release(); if (x->is_volatile() && os::is_MP()) __ membar_release();
put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile()); put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
if (x->is_volatile() && os::is_MP()) __ membar(); if (!support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) __ membar();
} }
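Taken together, the LIRGenerator hunks above move the full fence for volatile accesses on CPUs that are not multiple-copy atomic (support_IRIW_for_not_multiple_copy_atomic_cpu, e.g. PPC64): instead of fencing after every volatile store, the fence is issued before every volatile load, which is what the IRIW litmus test requires. A minimal placement sketch, not the HotSpot code; a relaxed C++ atomic plus explicit fences stand in for the LIR access and membars:

    #include <atomic>

    // `IRIW_FIX` stands in for support_IRIW_for_not_multiple_copy_atomic_cpu
    // (true on e.g. PPC64; false on TSO machines such as x86 and SPARC).
    constexpr bool IRIW_FIX = true;
    std::atomic<int> volatile_field{0};

    void volatile_store(int v) {
      std::atomic_thread_fence(std::memory_order_release);   // __ membar_release()
      volatile_field.store(v, std::memory_order_relaxed);
      if (!IRIW_FIX) {
        std::atomic_thread_fence(std::memory_order_seq_cst); // trailing __ membar()
      }
    }

    int volatile_load() {
      if (IRIW_FIX) {
        std::atomic_thread_fence(std::memory_order_seq_cst); // leading __ membar()
      }
      int v = volatile_field.load(std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);   // __ membar_acquire()
      return v;
    }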
View file
@@ -6233,9 +6233,19 @@ void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
if (prev_branch->stub() == NULL) { if (prev_branch->stub() == NULL) {
LIR_Op2* prev_cmp = NULL; LIR_Op2* prev_cmp = NULL;
// There might be a cmove inserted for profiling which depends on the same
// compare. If we change the condition of the respective compare, we have
// to take care of this cmove as well.
LIR_Op2* prev_cmove = NULL;
for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) { for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
prev_op = instructions->at(j); prev_op = instructions->at(j);
// check for the cmove
if (prev_op->code() == lir_cmove) {
assert(prev_op->as_Op2() != NULL, "cmove must be of type LIR_Op2");
prev_cmove = (LIR_Op2*)prev_op;
assert(prev_branch->cond() == prev_cmove->condition(), "should be the same");
}
if (prev_op->code() == lir_cmp) { if (prev_op->code() == lir_cmp) {
assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2"); assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
prev_cmp = (LIR_Op2*)prev_op; prev_cmp = (LIR_Op2*)prev_op;
@@ -6252,6 +6262,13 @@ void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
prev_branch->negate_cond(); prev_branch->negate_cond();
prev_cmp->set_condition(prev_branch->cond()); prev_cmp->set_condition(prev_branch->cond());
instructions->truncate(instructions->length() - 1); instructions->truncate(instructions->length() - 1);
// if we do change the condition, we have to change the cmove as well
if (prev_cmove != NULL) {
prev_cmove->set_condition(prev_branch->cond());
LIR_Opr t = prev_cmove->in_opr1();
prev_cmove->set_in_opr1(prev_cmove->in_opr2());
prev_cmove->set_in_opr2(t);
}
} }
} }
} }
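The new code above keeps a condition-negating rewrite consistent: when the shared lir_cmp has its condition flipped, a lir_cmove fed by the same compare must swap its two inputs, because selecting with the negated condition and swapped operands is value-preserving. A self-contained sketch of that identity (plain C++, not HotSpot code):

    #include <cassert>

    // cmove as a plain function: select a when cond holds, otherwise b.
    static int cmove(bool cond, int a, int b) { return cond ? a : b; }

    int main() {
      for (int c = 0; c <= 1; c++) {
        bool cond = (c != 0);
        // Negating the condition and swapping the operands together is
        // value-preserving, so both rewrites above must happen in lockstep.
        assert(cmove(cond, 7, 42) == cmove(!cond, 42, 7));
      }
      return 0;
    }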
View file
@@ -1262,6 +1262,8 @@ bool ciMethod::is_empty_method() const { FETCH_FLAG_FROM_VM(is_empty_met
bool ciMethod::is_vanilla_constructor() const { FETCH_FLAG_FROM_VM(is_vanilla_constructor); } bool ciMethod::is_vanilla_constructor() const { FETCH_FLAG_FROM_VM(is_vanilla_constructor); }
bool ciMethod::has_loops () const { FETCH_FLAG_FROM_VM(has_loops); } bool ciMethod::has_loops () const { FETCH_FLAG_FROM_VM(has_loops); }
bool ciMethod::has_jsrs () const { FETCH_FLAG_FROM_VM(has_jsrs); } bool ciMethod::has_jsrs () const { FETCH_FLAG_FROM_VM(has_jsrs); }
bool ciMethod::is_getter () const { FETCH_FLAG_FROM_VM(is_getter); }
bool ciMethod::is_setter () const { FETCH_FLAG_FROM_VM(is_setter); }
bool ciMethod::is_accessor () const { FETCH_FLAG_FROM_VM(is_accessor); } bool ciMethod::is_accessor () const { FETCH_FLAG_FROM_VM(is_accessor); }
bool ciMethod::is_initializer () const { FETCH_FLAG_FROM_VM(is_initializer); } bool ciMethod::is_initializer () const { FETCH_FLAG_FROM_VM(is_initializer); }
View file
@@ -311,6 +311,8 @@ class ciMethod : public ciMetadata {
bool is_final_method() const { return is_final() || holder()->is_final(); } bool is_final_method() const { return is_final() || holder()->is_final(); }
bool has_loops () const; bool has_loops () const;
bool has_jsrs () const; bool has_jsrs () const;
bool is_getter () const;
bool is_setter () const;
bool is_accessor () const; bool is_accessor () const;
bool is_initializer () const; bool is_initializer () const;
bool can_be_statically_bound() const { return _can_be_statically_bound; } bool can_be_statically_bound() const { return _can_be_statically_bound; }
View file
@@ -1588,6 +1588,7 @@ ciTypeFlow::Block::Block(ciTypeFlow* outer,
_exceptions = NULL; _exceptions = NULL;
_exc_klasses = NULL; _exc_klasses = NULL;
_successors = NULL; _successors = NULL;
_predecessors = new (outer->arena()) GrowableArray<Block*>(outer->arena(), 1, 0, NULL);
_state = new (outer->arena()) StateVector(outer); _state = new (outer->arena()) StateVector(outer);
JsrSet* new_jsrs = JsrSet* new_jsrs =
new (outer->arena()) JsrSet(outer->arena(), jsrs->size()); new (outer->arena()) JsrSet(outer->arena(), jsrs->size());
@@ -1771,6 +1772,12 @@ ciTypeFlow::Block::successors(ciBytecodeStream* str,
break; break;
} }
} }
// Set predecessor information
for (int i = 0; i < _successors->length(); i++) {
Block* block = _successors->at(i);
block->predecessors()->append(this);
}
} }
return _successors; return _successors;
} }
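The predecessor lists recorded above are simply the reverse edges of _successors (plus the exception edges appended in compute_exceptions below), maintained at the point where the forward edges are materialized. A hypothetical consistency check, over a plain adjacency structure rather than the ciTypeFlow types, of the invariant being maintained:

    #include <vector>

    // Hypothetical helper, not part of the patch: succ[b] holds b's forward
    // edges, pred[b] the new reverse edges. Every forward edge must have a
    // matching reverse edge -- the invariant the code above maintains.
    bool edges_consistent(const std::vector<std::vector<int>>& succ,
                          const std::vector<std::vector<int>>& pred) {
      for (int b = 0; b < (int)succ.size(); b++) {
        for (int s : succ[b]) {
          bool found = false;
          for (int p : pred[s]) {
            if (p == b) { found = true; break; }
          }
          if (!found) return false;
        }
      }
      return true;
    }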
@@ -1813,7 +1820,9 @@ void ciTypeFlow::Block::compute_exceptions() {
} else { } else {
klass = handler->catch_klass(); klass = handler->catch_klass();
} }
_exceptions->append(analyzer->block_at(bci, _jsrs)); Block* block = analyzer->block_at(bci, _jsrs);
_exceptions->append(block);
block->predecessors()->append(this);
_exc_klasses->append(klass); _exc_klasses->append(klass);
} }
} }
@@ -1909,6 +1918,18 @@ void ciTypeFlow::Block::print_on(outputStream* st) const {
st->cr(); st->cr();
} }
} }
if (_predecessors == NULL) {
st->print_cr(" No predecessor information");
} else {
int num_predecessors = _predecessors->length();
st->print_cr(" Predecessors : %d", num_predecessors);
for (int i = 0; i < num_predecessors; i++) {
Block* predecessor = _predecessors->at(i);
st->print(" ");
predecessor->print_value_on(st);
st->cr();
}
}
if (_exceptions == NULL) { if (_exceptions == NULL) {
st->print_cr(" No exception information"); st->print_cr(" No exception information");
} else { } else {
@@ -2270,6 +2291,9 @@ ciTypeFlow::Block* ciTypeFlow::clone_loop_head(Loop* lp, StateVector* temp_vecto
for (SuccIter iter(tail); !iter.done(); iter.next()) { for (SuccIter iter(tail); !iter.done(); iter.next()) {
if (iter.succ() == head) { if (iter.succ() == head) {
iter.set_succ(clone); iter.set_succ(clone);
// Update predecessor information
head->predecessors()->remove(tail);
clone->predecessors()->append(tail);
} }
} }
flow_block(tail, temp_vector, temp_set); flow_block(tail, temp_vector, temp_set);
@@ -2279,6 +2303,9 @@ ciTypeFlow::Block* ciTypeFlow::clone_loop_head(Loop* lp, StateVector* temp_vecto
for (SuccIter iter(clone); !iter.done(); iter.next()) { for (SuccIter iter(clone); !iter.done(); iter.next()) {
if (iter.succ() == head) { if (iter.succ() == head) {
iter.set_succ(clone); iter.set_succ(clone);
// Update predecessor information
head->predecessors()->remove(clone);
clone->predecessors()->append(clone);
break; break;
} }
} }
@@ -2883,6 +2910,69 @@ void ciTypeFlow::do_flow() {
} }
} }
// ------------------------------------------------------------------
// ciTypeFlow::is_dominated_by
//
// Determine if the instruction at bci is dominated by the instruction at dom_bci.
bool ciTypeFlow::is_dominated_by(int bci, int dom_bci) {
assert(!method()->has_jsrs(), "jsrs are not supported");
ResourceMark rm;
JsrSet* jsrs = new ciTypeFlow::JsrSet(NULL);
int index = _methodBlocks->block_containing(bci)->index();
int dom_index = _methodBlocks->block_containing(dom_bci)->index();
Block* block = get_block_for(index, jsrs, ciTypeFlow::no_create);
Block* dom_block = get_block_for(dom_index, jsrs, ciTypeFlow::no_create);
// Start block dominates all other blocks
if (start_block()->rpo() == dom_block->rpo()) {
return true;
}
// Dominated[i] is true if block i is dominated by dom_block
int num_blocks = _methodBlocks->num_blocks();
bool* dominated = NEW_RESOURCE_ARRAY(bool, num_blocks);
for (int i = 0; i < num_blocks; ++i) {
dominated[i] = true;
}
dominated[start_block()->rpo()] = false;
// Iterative dominator algorithm
bool changed = true;
while (changed) {
changed = false;
// Use reverse postorder iteration
for (Block* blk = _rpo_list; blk != NULL; blk = blk->rpo_next()) {
if (blk->is_start()) {
// Ignore start block
continue;
}
// The block is dominated if it is the dominating block
// itself or if all predecessors are dominated.
int index = blk->rpo();
bool dom = (index == dom_block->rpo());
if (!dom) {
// Check if all predecessors are dominated
dom = true;
for (int i = 0; i < blk->predecessors()->length(); ++i) {
Block* pred = blk->predecessors()->at(i);
if (!dominated[pred->rpo()]) {
dom = false;
break;
}
}
}
// Update dominator information
if (dominated[index] != dom) {
changed = true;
dominated[index] = dom;
}
}
}
// block dominated by dom_block?
return dominated[block->rpo()];
}
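This is the classic iterative dominance dataflow specialized to a single query: start every non-entry block as "dominated", then clear any block that has a not-dominated predecessor, until a fixed point. Iterating in reverse postorder, as above, only speeds convergence; any order reaches the same fixed point. A condensed sketch over a plain predecessor list, assuming block 0 is the entry:

    #include <vector>

    // Returns true iff block `dom` dominates block `target`; preds[b] lists
    // the predecessors of block b, and block 0 is assumed to be the entry.
    bool dominates(int dom, int target,
                   const std::vector<std::vector<int>>& preds) {
      if (dom == 0) return true;                        // entry dominates everything
      std::vector<bool> dominated(preds.size(), true);  // optimistic init
      dominated[0] = false;                             // nothing else dominates the entry
      bool changed = true;
      while (changed) {
        changed = false;
        for (int b = 1; b < (int)preds.size(); b++) {
          bool dom_b = (b == dom);                      // dom dominates itself
          if (!dom_b) {
            dom_b = true;                               // else: all preds dominated?
            for (int p : preds[b]) {
              if (!dominated[p]) { dom_b = false; break; }
            }
          }
          if (dominated[b] != dom_b) { dominated[b] = dom_b; changed = true; }
        }
      }
      return dominated[target];
    }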
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// ciTypeFlow::record_failure() // ciTypeFlow::record_failure()
// The ciTypeFlow object keeps track of failure reasons separately from the ciEnv. // The ciTypeFlow object keeps track of failure reasons separately from the ciEnv.
View file
@@ -529,6 +529,7 @@ public:
GrowableArray<Block*>* _exceptions; GrowableArray<Block*>* _exceptions;
GrowableArray<ciInstanceKlass*>* _exc_klasses; GrowableArray<ciInstanceKlass*>* _exc_klasses;
GrowableArray<Block*>* _successors; GrowableArray<Block*>* _successors;
GrowableArray<Block*>* _predecessors;
StateVector* _state; StateVector* _state;
JsrSet* _jsrs; JsrSet* _jsrs;
@@ -617,6 +618,12 @@ public:
return _successors; return _successors;
} }
// Predecessors of this block (including exception edges)
GrowableArray<Block*>* predecessors() {
assert(_predecessors != NULL, "must be filled in");
return _predecessors;
}
// Get the exceptional successors for this Block. // Get the exceptional successors for this Block.
GrowableArray<Block*>* exceptions() { GrowableArray<Block*>* exceptions() {
if (_exceptions == NULL) { if (_exceptions == NULL) {
@@ -941,6 +948,9 @@ public:
// Perform type inference flow analysis. // Perform type inference flow analysis.
void do_flow(); void do_flow();
// Determine if bci is dominated by dom_bci
bool is_dominated_by(int bci, int dom_bci);
void print_on(outputStream* st) const PRODUCT_RETURN; void print_on(outputStream* st) const PRODUCT_RETURN;
void rpo_print_on(outputStream* st) const PRODUCT_RETURN; void rpo_print_on(outputStream* st) const PRODUCT_RETURN;
View file
@@ -133,13 +133,9 @@ class CodeBlob_sizes {
address CodeCache::_low_bound = 0; address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0; address CodeCache::_high_bound = 0;
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0; int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false; bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL; nmethod* CodeCache::_scavenge_root_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;
// Initialize array of CodeHeaps // Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true); GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
@@ -420,42 +416,41 @@ CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool strict) {
} }
} }
print_trace("allocation", cb, size); print_trace("allocation", cb, size);
_number_of_blobs++;
return cb; return cb;
} }
void CodeCache::free(CodeBlob* cb) { void CodeCache::free(CodeBlob* cb) {
assert_locked_or_safepoint(CodeCache_lock); assert_locked_or_safepoint(CodeCache_lock);
CodeHeap* heap = get_code_heap(cb);
print_trace("free", cb); print_trace("free", cb);
if (cb->is_nmethod()) { if (cb->is_nmethod()) {
_number_of_nmethods--; heap->set_nmethod_count(heap->nmethod_count() - 1);
if (((nmethod *)cb)->has_dependencies()) { if (((nmethod *)cb)->has_dependencies()) {
_number_of_nmethods_with_dependencies--; _number_of_nmethods_with_dependencies--;
} }
} }
if (cb->is_adapter_blob()) { if (cb->is_adapter_blob()) {
_number_of_adapters--; heap->set_adapter_count(heap->adapter_count() - 1);
} }
_number_of_blobs--;
// Get heap for given CodeBlob and deallocate // Get heap for given CodeBlob and deallocate
get_code_heap(cb)->deallocate(cb); get_code_heap(cb)->deallocate(cb);
assert(_number_of_blobs >= 0, "sanity check"); assert(heap->blob_count() >= 0, "sanity check");
} }
void CodeCache::commit(CodeBlob* cb) { void CodeCache::commit(CodeBlob* cb) {
// this is called by nmethod::nmethod, which must already own CodeCache_lock // this is called by nmethod::nmethod, which must already own CodeCache_lock
assert_locked_or_safepoint(CodeCache_lock); assert_locked_or_safepoint(CodeCache_lock);
CodeHeap* heap = get_code_heap(cb);
if (cb->is_nmethod()) { if (cb->is_nmethod()) {
_number_of_nmethods++; heap->set_nmethod_count(heap->nmethod_count() + 1);
if (((nmethod *)cb)->has_dependencies()) { if (((nmethod *)cb)->has_dependencies()) {
_number_of_nmethods_with_dependencies++; _number_of_nmethods_with_dependencies++;
} }
} }
if (cb->is_adapter_blob()) { if (cb->is_adapter_blob()) {
_number_of_adapters++; heap->set_adapter_count(heap->adapter_count() + 1);
} }
// flush the hardware I-cache // flush the hardware I-cache
@@ -577,11 +572,9 @@ void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
assert(cur->on_scavenge_root_list(), "else shouldn't be on this list"); assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
bool is_live = (!cur->is_zombie() && !cur->is_unloaded()); bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
if (TraceScavenge) { if (TraceScavenge) {
cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr(); cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
} }
#endif //PRODUCT
if (is_live) { if (is_live) {
// Perform cur->oops_do(f), maybe just once per nmethod. // Perform cur->oops_do(f), maybe just once per nmethod.
f->do_code_blob(cur); f->do_code_blob(cur);
@@ -774,6 +767,55 @@ void CodeCache::verify_oops() {
} }
} }
int CodeCache::blob_count(int code_blob_type) {
CodeHeap* heap = get_code_heap(code_blob_type);
return (heap != NULL) ? heap->blob_count() : 0;
}
int CodeCache::blob_count() {
int count = 0;
FOR_ALL_HEAPS(heap) {
count += (*heap)->blob_count();
}
return count;
}
int CodeCache::nmethod_count(int code_blob_type) {
CodeHeap* heap = get_code_heap(code_blob_type);
return (heap != NULL) ? heap->nmethod_count() : 0;
}
int CodeCache::nmethod_count() {
int count = 0;
FOR_ALL_HEAPS(heap) {
count += (*heap)->nmethod_count();
}
return count;
}
int CodeCache::adapter_count(int code_blob_type) {
CodeHeap* heap = get_code_heap(code_blob_type);
return (heap != NULL) ? heap->adapter_count() : 0;
}
int CodeCache::adapter_count() {
int count = 0;
FOR_ALL_HEAPS(heap) {
count += (*heap)->adapter_count();
}
return count;
}
address CodeCache::low_bound(int code_blob_type) {
CodeHeap* heap = get_code_heap(code_blob_type);
return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}
address CodeCache::high_bound(int code_blob_type) {
CodeHeap* heap = get_code_heap(code_blob_type);
return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}
size_t CodeCache::capacity() { size_t CodeCache::capacity() {
size_t cap = 0; size_t cap = 0;
FOR_ALL_HEAPS(heap) { FOR_ALL_HEAPS(heap) {
@@ -863,6 +905,9 @@ void CodeCache::initialize() {
initialize_heaps(); initialize_heaps();
} else { } else {
// Use a single code heap // Use a single code heap
FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize); ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
add_heap(rs, "CodeCache", CodeBlobType::All); add_heap(rs, "CodeCache", CodeBlobType::All);
} }
@@ -1104,9 +1149,8 @@ void CodeCache::report_codemem_full(int code_blob_type, bool print) {
CodeHeap* heap = get_code_heap(code_blob_type); CodeHeap* heap = get_code_heap(code_blob_type);
assert(heap != NULL, "heap is null"); assert(heap != NULL, "heap is null");
if (!heap->was_full() || print) { if ((heap->full_count() == 0) || print) {
// Not yet reported for this heap, report // Not yet reported for this heap, report
heap->report_full();
if (SegmentedCodeCache) { if (SegmentedCodeCache) {
warning("%s is full. Compiler has been disabled.", get_code_heap_name(code_blob_type)); warning("%s is full. Compiler has been disabled.", get_code_heap_name(code_blob_type));
warning("Try increasing the code heap size using -XX:%s=", get_code_heap_flag_name(code_blob_type)); warning("Try increasing the code heap size using -XX:%s=", get_code_heap_flag_name(code_blob_type));
@@ -1125,18 +1169,19 @@ void CodeCache::report_codemem_full(int code_blob_type, bool print) {
tty->print("%s", s.as_string()); tty->print("%s", s.as_string());
} }
_codemem_full_count++; heap->report_full();
EventCodeCacheFull event; EventCodeCacheFull event;
if (event.should_commit()) { if (event.should_commit()) {
event.set_codeBlobType((u1)code_blob_type); event.set_codeBlobType((u1)code_blob_type);
event.set_startAddress((u8)heap->low_boundary()); event.set_startAddress((u8)heap->low_boundary());
event.set_commitedTopAddress((u8)heap->high()); event.set_commitedTopAddress((u8)heap->high());
event.set_reservedTopAddress((u8)heap->high_boundary()); event.set_reservedTopAddress((u8)heap->high_boundary());
event.set_entryCount(nof_blobs()); event.set_entryCount(heap->blob_count());
event.set_methodCount(nof_nmethods()); event.set_methodCount(heap->nmethod_count());
event.set_adaptorCount(nof_adapters()); event.set_adaptorCount(heap->adapter_count());
event.set_unallocatedCapacity(heap->unallocated_capacity()/K); event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
event.set_fullCount(_codemem_full_count); event.set_fullCount(heap->full_count());
event.commit(); event.commit();
} }
} }
@@ -1360,7 +1405,7 @@ void CodeCache::print_summary(outputStream* st, bool detailed) {
if (detailed) { if (detailed) {
st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
" adapters=" UINT32_FORMAT, " adapters=" UINT32_FORMAT,
nof_blobs(), nof_nmethods(), nof_adapters()); blob_count(), nmethod_count(), adapter_count());
st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ? st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
"enabled" : Arguments::mode() == Arguments::_int ? "enabled" : Arguments::mode() == Arguments::_int ?
"disabled (interpreter mode)" : "disabled (interpreter mode)" :
@@ -1392,6 +1437,6 @@ void CodeCache::print_layout(outputStream* st) {
void CodeCache::log_state(outputStream* st) { void CodeCache::log_state(outputStream* st) {
st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
" adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
nof_blobs(), nof_nmethods(), nof_adapters(), blob_count(), nmethod_count(), adapter_count(),
unallocated_capacity()); unallocated_capacity());
} }
View file
@@ -85,13 +85,9 @@ class CodeCache : AllStatic {
static address _low_bound; // Lower bound of CodeHeap addresses static address _low_bound; // Lower bound of CodeHeap addresses
static address _high_bound; // Upper bound of CodeHeap addresses static address _high_bound; // Upper bound of CodeHeap addresses
static int _number_of_blobs; // Total number of CodeBlobs in the cache
static int _number_of_adapters; // Total number of Adapters in the cache
static int _number_of_nmethods; // Total number of nmethods in the cache
static int _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies static int _number_of_nmethods_with_dependencies; // Total number of nmethods with dependencies
static bool _needs_cache_clean; // True if inline caches of the nmethods needs to be flushed static bool _needs_cache_clean; // True if inline caches of the nmethods needs to be flushed
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link() static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
static int _codemem_full_count; // Number of times a CodeHeap in the cache was full
static void mark_scavenge_root_nmethods() PRODUCT_RETURN; static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN; static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
@@ -104,7 +100,6 @@ class CodeCache : AllStatic {
static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType static CodeHeap* get_code_heap(int code_blob_type); // Returns the CodeHeap for the given CodeBlobType
// Returns the name of the VM option to set the size of the corresponding CodeHeap // Returns the name of the VM option to set the size of the corresponding CodeHeap
static const char* get_code_heap_flag_name(int code_blob_type); static const char* get_code_heap_flag_name(int code_blob_type);
static bool heap_available(int code_blob_type); // Returns true if an own CodeHeap for the given CodeBlobType is available
static size_t heap_alignment(); // Returns the alignment of the CodeHeaps in bytes static size_t heap_alignment(); // Returns the alignment of the CodeHeaps in bytes
static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps static ReservedCodeSpace reserve_heap_memory(size_t size); // Reserves one continuous chunk of memory for the CodeHeaps
@@ -139,9 +134,12 @@ class CodeCache : AllStatic {
static CodeBlob* find_blob_unsafe(void* start); // Same as find_blob but does not fail if looking up a zombie method static CodeBlob* find_blob_unsafe(void* start); // Same as find_blob but does not fail if looking up a zombie method
static nmethod* find_nmethod(void* start); // Returns the nmethod containing the given address static nmethod* find_nmethod(void* start); // Returns the nmethod containing the given address
static int nof_blobs() { return _number_of_blobs; } // Returns the total number of CodeBlobs in the cache static int blob_count(); // Returns the total number of CodeBlobs in the cache
static int nof_adapters() { return _number_of_adapters; } // Returns the total number of Adapters in the cache static int blob_count(int code_blob_type);
static int nof_nmethods() { return _number_of_nmethods; } // Returns the total number of nmethods in the cache static int adapter_count(); // Returns the total number of Adapters in the cache
static int adapter_count(int code_blob_type);
static int nmethod_count(); // Returns the total number of nmethods in the cache
static int nmethod_count(int code_blob_type);
// GC support // GC support
static void gc_epilogue(); static void gc_epilogue();
@@ -177,7 +175,9 @@ class CodeCache : AllStatic {
// The full limits of the codeCache // The full limits of the codeCache
static address low_bound() { return _low_bound; } static address low_bound() { return _low_bound; }
static address low_bound(int code_blob_type);
static address high_bound() { return _high_bound; } static address high_bound() { return _high_bound; }
static address high_bound(int code_blob_type);
// Profiling // Profiling
static size_t capacity(); static size_t capacity();
@@ -191,6 +191,9 @@ class CodeCache : AllStatic {
static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; } static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
static void clear_inline_caches(); // clear all inline caches static void clear_inline_caches(); // clear all inline caches
// Returns true if an own CodeHeap for the given CodeBlobType is available
static bool heap_available(int code_blob_type);
// Returns the CodeBlobType for the given nmethod // Returns the CodeBlobType for the given nmethod
static int get_code_blob_type(nmethod* nm) { static int get_code_blob_type(nmethod* nm) {
return get_code_heap(nm)->code_blob_type(); return get_code_heap(nm)->code_blob_type();
@@ -239,7 +242,10 @@ class CodeCache : AllStatic {
// tells how many nmethods have dependencies // tells how many nmethods have dependencies
static int number_of_nmethods_with_dependencies(); static int number_of_nmethods_with_dependencies();
static int get_codemem_full_count() { return _codemem_full_count; } static int get_codemem_full_count(int code_blob_type) {
CodeHeap* heap = get_code_heap(code_blob_type);
return (heap != NULL) ? heap->full_count() : 0;
}
}; };
View file
@@ -1539,7 +1539,7 @@ void nmethod::flush() {
if (PrintMethodFlushing) { if (PrintMethodFlushing) {
tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
"/Free CodeCache:" SIZE_FORMAT "Kb", "/Free CodeCache:" SIZE_FORMAT "Kb",
_compile_id, p2i(this), CodeCache::nof_blobs(), _compile_id, p2i(this), CodeCache::blob_count(),
CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024); CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024);
} }
@@ -1819,9 +1819,7 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred)
if (_jvmci_installed_code != NULL) { if (_jvmci_installed_code != NULL) {
if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) { if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
if (!is_alive->do_object_b(_jvmci_installed_code)) { if (!is_alive->do_object_b(_jvmci_installed_code)) {
bs->write_ref_nmethod_pre(&_jvmci_installed_code, this); clear_jvmci_installed_code();
_jvmci_installed_code = NULL;
bs->write_ref_nmethod_post(&_jvmci_installed_code, this);
} }
} else { } else {
if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) { if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
@@ -1922,27 +1920,6 @@ bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_
unloading_occurred = true; unloading_occurred = true;
} }
#if INCLUDE_JVMCI
// Follow JVMCI method
if (_jvmci_installed_code != NULL) {
if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
if (!is_alive->do_object_b(_jvmci_installed_code)) {
_jvmci_installed_code = NULL;
}
} else {
if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
return false;
}
}
}
if (_speculation_log != NULL) {
if (!is_alive->do_object_b(_speculation_log)) {
_speculation_log = NULL;
}
}
#endif
// Exception cache // Exception cache
clean_exception_cache(is_alive); clean_exception_cache(is_alive);
@@ -2006,9 +1983,7 @@ bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_
if (_jvmci_installed_code != NULL) { if (_jvmci_installed_code != NULL) {
if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) { if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
if (!is_alive->do_object_b(_jvmci_installed_code)) { if (!is_alive->do_object_b(_jvmci_installed_code)) {
bs->write_ref_nmethod_pre(&_jvmci_installed_code, this); clear_jvmci_installed_code();
_jvmci_installed_code = NULL;
bs->write_ref_nmethod_post(&_jvmci_installed_code, this);
} }
} else { } else {
if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) { if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
@@ -2271,7 +2246,7 @@ bool nmethod::test_set_oops_do_mark() {
break; break;
} }
// Mark was clear when we first saw this guy. // Mark was clear when we first saw this guy.
NOT_PRODUCT(if (TraceScavenge) print_on(tty, "oops_do, mark")); if (TraceScavenge) { print_on(tty, "oops_do, mark"); }
return false; return false;
} }
} }
@@ -2280,7 +2255,7 @@ bool nmethod::test_set_oops_do_mark() {
} }
void nmethod::oops_do_marking_prologue() { void nmethod::oops_do_marking_prologue() {
NOT_PRODUCT(if (TraceScavenge) tty->print_cr("[oops_do_marking_prologue")); if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row"); assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
// We use cmpxchg_ptr instead of regular assignment here because the user // We use cmpxchg_ptr instead of regular assignment here because the user
// may fork a bunch of threads, and we need them all to see the same state. // may fork a bunch of threads, and we need them all to see the same state.
@@ -2302,7 +2277,7 @@ void nmethod::oops_do_marking_epilogue() {
void* required = _oops_do_mark_nmethods; void* required = _oops_do_mark_nmethods;
void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required); void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
guarantee(observed == required, "no races in this sequential code"); guarantee(observed == required, "no races in this sequential code");
NOT_PRODUCT(if (TraceScavenge) tty->print_cr("oops_do_marking_epilogue]")); if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
} }
class DetectScavengeRoot: public OopClosure { class DetectScavengeRoot: public OopClosure {
@@ -3373,6 +3348,14 @@ void nmethod::print_statistics() {
#endif // !PRODUCT #endif // !PRODUCT
#if INCLUDE_JVMCI #if INCLUDE_JVMCI
void nmethod::clear_jvmci_installed_code() {
// This must be done carefully to maintain nmethod remembered sets properly
BarrierSet* bs = Universe::heap()->barrier_set();
bs->write_ref_nmethod_pre(&_jvmci_installed_code, this);
_jvmci_installed_code = NULL;
bs->write_ref_nmethod_post(&_jvmci_installed_code, this);
}
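Centralizing the clear behind clear_jvmci_installed_code() ensures that every site nulling the embedded oop goes through the barrier-set pre/post hooks (the parallel-unloading path removed above had been skipping them). A sketch of the general pattern with stand-in types; only the two barrier calls mirror the patch:

    // Stand-in types for illustration; not the HotSpot declarations.
    struct Oop; struct Nmethod;
    struct BarrierSet {
      virtual void write_ref_nmethod_pre (Oop** field, Nmethod* nm) = 0;
      virtual void write_ref_nmethod_post(Oop** field, Nmethod* nm) = 0;
    };

    // Any store to an oop field embedded in code is bracketed by the
    // collector's hooks, so e.g. G1 can enqueue the old value (SATB) and
    // update remembered sets; skipping them risks a stale reference.
    void clear_embedded_oop(Oop** field, Nmethod* nm, BarrierSet* bs) {
      bs->write_ref_nmethod_pre(field, nm);
      *field = nullptr;
      bs->write_ref_nmethod_post(field, nm);
    }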
void nmethod::maybe_invalidate_installed_code() { void nmethod::maybe_invalidate_installed_code() {
if (_jvmci_installed_code != NULL) { if (_jvmci_installed_code != NULL) {
if (!is_alive()) { if (!is_alive()) {
@@ -3382,7 +3365,7 @@ void nmethod::maybe_invalidate_installed_code() {
// might want to invalidate all existing activations. // might want to invalidate all existing activations.
InstalledCode::set_address(_jvmci_installed_code, 0); InstalledCode::set_address(_jvmci_installed_code, 0);
InstalledCode::set_entryPoint(_jvmci_installed_code, 0); InstalledCode::set_entryPoint(_jvmci_installed_code, 0);
_jvmci_installed_code = NULL; clear_jvmci_installed_code();
} else if (is_not_entrant()) { } else if (is_not_entrant()) {
InstalledCode::set_entryPoint(_jvmci_installed_code, 0); InstalledCode::set_entryPoint(_jvmci_installed_code, 0);
} }

View file

@@ -602,7 +602,7 @@ public:
#if INCLUDE_JVMCI #if INCLUDE_JVMCI
oop jvmci_installed_code() { return _jvmci_installed_code ; } oop jvmci_installed_code() { return _jvmci_installed_code ; }
char* jvmci_installed_code_name(char* buf, size_t buflen); char* jvmci_installed_code_name(char* buf, size_t buflen);
void set_jvmci_installed_code(oop installed_code) { _jvmci_installed_code = installed_code; } void clear_jvmci_installed_code();
void maybe_invalidate_installed_code(); void maybe_invalidate_installed_code();
oop speculation_log() { return _speculation_log ; } oop speculation_log() { return _speculation_log ; }
void set_speculation_log(oop speculation_log) { _speculation_log = speculation_log; } void set_speculation_log(oop speculation_log) { _speculation_log = speculation_log; }
View file
@@ -1716,7 +1716,8 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
EventCompilation event; EventCompilation event;
JVMCIEnv env(task, system_dictionary_modification_counter); JVMCIEnv env(task, system_dictionary_modification_counter);
jvmci->compile_method(target_handle, osr_bci, &env); methodHandle method(thread, target_handle);
jvmci->compile_method(method, osr_bci, &env);
post_compile(thread, task, event, task->code() != NULL, NULL); post_compile(thread, task, event, task->code() != NULL, NULL);
} else } else
View file
@@ -67,7 +67,7 @@
cflags(VectorizeDebug, bool, false, VectorizeDebug) \ cflags(VectorizeDebug, bool, false, VectorizeDebug) \
cflags(CloneMapDebug, bool, false, CloneMapDebug) \ cflags(CloneMapDebug, bool, false, CloneMapDebug) \
cflags(DoReserveCopyInSuperWordDebug, bool, false, DoReserveCopyInSuperWordDebug) \ cflags(DoReserveCopyInSuperWordDebug, bool, false, DoReserveCopyInSuperWordDebug) \
NOT_PRODUCT( cflags(IGVPrintLevel, intx, PrintIdealGraphLevel, IGVPrintLevel)) \ cflags(IGVPrintLevel, intx, PrintIdealGraphLevel, IGVPrintLevel) \
cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit) cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit)
#else #else
#define compilerdirectives_c2_flags(cflags) #define compilerdirectives_c2_flags(cflags)
View file
@@ -1148,7 +1148,6 @@ oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
} }
assert(new_obj != NULL, "just checking"); assert(new_obj != NULL, "just checking");
#ifndef PRODUCT
// This code must come after the CAS test, or it will print incorrect // This code must come after the CAS test, or it will print incorrect
// information. // information.
if (TraceScavenge) { if (TraceScavenge) {
@@ -1156,7 +1155,6 @@ oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
is_in_reserved(new_obj) ? "copying" : "tenuring", is_in_reserved(new_obj) ? "copying" : "tenuring",
new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size()); new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());
} }
#endif
if (forward_ptr == NULL) { if (forward_ptr == NULL) {
oop obj_to_push = new_obj; oop obj_to_push = new_obj;
View file
@@ -108,14 +108,11 @@ inline void ParScanClosure::do_oop_work(T* p,
if (m->is_marked()) { // Contains forwarding pointer. if (m->is_marked()) { // Contains forwarding pointer.
new_obj = ParNewGeneration::real_forwardee(obj); new_obj = ParNewGeneration::real_forwardee(obj);
oopDesc::encode_store_heap_oop_not_null(p, new_obj); oopDesc::encode_store_heap_oop_not_null(p, new_obj);
#ifndef PRODUCT
if (TraceScavenge) { if (TraceScavenge) {
gclog_or_tty->print_cr("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", gclog_or_tty->print_cr("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
"forwarded ", "forwarded ",
new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size()); new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
} }
#endif
} else { } else {
size_t obj_sz = obj->size_given_klass(objK); size_t obj_sz = obj->size_given_klass(objK);
new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m); new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
View file
@@ -430,7 +430,6 @@ oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
obj = obj->forwardee(); obj = obj->forwardee();
} }
#ifndef PRODUCT
if (TraceScavenge) { if (TraceScavenge) {
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " (%d)}", gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " (%d)}",
"promotion-failure", "promotion-failure",
@@ -438,7 +437,6 @@ oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
p2i(obj), obj->size()); p2i(obj), obj->size());
} }
#endif
return obj; return obj;
} }
View file
@@ -260,7 +260,6 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
new_obj = o->forwardee(); new_obj = o->forwardee();
} }
#ifndef PRODUCT
// This code must come after the CAS test, or it will print incorrect // This code must come after the CAS test, or it will print incorrect
// information. // information.
if (TraceScavenge) { if (TraceScavenge) {
@@ -268,7 +267,6 @@ inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
should_scavenge(&new_obj) ? "copying" : "tenuring", should_scavenge(&new_obj) ? "copying" : "tenuring",
new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size()); new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
} }
#endif
return new_obj; return new_obj;
} }
@@ -285,15 +283,13 @@ inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
? o->forwardee() ? o->forwardee()
: copy_to_survivor_space<promote_immediately>(o); : copy_to_survivor_space<promote_immediately>(o);
#ifndef PRODUCT
// This code must come after the CAS test, or it will print incorrect // This code must come after the CAS test, or it will print incorrect
// information. // information.
if (TraceScavenge && o->is_forwarded()) { if (TraceScavenge && o->is_forwarded()) {
gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
"forwarding", "forwarding",
new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size()); new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
} }
#endif
oopDesc::encode_store_heap_oop_not_null(p, new_obj); oopDesc::encode_store_heap_oop_not_null(p, new_obj);
View file
@@ -138,7 +138,6 @@ class PSScavengeKlassClosure: public KlassClosure {
// If the klass has not been dirtied we know that there's // If the klass has not been dirtied we know that there's
// no references into the young gen and we can skip it. // no references into the young gen and we can skip it.
#ifndef PRODUCT
if (TraceScavenge) { if (TraceScavenge) {
ResourceMark rm; ResourceMark rm;
gclog_or_tty->print_cr("PSScavengeKlassClosure::do_klass " PTR_FORMAT ", %s, dirty: %s", gclog_or_tty->print_cr("PSScavengeKlassClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
@@ -146,7 +145,6 @@ class PSScavengeKlassClosure: public KlassClosure {
klass->external_name(), klass->external_name(),
klass->has_modified_oops() ? "true" : "false"); klass->has_modified_oops() ? "true" : "false");
} }
#endif
if (klass->has_modified_oops()) { if (klass->has_modified_oops()) {
// Clean the klass since we're going to scavenge all the metadata. // Clean the klass since we're going to scavenge all the metadata.
View file
@@ -134,7 +134,6 @@ void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); } void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
void KlassScanClosure::do_klass(Klass* klass) { void KlassScanClosure::do_klass(Klass* klass) {
#ifndef PRODUCT
if (TraceScavenge) { if (TraceScavenge) {
ResourceMark rm; ResourceMark rm;
gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s", gclog_or_tty->print_cr("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
@@ -142,7 +141,6 @@ void KlassScanClosure::do_klass(Klass* klass) {
klass->external_name(), klass->external_name(),
klass->has_modified_oops() ? "true" : "false"); klass->has_modified_oops() ? "true" : "false");
} }
#endif
// If the klass has not been dirtied we know that there's // If the klass has not been dirtied we know that there's
// no references into the young gen and we can skip it. // no references into the young gen and we can skip it.
View file
@@ -300,7 +300,10 @@ AbstractInterpreter::MethodKind AbstractInterpreter::method_kind(methodHandle m)
} }
// Accessor method? // Accessor method?
if (m->is_accessor()) { if (m->is_getter()) {
// TODO: We should have used ::is_accessor above, but fast accessors in Zero expect only getters.
// See CppInterpreter::accessor_entry in cppInterpreter_zero.cpp. This should be fixed in Zero,
// then the call above updated to ::is_accessor
assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1"); assert(m->size_of_parameters() == 1, "fast code for accessors assumes parameter size = 1");
return accessor; return accessor;
} }
View file
@@ -71,62 +71,97 @@ Method* getMethodFromHotSpotMethod(oop hotspot_method) {
return CompilerToVM::asMethod(hotspot_method); return CompilerToVM::asMethod(hotspot_method);
} }
VMReg getVMRegFromLocation(oop location, int total_frame_size) { VMReg getVMRegFromLocation(Handle location, int total_frame_size, TRAPS) {
oop reg = code_Location::reg(location); if (location.is_null()) {
THROW_NULL(vmSymbols::java_lang_NullPointerException());
}
Handle reg = code_Location::reg(location);
jint offset = code_Location::offset(location); jint offset = code_Location::offset(location);
if (reg != NULL) { if (reg.not_null()) {
// register // register
jint number = code_Register::number(reg); jint number = code_Register::number(reg);
VMReg vmReg = CodeInstaller::get_hotspot_reg(number); VMReg vmReg = CodeInstaller::get_hotspot_reg(number, CHECK_NULL);
assert(offset % 4 == 0, "must be aligned"); if (offset % 4 == 0) {
return vmReg->next(offset / 4); return vmReg->next(offset / 4);
} else {
JVMCI_ERROR_NULL("unaligned subregister offset %d in oop map", offset);
}
} else { } else {
// stack slot // stack slot
assert(offset % 4 == 0, "must be aligned"); if (offset % 4 == 0) {
return VMRegImpl::stack2reg(offset / 4); return VMRegImpl::stack2reg(offset / 4);
} else {
JVMCI_ERROR_NULL("unaligned stack offset %d in oop map", offset);
}
} }
} }
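Both branches above share one piece of arithmetic: JVMCI locations carry byte offsets, while HotSpot VMReg numbers 4-byte slots, so an aligned byte offset maps to offset / 4 slots, and unaligned offsets are now reported as errors rather than asserted away. A trivial sketch, with -1 standing in for the error path:

    // JVMCI locations carry byte offsets; VMReg numbers 4-byte slots. -1
    // stands in for the JVMCI_ERROR_NULL path that replaces the old assert.
    int offset_to_slot(int offset) {
      if (offset % 4 != 0) return -1;   // unaligned: reported, not truncated
      return offset / 4;
    }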
// creates a HotSpot oop map out of the byte arrays provided by DebugInfo // creates a HotSpot oop map out of the byte arrays provided by DebugInfo
OopMap* CodeInstaller::create_oop_map(oop debug_info) { OopMap* CodeInstaller::create_oop_map(Handle debug_info, TRAPS) {
oop reference_map = DebugInfo::referenceMap(debug_info); Handle reference_map = DebugInfo::referenceMap(debug_info);
if (reference_map.is_null()) {
THROW_NULL(vmSymbols::java_lang_NullPointerException());
}
if (!reference_map->is_a(HotSpotReferenceMap::klass())) {
JVMCI_ERROR_NULL("unknown reference map: %s", reference_map->klass()->signature_name());
}
if (HotSpotReferenceMap::maxRegisterSize(reference_map) > 16) { if (HotSpotReferenceMap::maxRegisterSize(reference_map) > 16) {
_has_wide_vector = true; _has_wide_vector = true;
} }
OopMap* map = new OopMap(_total_frame_size, _parameter_count); OopMap* map = new OopMap(_total_frame_size, _parameter_count);
objArrayOop objects = HotSpotReferenceMap::objects(reference_map); objArrayHandle objects = HotSpotReferenceMap::objects(reference_map);
objArrayOop derivedBase = HotSpotReferenceMap::derivedBase(reference_map); objArrayHandle derivedBase = HotSpotReferenceMap::derivedBase(reference_map);
typeArrayOop sizeInBytes = HotSpotReferenceMap::sizeInBytes(reference_map); typeArrayHandle sizeInBytes = HotSpotReferenceMap::sizeInBytes(reference_map);
if (objects.is_null() || derivedBase.is_null() || sizeInBytes.is_null()) {
THROW_NULL(vmSymbols::java_lang_NullPointerException());
}
if (objects->length() != derivedBase->length() || objects->length() != sizeInBytes->length()) {
JVMCI_ERROR_NULL("arrays in reference map have different sizes: %d %d %d", objects->length(), derivedBase->length(), sizeInBytes->length());
}
for (int i = 0; i < objects->length(); i++) { for (int i = 0; i < objects->length(); i++) {
oop location = objects->obj_at(i); Handle location = objects->obj_at(i);
oop baseLocation = derivedBase->obj_at(i); Handle baseLocation = derivedBase->obj_at(i);
int bytes = sizeInBytes->int_at(i); int bytes = sizeInBytes->int_at(i);
VMReg vmReg = getVMRegFromLocation(location, _total_frame_size); VMReg vmReg = getVMRegFromLocation(location, _total_frame_size, CHECK_NULL);
if (baseLocation != NULL) { if (baseLocation.not_null()) {
// derived oop // derived oop
assert(bytes == 8, "derived oop can't be compressed"); #ifdef _LP64
VMReg baseReg = getVMRegFromLocation(baseLocation, _total_frame_size); if (bytes == 8) {
map->set_derived_oop(vmReg, baseReg); #else
if (bytes == 4) {
#endif
VMReg baseReg = getVMRegFromLocation(baseLocation, _total_frame_size, CHECK_NULL);
map->set_derived_oop(vmReg, baseReg);
} else {
JVMCI_ERROR_NULL("invalid derived oop size in ReferenceMap: %d", bytes);
}
#ifdef _LP64
} else if (bytes == 8) { } else if (bytes == 8) {
// wide oop // wide oop
map->set_oop(vmReg); map->set_oop(vmReg);
} else { } else if (bytes == 4) {
// narrow oop // narrow oop
assert(bytes == 4, "wrong size");
map->set_narrowoop(vmReg); map->set_narrowoop(vmReg);
#else
} else if (bytes == 4) {
map->set_oop(vmReg);
#endif
} else {
JVMCI_ERROR_NULL("invalid oop size in ReferenceMap: %d", bytes);
} }
} }
oop callee_save_info = (oop) DebugInfo::calleeSaveInfo(debug_info); Handle callee_save_info = (oop) DebugInfo::calleeSaveInfo(debug_info);
if (callee_save_info != NULL) { if (callee_save_info.not_null()) {
objArrayOop registers = RegisterSaveLayout::registers(callee_save_info); objArrayHandle registers = RegisterSaveLayout::registers(callee_save_info);
typeArrayOop slots = RegisterSaveLayout::slots(callee_save_info); typeArrayHandle slots = RegisterSaveLayout::slots(callee_save_info);
for (jint i = 0; i < slots->length(); i++) { for (jint i = 0; i < slots->length(); i++) {
oop jvmci_reg = registers->obj_at(i); Handle jvmci_reg = registers->obj_at(i);
jint jvmci_reg_number = code_Register::number(jvmci_reg); jint jvmci_reg_number = code_Register::number(jvmci_reg);
VMReg hotspot_reg = CodeInstaller::get_hotspot_reg(jvmci_reg_number); VMReg hotspot_reg = CodeInstaller::get_hotspot_reg(jvmci_reg_number, CHECK_NULL);
// HotSpot stack slots are 4 bytes // HotSpot stack slots are 4 bytes
jint jvmci_slot = slots->int_at(i); jint jvmci_slot = slots->int_at(i);
jint hotspot_slot = jvmci_slot * VMRegImpl::slots_per_word; jint hotspot_slot = jvmci_slot * VMRegImpl::slots_per_word;
@@ -142,7 +177,7 @@ OopMap* CodeInstaller::create_oop_map(oop debug_info) {
return map; return map;
} }
Metadata* CodeInstaller::record_metadata_reference(Handle& constant) { Metadata* CodeInstaller::record_metadata_reference(Handle constant, TRAPS) {
oop obj = HotSpotMetaspaceConstantImpl::metaspaceObject(constant); oop obj = HotSpotMetaspaceConstantImpl::metaspaceObject(constant);
if (obj->is_a(HotSpotResolvedObjectTypeImpl::klass())) { if (obj->is_a(HotSpotResolvedObjectTypeImpl::klass())) {
Klass* klass = java_lang_Class::as_Klass(HotSpotResolvedObjectTypeImpl::javaClass(obj)); Klass* klass = java_lang_Class::as_Klass(HotSpotResolvedObjectTypeImpl::javaClass(obj));
@@ -157,16 +192,18 @@ Metadata* CodeInstaller::record_metadata_reference(Handle& constant) {
TRACE_jvmci_3("metadata[%d of %d] = %s", index, _oop_recorder->metadata_count(), method->name()->as_C_string()); TRACE_jvmci_3("metadata[%d of %d] = %s", index, _oop_recorder->metadata_count(), method->name()->as_C_string());
return method; return method;
} else { } else {
fatal("unexpected metadata reference for constant of type %s", obj->klass()->name()->as_C_string()); JVMCI_ERROR_NULL("unexpected metadata reference for constant of type %s", obj->klass()->signature_name());
return NULL;
} }
} }
#ifdef _LP64 #ifdef _LP64
narrowKlass CodeInstaller::record_narrow_metadata_reference(Handle& constant) { narrowKlass CodeInstaller::record_narrow_metadata_reference(Handle constant, TRAPS) {
oop obj = HotSpotMetaspaceConstantImpl::metaspaceObject(constant); oop obj = HotSpotMetaspaceConstantImpl::metaspaceObject(constant);
assert(HotSpotMetaspaceConstantImpl::compressed(constant), "unexpected uncompressed pointer"); assert(HotSpotMetaspaceConstantImpl::compressed(constant), "unexpected uncompressed pointer");
assert(obj->is_a(HotSpotResolvedObjectTypeImpl::klass()), "unexpected compressed pointer of type %s", obj->klass()->name()->as_C_string());
if (!obj->is_a(HotSpotResolvedObjectTypeImpl::klass())) {
JVMCI_ERROR_0("unexpected compressed pointer of type %s", obj->klass()->signature_name());
}
Klass* klass = java_lang_Class::as_Klass(HotSpotResolvedObjectTypeImpl::javaClass(obj)); Klass* klass = java_lang_Class::as_Klass(HotSpotResolvedObjectTypeImpl::javaClass(obj));
int index = _oop_recorder->find_index(klass); int index = _oop_recorder->find_index(klass);
@@ -175,9 +212,9 @@ narrowKlass CodeInstaller::record_narrow_metadata_reference(Handle& constant) {
} }
#endif #endif
Location::Type CodeInstaller::get_oop_type(oop value) { Location::Type CodeInstaller::get_oop_type(Handle value) {
oop lirKind = Value::lirKind(value); Handle lirKind = Value::lirKind(value);
oop platformKind = LIRKind::platformKind(lirKind); Handle platformKind = LIRKind::platformKind(lirKind);
assert(LIRKind::referenceMask(lirKind) == 1, "unexpected referenceMask"); assert(LIRKind::referenceMask(lirKind) == 1, "unexpected referenceMask");
if (platformKind == word_kind()) { if (platformKind == word_kind()) {
@@ -187,24 +224,29 @@ Location::Type CodeInstaller::get_oop_type(oop value) {
} }
} }
ScopeValue* CodeInstaller::get_scope_value(oop value, BasicType type, GrowableArray<ScopeValue*>* objects, ScopeValue* &second) { ScopeValue* CodeInstaller::get_scope_value(Handle value, BasicType type, GrowableArray<ScopeValue*>* objects, ScopeValue* &second, TRAPS) {
second = NULL; second = NULL;
if (value == Value::ILLEGAL()) { if (value.is_null()) {
assert(type == T_ILLEGAL, "expected legal value"); THROW_NULL(vmSymbols::java_lang_NullPointerException());
} else if (value == Value::ILLEGAL()) {
if (type != T_ILLEGAL) {
JVMCI_ERROR_NULL("unexpected illegal value, expected %s", basictype_to_str(type));
}
return _illegal_value; return _illegal_value;
} else if (value->is_a(RegisterValue::klass())) { } else if (value->is_a(RegisterValue::klass())) {
oop reg = RegisterValue::reg(value); Handle reg = RegisterValue::reg(value);
jint number = code_Register::number(reg); jint number = code_Register::number(reg);
VMReg hotspotRegister = get_hotspot_reg(number); VMReg hotspotRegister = get_hotspot_reg(number, CHECK_NULL);
if (is_general_purpose_reg(hotspotRegister)) { if (is_general_purpose_reg(hotspotRegister)) {
Location::Type locationType; Location::Type locationType;
if (type == T_OBJECT) { if (type == T_OBJECT) {
locationType = get_oop_type(value); locationType = get_oop_type(value);
} else if (type == T_LONG) { } else if (type == T_LONG) {
locationType = Location::lng; locationType = Location::lng;
} else { } else if (type == T_INT || type == T_FLOAT || type == T_SHORT || type == T_CHAR || type == T_BYTE || type == T_BOOLEAN) {
assert(type == T_INT || type == T_FLOAT || type == T_SHORT || type == T_CHAR || type == T_BYTE || type == T_BOOLEAN, "unexpected type in cpu register");
locationType = Location::int_in_long; locationType = Location::int_in_long;
} else {
JVMCI_ERROR_NULL("unexpected type %s in cpu register", basictype_to_str(type));
} }
ScopeValue* value = new LocationValue(Location::new_reg_loc(locationType, hotspotRegister)); ScopeValue* value = new LocationValue(Location::new_reg_loc(locationType, hotspotRegister));
if (type == T_LONG) { if (type == T_LONG) {
@@ -212,13 +254,14 @@ ScopeValue* CodeInstaller::get_scope_value(oop value, BasicType type, GrowableAr
} }
return value; return value;
} else { } else {
assert(type == T_FLOAT || type == T_DOUBLE, "only float and double expected in xmm register");
Location::Type locationType; Location::Type locationType;
if (type == T_FLOAT) { if (type == T_FLOAT) {
// this seems weird, but the same value is used in c1_LinearScan // this seems weird, but the same value is used in c1_LinearScan
locationType = Location::normal; locationType = Location::normal;
} else { } else if (type == T_DOUBLE) {
locationType = Location::dbl; locationType = Location::dbl;
} else {
JVMCI_ERROR_NULL("unexpected type %s in floating point register", basictype_to_str(type));
} }
ScopeValue* value = new LocationValue(Location::new_reg_loc(locationType, hotspotRegister)); ScopeValue* value = new LocationValue(Location::new_reg_loc(locationType, hotspotRegister));
if (type == T_DOUBLE) { if (type == T_DOUBLE) {
@@ -239,9 +282,10 @@ ScopeValue* CodeInstaller::get_scope_value(oop value, BasicType type, GrowableAr
locationType = Location::lng; locationType = Location::lng;
} else if (type == T_DOUBLE) { } else if (type == T_DOUBLE) {
locationType = Location::dbl; locationType = Location::dbl;
} else { } else if (type == T_INT || type == T_FLOAT || type == T_SHORT || type == T_CHAR || type == T_BYTE || type == T_BOOLEAN) {
assert(type == T_INT || type == T_FLOAT || type == T_SHORT || type == T_CHAR || type == T_BYTE || type == T_BOOLEAN, "unexpected type in stack slot");
locationType = Location::normal; locationType = Location::normal;
} else {
JVMCI_ERROR_NULL("unexpected type %s in stack slot", basictype_to_str(type));
} }
ScopeValue* value = new LocationValue(Location::new_stk_loc(locationType, offset)); ScopeValue* value = new LocationValue(Location::new_stk_loc(locationType, offset));
if (type == T_DOUBLE || type == T_LONG) { if (type == T_DOUBLE || type == T_LONG) {
@@ -254,7 +298,10 @@ ScopeValue* CodeInstaller::get_scope_value(oop value, BasicType type, GrowableAr
jlong prim = PrimitiveConstant::primitive(value); jlong prim = PrimitiveConstant::primitive(value);
return new ConstantLongValue(prim); return new ConstantLongValue(prim);
} else { } else {
assert(type == JVMCIRuntime::kindToBasicType(JavaKind::typeChar(PrimitiveConstant::kind(value))), "primitive constant type doesn't match"); BasicType constantType = JVMCIRuntime::kindToBasicType(PrimitiveConstant::kind(value), CHECK_NULL);
if (type != constantType) {
JVMCI_ERROR_NULL("primitive constant type doesn't match, expected %s but got %s", basictype_to_str(type), basictype_to_str(constantType));
}
if (type == T_INT || type == T_FLOAT) { if (type == T_INT || type == T_FLOAT) {
jint prim = (jint)PrimitiveConstant::primitive(value); jint prim = (jint)PrimitiveConstant::primitive(value);
switch (prim) { switch (prim) {
@@ -264,53 +311,63 @@ ScopeValue* CodeInstaller::get_scope_value(oop value, BasicType type, GrowableAr
case 2: return _int_2_scope_value; case 2: return _int_2_scope_value;
default: return new ConstantIntValue(prim); default: return new ConstantIntValue(prim);
} }
} else { } else if (type == T_LONG || type == T_DOUBLE) {
assert(type == T_LONG || type == T_DOUBLE, "unexpected primitive constant type");
jlong prim = PrimitiveConstant::primitive(value); jlong prim = PrimitiveConstant::primitive(value);
second = _int_1_scope_value; second = _int_1_scope_value;
return new ConstantLongValue(prim); return new ConstantLongValue(prim);
} else {
JVMCI_ERROR_NULL("unexpected primitive constant type %s", basictype_to_str(type));
} }
} }
} else { } else if (value->is_a(NullConstant::klass()) || value->is_a(HotSpotCompressedNullConstant::klass())) {
assert(type == T_OBJECT, "unexpected object constant"); if (type == T_OBJECT) {
if (value->is_a(NullConstant::klass()) || value->is_a(HotSpotCompressedNullConstant::klass())) {
return _oop_null_scope_value; return _oop_null_scope_value;
} else { } else {
assert(value->is_a(HotSpotObjectConstantImpl::klass()), "unexpected constant type"); JVMCI_ERROR_NULL("unexpected null constant, expected %s", basictype_to_str(type));
}
} else if (value->is_a(HotSpotObjectConstantImpl::klass())) {
if (type == T_OBJECT) {
oop obj = HotSpotObjectConstantImpl::object(value); oop obj = HotSpotObjectConstantImpl::object(value);
assert(obj != NULL, "null value must be in NullConstant"); if (obj == NULL) {
JVMCI_ERROR_NULL("null value must be in NullConstant");
}
return new ConstantOopWriteValue(JNIHandles::make_local(obj)); return new ConstantOopWriteValue(JNIHandles::make_local(obj));
} else {
JVMCI_ERROR_NULL("unexpected object constant, expected %s", basictype_to_str(type));
} }
} }
} else if (value->is_a(VirtualObject::klass())) { } else if (value->is_a(VirtualObject::klass())) {
assert(type == T_OBJECT, "unexpected virtual object"); if (type == T_OBJECT) {
int id = VirtualObject::id(value); int id = VirtualObject::id(value);
ScopeValue* object = objects->at(id); if (0 <= id && id < objects->length()) {
assert(object != NULL, "missing value"); ScopeValue* object = objects->at(id);
return object; if (object != NULL) {
} else { return object;
value->klass()->print(); }
value->print(); }
JVMCI_ERROR_NULL("unknown virtual object id %d", id);
} else {
JVMCI_ERROR_NULL("unexpected virtual object, expected %s", basictype_to_str(type));
}
} }
ShouldNotReachHere();
return NULL; JVMCI_ERROR_NULL("unexpected value in scope: %s", value->klass()->signature_name())
} }
void CodeInstaller::record_object_value(ObjectValue* sv, oop value, GrowableArray<ScopeValue*>* objects) { void CodeInstaller::record_object_value(ObjectValue* sv, Handle value, GrowableArray<ScopeValue*>* objects, TRAPS) {
oop type = VirtualObject::type(value); Handle type = VirtualObject::type(value);
int id = VirtualObject::id(value); int id = VirtualObject::id(value);
oop javaMirror = HotSpotResolvedObjectTypeImpl::javaClass(type); oop javaMirror = HotSpotResolvedObjectTypeImpl::javaClass(type);
Klass* klass = java_lang_Class::as_Klass(javaMirror); Klass* klass = java_lang_Class::as_Klass(javaMirror);
bool isLongArray = klass == Universe::longArrayKlassObj(); bool isLongArray = klass == Universe::longArrayKlassObj();
objArrayOop values = VirtualObject::values(value); objArrayHandle values = VirtualObject::values(value);
objArrayOop slotKinds = VirtualObject::slotKinds(value); objArrayHandle slotKinds = VirtualObject::slotKinds(value);
for (jint i = 0; i < values->length(); i++) { for (jint i = 0; i < values->length(); i++) {
ScopeValue* cur_second = NULL; ScopeValue* cur_second = NULL;
oop object = values->obj_at(i); Handle object = values->obj_at(i);
oop kind = slotKinds->obj_at(i); BasicType type = JVMCIRuntime::kindToBasicType(slotKinds->obj_at(i), CHECK);
BasicType type = JVMCIRuntime::kindToBasicType(JavaKind::typeChar(kind)); ScopeValue* value = get_scope_value(object, type, objects, cur_second, CHECK);
ScopeValue* value = get_scope_value(object, type, objects, cur_second);
if (isLongArray && cur_second == NULL) { if (isLongArray && cur_second == NULL) {
// we're trying to put ints into a long array... this isn't really valid, but it's used for some optimizations. // we're trying to put ints into a long array... this isn't really valid, but it's used for some optimizations.
@ -326,14 +383,19 @@ void CodeInstaller::record_object_value(ObjectValue* sv, oop value, GrowableArra
} }
} }
MonitorValue* CodeInstaller::get_monitor_value(oop value, GrowableArray<ScopeValue*>* objects) { MonitorValue* CodeInstaller::get_monitor_value(Handle value, GrowableArray<ScopeValue*>* objects, TRAPS) {
guarantee(value->is_a(StackLockValue::klass()), "Monitors must be of type StackLockValue"); if (value.is_null()) {
THROW_NULL(vmSymbols::java_lang_NullPointerException());
}
if (!value->is_a(StackLockValue::klass())) {
JVMCI_ERROR_NULL("Monitors must be of type StackLockValue, got %s", value->klass()->signature_name());
}
ScopeValue* second = NULL; ScopeValue* second = NULL;
ScopeValue* owner_value = get_scope_value(StackLockValue::owner(value), T_OBJECT, objects, second); ScopeValue* owner_value = get_scope_value(StackLockValue::owner(value), T_OBJECT, objects, second, CHECK_NULL);
assert(second == NULL, "monitor cannot occupy two stack slots"); assert(second == NULL, "monitor cannot occupy two stack slots");
ScopeValue* lock_data_value = get_scope_value(StackLockValue::slot(value), T_LONG, objects, second); ScopeValue* lock_data_value = get_scope_value(StackLockValue::slot(value), T_LONG, objects, second, CHECK_NULL);
assert(second == lock_data_value, "monitor is LONG value that occupies two stack slots"); assert(second == lock_data_value, "monitor is LONG value that occupies two stack slots");
assert(lock_data_value->is_location(), "invalid monitor location"); assert(lock_data_value->is_location(), "invalid monitor location");
Location lock_data_loc = ((LocationValue*)lock_data_value)->location(); Location lock_data_loc = ((LocationValue*)lock_data_value)->location();
@ -346,7 +408,7 @@ MonitorValue* CodeInstaller::get_monitor_value(oop value, GrowableArray<ScopeVal
return new MonitorValue(owner_value, lock_data_loc, eliminated); return new MonitorValue(owner_value, lock_data_loc, eliminated);
} }
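// How the new TRAPS/CHECK plumbing reads (macro shapes paraphrased from
// HotSpot's utilities/exceptions.hpp, not verbatim):
//   #define TRAPS      Thread* THREAD
//   #define CHECK_NULL THREAD); if (HAS_PENDING_EXCEPTION) return NULL; (void)(0
// so a call such as
//   ScopeValue* v = get_scope_value(value, type, objects, second, CHECK_NULL);
// passes the current thread and returns NULL as soon as the callee leaves an
// exception pending, which is what lets get_monitor_value unwind early here.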
void CodeInstaller::initialize_dependencies(oop compiled_code, OopRecorder* recorder) { void CodeInstaller::initialize_dependencies(oop compiled_code, OopRecorder* recorder, TRAPS) {
JavaThread* thread = JavaThread::current(); JavaThread* thread = JavaThread::current();
CompilerThread* compilerThread = thread->is_Compiler_thread() ? thread->as_CompilerThread() : NULL; CompilerThread* compilerThread = thread->is_Compiler_thread() ? thread->as_CompilerThread() : NULL;
_oop_recorder = recorder; _oop_recorder = recorder;
@ -368,8 +430,7 @@ void CodeInstaller::initialize_dependencies(oop compiled_code, OopRecorder* reco
} else if (assumption->klass() == Assumptions_CallSiteTargetValue::klass()) { } else if (assumption->klass() == Assumptions_CallSiteTargetValue::klass()) {
assumption_CallSiteTargetValue(assumption); assumption_CallSiteTargetValue(assumption);
} else { } else {
assumption->print(); JVMCI_ERROR("unexpected Assumption subclass %s", assumption->klass()->signature_name());
fatal("unexpected Assumption subclass");
} }
} }
} }
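// On the pervasive oop -> Handle change in these signatures: a raw oop is
// invalidated when the GC moves its object, while a Handle is an indirection
// the GC updates. Since the new TRAPS plumbing can reach Java code (for
// instance to allocate the exception) at almost any call, every value held
// across such a call is now carried as a Handle; handles are also passed by
// value, being small stack objects that are cheap to copy.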
@ -414,18 +475,19 @@ void RelocBuffer::ensure_size(size_t bytes) {
_size = bytes; _size = bytes;
} }
JVMCIEnv::CodeInstallResult CodeInstaller::gather_metadata(Handle target, Handle& compiled_code, CodeMetadata& metadata) { JVMCIEnv::CodeInstallResult CodeInstaller::gather_metadata(Handle target, Handle compiled_code, CodeMetadata& metadata, TRAPS) {
CodeBuffer buffer("JVMCI Compiler CodeBuffer for Metadata"); CodeBuffer buffer("JVMCI Compiler CodeBuffer for Metadata");
jobject compiled_code_obj = JNIHandles::make_local(compiled_code()); jobject compiled_code_obj = JNIHandles::make_local(compiled_code());
initialize_dependencies(JNIHandles::resolve(compiled_code_obj), NULL); initialize_dependencies(JNIHandles::resolve(compiled_code_obj), NULL, CHECK_OK);
// Get instructions and constants CodeSections early because we need them. // Get instructions and constants CodeSections early because we need them.
_instructions = buffer.insts(); _instructions = buffer.insts();
_constants = buffer.consts(); _constants = buffer.consts();
initialize_fields(target(), JNIHandles::resolve(compiled_code_obj)); initialize_fields(target(), JNIHandles::resolve(compiled_code_obj), CHECK_OK);
if (!initialize_buffer(buffer)) { JVMCIEnv::CodeInstallResult result = initialize_buffer(buffer, CHECK_OK);
return JVMCIEnv::code_too_large; if (result != JVMCIEnv::ok) {
return result;
} }
process_exception_handlers(); process_exception_handlers();
@ -446,18 +508,18 @@ JVMCIEnv::CodeInstallResult CodeInstaller::gather_metadata(Handle target, Handle
} }
// constructor used to create a method // constructor used to create a method
JVMCIEnv::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler, Handle target, Handle& compiled_code, CodeBlob*& cb, Handle installed_code, Handle speculation_log) { JVMCIEnv::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler, Handle target, Handle compiled_code, CodeBlob*& cb, Handle installed_code, Handle speculation_log, TRAPS) {
CodeBuffer buffer("JVMCI Compiler CodeBuffer"); CodeBuffer buffer("JVMCI Compiler CodeBuffer");
jobject compiled_code_obj = JNIHandles::make_local(compiled_code()); jobject compiled_code_obj = JNIHandles::make_local(compiled_code());
OopRecorder* recorder = new OopRecorder(&_arena, true); OopRecorder* recorder = new OopRecorder(&_arena, true);
initialize_dependencies(JNIHandles::resolve(compiled_code_obj), recorder); initialize_dependencies(JNIHandles::resolve(compiled_code_obj), recorder, CHECK_OK);
// Get instructions and constants CodeSections early because we need them. // Get instructions and constants CodeSections early because we need them.
_instructions = buffer.insts(); _instructions = buffer.insts();
_constants = buffer.consts(); _constants = buffer.consts();
initialize_fields(target(), JNIHandles::resolve(compiled_code_obj)); initialize_fields(target(), JNIHandles::resolve(compiled_code_obj), CHECK_OK);
JVMCIEnv::CodeInstallResult result = initialize_buffer(buffer); JVMCIEnv::CodeInstallResult result = initialize_buffer(buffer, CHECK_OK);
if (result != JVMCIEnv::ok) { if (result != JVMCIEnv::ok) {
return result; return result;
} }
@ -500,7 +562,7 @@ JVMCIEnv::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler, Hand
return result; return result;
} }
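// CHECK_OK is assumed here to be the parameterized CHECK_ form specialized
// for this return type, roughly
//   #define CHECK_OK CHECK_(JVMCIEnv::ok)
// so a throwing callee makes install()/gather_metadata() return JVMCIEnv::ok
// with an exception pending; JNI-level callers must therefore test for the
// pending exception before trusting the result code.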
void CodeInstaller::initialize_fields(oop target, oop compiled_code) { void CodeInstaller::initialize_fields(oop target, oop compiled_code, TRAPS) {
if (compiled_code->is_a(HotSpotCompiledNmethod::klass())) { if (compiled_code->is_a(HotSpotCompiledNmethod::klass())) {
Handle hotspotJavaMethod = HotSpotCompiledNmethod::method(compiled_code); Handle hotspotJavaMethod = HotSpotCompiledNmethod::method(compiled_code);
methodHandle method = getMethodFromHotSpotMethod(hotspotJavaMethod()); methodHandle method = getMethodFromHotSpotMethod(hotspotJavaMethod());
@ -521,7 +583,9 @@ void CodeInstaller::initialize_fields(oop target, oop compiled_code) {
// Pre-calculate the constants section size. This is required for PC-relative addressing. // Pre-calculate the constants section size. This is required for PC-relative addressing.
_data_section_handle = JNIHandles::make_local(HotSpotCompiledCode::dataSection(compiled_code)); _data_section_handle = JNIHandles::make_local(HotSpotCompiledCode::dataSection(compiled_code));
guarantee(HotSpotCompiledCode::dataSectionAlignment(compiled_code) <= _constants->alignment(), "Alignment inside constants section is restricted by alignment of section begin"); if ((_constants->alignment() % HotSpotCompiledCode::dataSectionAlignment(compiled_code)) != 0) {
JVMCI_ERROR("invalid data section alignment: %d", HotSpotCompiledCode::dataSectionAlignment(compiled_code));
}
_constants_size = data_section()->length(); _constants_size = data_section()->length();
_data_section_patches_handle = JNIHandles::make_local(HotSpotCompiledCode::dataSectionPatches(compiled_code)); _data_section_patches_handle = JNIHandles::make_local(HotSpotCompiledCode::dataSectionPatches(compiled_code));
@ -538,16 +602,18 @@ void CodeInstaller::initialize_fields(oop target, oop compiled_code) {
_word_kind_handle = JNIHandles::make_local(Architecture::wordKind(arch)); _word_kind_handle = JNIHandles::make_local(Architecture::wordKind(arch));
} }
int CodeInstaller::estimate_stubs_size() { int CodeInstaller::estimate_stubs_size(TRAPS) {
// Estimate the number of static call stubs that might be emitted. // Estimate the number of static call stubs that might be emitted.
int static_call_stubs = 0; int static_call_stubs = 0;
objArrayOop sites = this->sites(); objArrayOop sites = this->sites();
for (int i = 0; i < sites->length(); i++) { for (int i = 0; i < sites->length(); i++) {
oop site = sites->obj_at(i); oop site = sites->obj_at(i);
if (site->is_a(CompilationResult_Mark::klass())) { if (site != NULL && site->is_a(CompilationResult_Mark::klass())) {
oop id_obj = CompilationResult_Mark::id(site); oop id_obj = CompilationResult_Mark::id(site);
if (id_obj != NULL) { if (id_obj != NULL) {
assert(java_lang_boxing_object::is_instance(id_obj, T_INT), "Integer id expected"); if (!java_lang_boxing_object::is_instance(id_obj, T_INT)) {
JVMCI_ERROR_0("expected Integer id, got %s", id_obj->klass()->signature_name());
}
jint id = id_obj->int_field(java_lang_boxing_object::value_offset_in_bytes(T_INT)); jint id = id_obj->int_field(java_lang_boxing_object::value_offset_in_bytes(T_INT));
if (id == INVOKESTATIC || id == INVOKESPECIAL) { if (id == INVOKESTATIC || id == INVOKESPECIAL) {
static_call_stubs++; static_call_stubs++;
@ -559,7 +625,7 @@ int CodeInstaller::estimate_stubs_size() {
} }
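// The count gathered above feeds the stub-section size estimate; a hedged
// sketch of estimateStubSpace (declared in the header further below), on the
// assumption of one compiled-to-interpreter stub per counted call site:
int CodeInstaller::estimateStubSpace(int static_call_stubs) {
  return static_call_stubs * CompiledStaticCall::to_interp_stub_size();
}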
// perform data and call relocation on the CodeBuffer // perform data and call relocation on the CodeBuffer
JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer) { JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer, TRAPS) {
HandleMark hm; HandleMark hm;
objArrayHandle sites = this->sites(); objArrayHandle sites = this->sites();
int locs_buffer_size = sites->length() * (relocInfo::length_limit + sizeof(relocInfo)); int locs_buffer_size = sites->length() * (relocInfo::length_limit + sizeof(relocInfo));
@ -568,7 +634,7 @@ JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer)
// stubs. Stubs have extra relocs but they are managed by the stub // stubs. Stubs have extra relocs but they are managed by the stub
// section itself so they don't need to be accounted for in the // section itself so they don't need to be accounted for in the
// locs_buffer above. // locs_buffer above.
int stubs_size = estimate_stubs_size(); int stubs_size = estimate_stubs_size(CHECK_OK);
int total_size = round_to(_code_size, buffer.insts()->alignment()) + round_to(_constants_size, buffer.consts()->alignment()) + round_to(stubs_size, buffer.stubs()->alignment()); int total_size = round_to(_code_size, buffer.insts()->alignment()) + round_to(_constants_size, buffer.consts()->alignment()) + round_to(stubs_size, buffer.stubs()->alignment());
if (total_size > JVMCINMethodSizeLimit) { if (total_size > JVMCINMethodSizeLimit) {
@ -600,19 +666,30 @@ JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer)
for (int i = 0; i < data_section_patches()->length(); i++) { for (int i = 0; i < data_section_patches()->length(); i++) {
Handle patch = data_section_patches()->obj_at(i); Handle patch = data_section_patches()->obj_at(i);
if (patch.is_null()) {
THROW_(vmSymbols::java_lang_NullPointerException(), JVMCIEnv::ok);
}
Handle reference = CompilationResult_DataPatch::reference(patch); Handle reference = CompilationResult_DataPatch::reference(patch);
assert(reference->is_a(CompilationResult_ConstantReference::klass()), "patch in data section must be a ConstantReference"); if (reference.is_null()) {
THROW_(vmSymbols::java_lang_NullPointerException(), JVMCIEnv::ok);
}
if (!reference->is_a(CompilationResult_ConstantReference::klass())) {
JVMCI_ERROR_OK("invalid patch in data section: %s", reference->klass()->signature_name());
}
Handle constant = CompilationResult_ConstantReference::constant(reference); Handle constant = CompilationResult_ConstantReference::constant(reference);
if (constant.is_null()) {
THROW_(vmSymbols::java_lang_NullPointerException(), JVMCIEnv::ok);
}
address dest = _constants->start() + CompilationResult_Site::pcOffset(patch); address dest = _constants->start() + CompilationResult_Site::pcOffset(patch);
if (constant->is_a(HotSpotMetaspaceConstantImpl::klass())) { if (constant->is_a(HotSpotMetaspaceConstantImpl::klass())) {
if (HotSpotMetaspaceConstantImpl::compressed(constant)) { if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
#ifdef _LP64 #ifdef _LP64
*((narrowKlass*) dest) = record_narrow_metadata_reference(constant); *((narrowKlass*) dest) = record_narrow_metadata_reference(constant, CHECK_OK);
#else #else
fatal("unexpected compressed Klass* in 32-bit mode"); JVMCI_ERROR_OK("unexpected compressed Klass* in 32-bit mode");
#endif #endif
} else { } else {
*((Metadata**) dest) = record_metadata_reference(constant); *((Metadata**) dest) = record_metadata_reference(constant, CHECK_OK);
} }
} else if (constant->is_a(HotSpotObjectConstantImpl::klass())) { } else if (constant->is_a(HotSpotObjectConstantImpl::klass())) {
Handle obj = HotSpotObjectConstantImpl::object(constant); Handle obj = HotSpotObjectConstantImpl::object(constant);
@ -623,48 +700,49 @@ JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer)
#ifdef _LP64 #ifdef _LP64
_constants->relocate(dest, oop_Relocation::spec(oop_index), relocInfo::narrow_oop_in_const); _constants->relocate(dest, oop_Relocation::spec(oop_index), relocInfo::narrow_oop_in_const);
#else #else
fatal("unexpected compressed oop in 32-bit mode"); JVMCI_ERROR_OK("unexpected compressed oop in 32-bit mode");
#endif #endif
} else { } else {
_constants->relocate(dest, oop_Relocation::spec(oop_index)); _constants->relocate(dest, oop_Relocation::spec(oop_index));
} }
} else { } else {
ShouldNotReachHere(); JVMCI_ERROR_OK("invalid constant in data section: %s", constant->klass()->signature_name());
} }
} }
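// On the THROW_(name, result) form used in this loop: the parameterized
// THROW both installs the pending exception and returns 'result' so that a
// value-returning function can unwind; JVMCIEnv::ok is returned here purely
// to satisfy the signature, and callers observe the pending exception first.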
jint last_pc_offset = -1; jint last_pc_offset = -1;
for (int i = 0; i < sites->length(); i++) { for (int i = 0; i < sites->length(); i++) {
{ Handle site = sites->obj_at(i);
No_Safepoint_Verifier no_safepoint; if (site.is_null()) {
oop site = sites->obj_at(i); THROW_(vmSymbols::java_lang_NullPointerException(), JVMCIEnv::ok);
jint pc_offset = CompilationResult_Site::pcOffset(site);
if (site->is_a(CompilationResult_Call::klass())) {
TRACE_jvmci_4("call at %i", pc_offset);
site_Call(buffer, pc_offset, site);
} else if (site->is_a(CompilationResult_Infopoint::klass())) {
// three reasons for infopoints denote actual safepoints
oop reason = CompilationResult_Infopoint::reason(site);
if (InfopointReason::SAFEPOINT() == reason || InfopointReason::CALL() == reason || InfopointReason::IMPLICIT_EXCEPTION() == reason) {
TRACE_jvmci_4("safepoint at %i", pc_offset);
site_Safepoint(buffer, pc_offset, site);
} else {
// if the infopoint is not an actual safepoint, it must have one of the other reasons
// (safeguard against new safepoint types that require handling above)
assert(InfopointReason::METHOD_START() == reason || InfopointReason::METHOD_END() == reason || InfopointReason::LINE_NUMBER() == reason, "");
site_Infopoint(buffer, pc_offset, site);
}
} else if (site->is_a(CompilationResult_DataPatch::klass())) {
TRACE_jvmci_4("datapatch at %i", pc_offset);
site_DataPatch(buffer, pc_offset, site);
} else if (site->is_a(CompilationResult_Mark::klass())) {
TRACE_jvmci_4("mark at %i", pc_offset);
site_Mark(buffer, pc_offset, site);
} else {
fatal("unexpected Site subclass");
}
last_pc_offset = pc_offset;
} }
jint pc_offset = CompilationResult_Site::pcOffset(site);
if (site->is_a(CompilationResult_Call::klass())) {
TRACE_jvmci_4("call at %i", pc_offset);
site_Call(buffer, pc_offset, site, CHECK_OK);
} else if (site->is_a(CompilationResult_Infopoint::klass())) {
// three reasons for infopoints denote actual safepoints
oop reason = CompilationResult_Infopoint::reason(site);
if (InfopointReason::SAFEPOINT() == reason || InfopointReason::CALL() == reason || InfopointReason::IMPLICIT_EXCEPTION() == reason) {
TRACE_jvmci_4("safepoint at %i", pc_offset);
site_Safepoint(buffer, pc_offset, site, CHECK_OK);
} else if (InfopointReason::METHOD_START() == reason || InfopointReason::METHOD_END() == reason || InfopointReason::LINE_NUMBER() == reason) {
site_Infopoint(buffer, pc_offset, site, CHECK_OK);
} else {
JVMCI_ERROR_OK("unknown infopoint reason at %i", pc_offset);
}
} else if (site->is_a(CompilationResult_DataPatch::klass())) {
TRACE_jvmci_4("datapatch at %i", pc_offset);
site_DataPatch(buffer, pc_offset, site, CHECK_OK);
} else if (site->is_a(CompilationResult_Mark::klass())) {
TRACE_jvmci_4("mark at %i", pc_offset);
site_Mark(buffer, pc_offset, site, CHECK_OK);
} else {
JVMCI_ERROR_OK("unexpected site subclass: %s", site->klass()->signature_name());
}
last_pc_offset = pc_offset;
if (CodeInstallSafepointChecks && SafepointSynchronize::do_call_back()) { if (CodeInstallSafepointChecks && SafepointSynchronize::do_call_back()) {
// this is a hacky way to force a safepoint check but nothing else was jumping out at me. // this is a hacky way to force a safepoint check but nothing else was jumping out at me.
ThreadToNativeFromVM ttnfv(JavaThread::current()); ThreadToNativeFromVM ttnfv(JavaThread::current());
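// Why a native transition forces the check (a sketch of the semantics, not
// of the transition code itself): entering and leaving _thread_in_native
// goes through the thread-state machinery, which cooperates with
// SafepointSynchronize.
//   {
//     ThreadToNativeFromVM ttnfv(JavaThread::current());
//     // _thread_in_native here: a safepoint can proceed without this thread
//   } // destructor re-enters the VM, blocking if a safepoint is in progress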
@ -673,7 +751,6 @@ JVMCIEnv::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer)
#ifndef PRODUCT #ifndef PRODUCT
if (comments() != NULL) { if (comments() != NULL) {
No_Safepoint_Verifier no_safepoint;
for (int i = 0; i < comments()->length(); i++) { for (int i = 0; i < comments()->length(); i++) {
oop comment = comments()->obj_at(i); oop comment = comments()->obj_at(i);
assert(comment->is_a(HotSpotCompiledCode_Comment::klass()), "cce"); assert(comment->is_a(HotSpotCompiledCode_Comment::klass()), "cce");
@ -759,56 +836,61 @@ static bool bytecode_should_reexecute(Bytecodes::Code code) {
return true; return true;
} }
GrowableArray<ScopeValue*>* CodeInstaller::record_virtual_objects(oop debug_info) { GrowableArray<ScopeValue*>* CodeInstaller::record_virtual_objects(Handle debug_info, TRAPS) {
objArrayOop virtualObjects = DebugInfo::virtualObjectMapping(debug_info); objArrayHandle virtualObjects = DebugInfo::virtualObjectMapping(debug_info);
if (virtualObjects == NULL) { if (virtualObjects.is_null()) {
return NULL; return NULL;
} }
GrowableArray<ScopeValue*>* objects = new GrowableArray<ScopeValue*>(virtualObjects->length(), virtualObjects->length(), NULL); GrowableArray<ScopeValue*>* objects = new GrowableArray<ScopeValue*>(virtualObjects->length(), virtualObjects->length(), NULL);
// Create the unique ObjectValues // Create the unique ObjectValues
for (int i = 0; i < virtualObjects->length(); i++) { for (int i = 0; i < virtualObjects->length(); i++) {
oop value = virtualObjects->obj_at(i); Handle value = virtualObjects->obj_at(i);
int id = VirtualObject::id(value); int id = VirtualObject::id(value);
oop type = VirtualObject::type(value); Handle type = VirtualObject::type(value);
oop javaMirror = HotSpotResolvedObjectTypeImpl::javaClass(type); oop javaMirror = HotSpotResolvedObjectTypeImpl::javaClass(type);
ObjectValue* sv = new ObjectValue(id, new ConstantOopWriteValue(JNIHandles::make_local(Thread::current(), javaMirror))); ObjectValue* sv = new ObjectValue(id, new ConstantOopWriteValue(JNIHandles::make_local(Thread::current(), javaMirror)));
assert(objects->at(id) == NULL, "once"); if (id < 0 || id >= objects->length()) {
JVMCI_ERROR_NULL("virtual object id %d out of bounds", id);
}
if (objects->at(id) != NULL) {
JVMCI_ERROR_NULL("duplicate virtual object id %d", id);
}
objects->at_put(id, sv); objects->at_put(id, sv);
} }
// All the values which could be referenced by the VirtualObjects // All the values which could be referenced by the VirtualObjects
// exist, so now describe all the VirtualObjects themselves. // exist, so now describe all the VirtualObjects themselves.
for (int i = 0; i < virtualObjects->length(); i++) { for (int i = 0; i < virtualObjects->length(); i++) {
oop value = virtualObjects->obj_at(i); Handle value = virtualObjects->obj_at(i);
int id = VirtualObject::id(value); int id = VirtualObject::id(value);
record_object_value(objects->at(id)->as_ObjectValue(), value, objects); record_object_value(objects->at(id)->as_ObjectValue(), value, objects, CHECK_NULL);
} }
_debug_recorder->dump_object_pool(objects); _debug_recorder->dump_object_pool(objects);
return objects; return objects;
} }
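// The id checks above lean on the GrowableArray constructor used earlier in
// this function: new GrowableArray<ScopeValue*>(len, len, NULL) pre-fills
// 'len' NULL slots, so a valid id satisfies 0 <= id < len and a non-NULL
// slot at insertion time can only mean a duplicate id.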
void CodeInstaller::record_scope(jint pc_offset, oop debug_info) { void CodeInstaller::record_scope(jint pc_offset, Handle debug_info, TRAPS) {
oop position = DebugInfo::bytecodePosition(debug_info); Handle position = DebugInfo::bytecodePosition(debug_info);
if (position == NULL) { if (position.is_null()) {
// Stubs do not record scope info, just oop maps // Stubs do not record scope info, just oop maps
return; return;
} }
GrowableArray<ScopeValue*>* objectMapping = record_virtual_objects(debug_info); GrowableArray<ScopeValue*>* objectMapping = record_virtual_objects(debug_info, CHECK);
record_scope(pc_offset, position, objectMapping); record_scope(pc_offset, position, objectMapping, CHECK);
} }
void CodeInstaller::record_scope(jint pc_offset, oop position, GrowableArray<ScopeValue*>* objects) { void CodeInstaller::record_scope(jint pc_offset, Handle position, GrowableArray<ScopeValue*>* objects, TRAPS) {
oop frame = NULL; Handle frame;
if (position->is_a(BytecodeFrame::klass())) { if (position->is_a(BytecodeFrame::klass())) {
frame = position; frame = position;
} }
oop caller_frame = BytecodePosition::caller(position); Handle caller_frame = BytecodePosition::caller(position);
if (caller_frame != NULL) { if (caller_frame.not_null()) {
record_scope(pc_offset, caller_frame, objects); record_scope(pc_offset, caller_frame, objects, CHECK);
} }
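// The recursion above records caller scopes before callee scopes, so for a
// position chain a() -> b() -> c() the recorder is handed a, then b, then c,
// the outermost-first ordering the debug recorder expects for inlined frames.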
oop hotspot_method = BytecodePosition::method(position); Handle hotspot_method = BytecodePosition::method(position);
Method* method = getMethodFromHotSpotMethod(hotspot_method); Method* method = getMethodFromHotSpotMethod(hotspot_method());
jint bci = BytecodePosition::bci(position); jint bci = BytecodePosition::bci(position);
if (bci == BytecodeFrame::BEFORE_BCI()) { if (bci == BytecodeFrame::BEFORE_BCI()) {
bci = SynchronizationEntryBCI; bci = SynchronizationEntryBCI;
@ -817,13 +899,13 @@ void CodeInstaller::record_scope(jint pc_offset, oop position, GrowableArray<Sco
TRACE_jvmci_2("Recording scope pc_offset=%d bci=%d method=%s", pc_offset, bci, method->name_and_sig_as_C_string()); TRACE_jvmci_2("Recording scope pc_offset=%d bci=%d method=%s", pc_offset, bci, method->name_and_sig_as_C_string());
bool reexecute = false; bool reexecute = false;
if (frame != NULL) { if (frame.not_null()) {
if (bci == SynchronizationEntryBCI){ if (bci == SynchronizationEntryBCI){
reexecute = false; reexecute = false;
} else { } else {
Bytecodes::Code code = Bytecodes::java_code_at(method, method->bcp_from(bci)); Bytecodes::Code code = Bytecodes::java_code_at(method, method->bcp_from(bci));
reexecute = bytecode_should_reexecute(code); reexecute = bytecode_should_reexecute(code);
if (frame != NULL) { if (frame.not_null()) {
reexecute = (BytecodeFrame::duringCall(frame) == JNI_FALSE); reexecute = (BytecodeFrame::duringCall(frame) == JNI_FALSE);
} }
} }
@ -834,15 +916,22 @@ void CodeInstaller::record_scope(jint pc_offset, oop position, GrowableArray<Sco
DebugToken* monitors_token = NULL; DebugToken* monitors_token = NULL;
bool throw_exception = false; bool throw_exception = false;
if (frame != NULL) { if (frame.not_null()) {
jint local_count = BytecodeFrame::numLocals(frame); jint local_count = BytecodeFrame::numLocals(frame);
jint expression_count = BytecodeFrame::numStack(frame); jint expression_count = BytecodeFrame::numStack(frame);
jint monitor_count = BytecodeFrame::numLocks(frame); jint monitor_count = BytecodeFrame::numLocks(frame);
objArrayOop values = BytecodeFrame::values(frame); objArrayHandle values = BytecodeFrame::values(frame);
objArrayOop slotKinds = BytecodeFrame::slotKinds(frame); objArrayHandle slotKinds = BytecodeFrame::slotKinds(frame);
assert(local_count + expression_count + monitor_count == values->length(), "unexpected values length"); if (values.is_null() || slotKinds.is_null()) {
assert(local_count + expression_count == slotKinds->length(), "unexpected slotKinds length"); THROW(vmSymbols::java_lang_NullPointerException());
}
if (local_count + expression_count + monitor_count != values->length()) {
JVMCI_ERROR("unexpected values length %d in scope (%d locals, %d expressions, %d monitors)", values->length(), local_count, expression_count, monitor_count);
}
if (local_count + expression_count != slotKinds->length()) {
JVMCI_ERROR("unexpected slotKinds length %d in scope (%d locals, %d expressions)", slotKinds->length(), local_count, expression_count);
}
GrowableArray<ScopeValue*>* locals = local_count > 0 ? new GrowableArray<ScopeValue*> (local_count) : NULL; GrowableArray<ScopeValue*>* locals = local_count > 0 ? new GrowableArray<ScopeValue*> (local_count) : NULL;
GrowableArray<ScopeValue*>* expressions = expression_count > 0 ? new GrowableArray<ScopeValue*> (expression_count) : NULL; GrowableArray<ScopeValue*>* expressions = expression_count > 0 ? new GrowableArray<ScopeValue*> (expression_count) : NULL;
@ -853,30 +942,30 @@ void CodeInstaller::record_scope(jint pc_offset, oop position, GrowableArray<Sco
for (jint i = 0; i < values->length(); i++) { for (jint i = 0; i < values->length(); i++) {
ScopeValue* second = NULL; ScopeValue* second = NULL;
oop value = values->obj_at(i); Handle value = values->obj_at(i);
if (i < local_count) { if (i < local_count) {
oop kind = slotKinds->obj_at(i); BasicType type = JVMCIRuntime::kindToBasicType(slotKinds->obj_at(i), CHECK);
BasicType type = JVMCIRuntime::kindToBasicType(JavaKind::typeChar(kind)); ScopeValue* first = get_scope_value(value, type, objects, second, CHECK);
ScopeValue* first = get_scope_value(value, type, objects, second);
if (second != NULL) { if (second != NULL) {
locals->append(second); locals->append(second);
} }
locals->append(first); locals->append(first);
} else if (i < local_count + expression_count) { } else if (i < local_count + expression_count) {
oop kind = slotKinds->obj_at(i); BasicType type = JVMCIRuntime::kindToBasicType(slotKinds->obj_at(i), CHECK);
BasicType type = JVMCIRuntime::kindToBasicType(JavaKind::typeChar(kind)); ScopeValue* first = get_scope_value(value, type, objects, second, CHECK);
ScopeValue* first = get_scope_value(value, type, objects, second);
if (second != NULL) { if (second != NULL) {
expressions->append(second); expressions->append(second);
} }
expressions->append(first); expressions->append(first);
} else { } else {
monitors->append(get_monitor_value(value, objects)); MonitorValue *monitor = get_monitor_value(value, objects, CHECK);
monitors->append(monitor);
} }
if (second != NULL) { if (second != NULL) {
i++; i++;
assert(i < values->length(), "double-slot value not followed by Value.ILLEGAL"); if (i >= values->length() || values->obj_at(i) != Value::ILLEGAL()) {
assert(values->obj_at(i) == Value::ILLEGAL(), "double-slot value not followed by Value.ILLEGAL"); JVMCI_ERROR("double-slot value not followed by Value.ILLEGAL");
}
} }
} }
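// Double-slot handling in the loop above, as a worked example (encoding per
// the JVMCI contract being validated): a long occupying local slots {2,3}
// arrives as values[2] = <the long>, values[3] = Value.ILLEGAL.
// get_scope_value fills 'second', the loop appends second before first, then
// skips the ILLEGAL filler slot; anything else in that slot is now reported
// as an error instead of tripping an assert.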
@ -891,32 +980,37 @@ void CodeInstaller::record_scope(jint pc_offset, oop position, GrowableArray<Sco
locals_token, expressions_token, monitors_token); locals_token, expressions_token, monitors_token);
} }
void CodeInstaller::site_Safepoint(CodeBuffer& buffer, jint pc_offset, oop site) { void CodeInstaller::site_Safepoint(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS) {
oop debug_info = CompilationResult_Infopoint::debugInfo(site); Handle debug_info = CompilationResult_Infopoint::debugInfo(site);
assert(debug_info != NULL, "debug info expected"); if (debug_info.is_null()) {
JVMCI_ERROR("debug info expected at safepoint at %i", pc_offset);
}
// address instruction = _instructions->start() + pc_offset; // address instruction = _instructions->start() + pc_offset;
// jint next_pc_offset = Assembler::locate_next_instruction(instruction) - _instructions->start(); // jint next_pc_offset = Assembler::locate_next_instruction(instruction) - _instructions->start();
_debug_recorder->add_safepoint(pc_offset, create_oop_map(debug_info)); OopMap *map = create_oop_map(debug_info, CHECK);
record_scope(pc_offset, debug_info); _debug_recorder->add_safepoint(pc_offset, map);
record_scope(pc_offset, debug_info, CHECK);
_debug_recorder->end_safepoint(pc_offset); _debug_recorder->end_safepoint(pc_offset);
} }
void CodeInstaller::site_Infopoint(CodeBuffer& buffer, jint pc_offset, oop site) { void CodeInstaller::site_Infopoint(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS) {
oop debug_info = CompilationResult_Infopoint::debugInfo(site); Handle debug_info = CompilationResult_Infopoint::debugInfo(site);
assert(debug_info != NULL, "debug info expected"); if (debug_info.is_null()) {
JVMCI_ERROR("debug info expected at infopoint at %i", pc_offset);
}
_debug_recorder->add_non_safepoint(pc_offset); _debug_recorder->add_non_safepoint(pc_offset);
record_scope(pc_offset, debug_info); record_scope(pc_offset, debug_info, CHECK);
_debug_recorder->end_non_safepoint(pc_offset); _debug_recorder->end_non_safepoint(pc_offset);
} }
void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, oop site) { void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS) {
oop target = CompilationResult_Call::target(site); Handle target = CompilationResult_Call::target(site);
InstanceKlass* target_klass = InstanceKlass::cast(target->klass()); InstanceKlass* target_klass = InstanceKlass::cast(target->klass());
oop hotspot_method = NULL; // JavaMethod Handle hotspot_method; // JavaMethod
oop foreign_call = NULL; Handle foreign_call;
if (target_klass->is_subclass_of(SystemDictionary::HotSpotForeignCallTarget_klass())) { if (target_klass->is_subclass_of(SystemDictionary::HotSpotForeignCallTarget_klass())) {
foreign_call = target; foreign_call = target;
@ -924,27 +1018,29 @@ void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, oop site) {
hotspot_method = target; hotspot_method = target;
} }
oop debug_info = CompilationResult_Call::debugInfo(site); Handle debug_info = CompilationResult_Call::debugInfo(site);
assert(!!hotspot_method ^ !!foreign_call, "Call site needs exactly one type"); assert(hotspot_method.not_null() ^ foreign_call.not_null(), "Call site needs exactly one type");
NativeInstruction* inst = nativeInstruction_at(_instructions->start() + pc_offset); NativeInstruction* inst = nativeInstruction_at(_instructions->start() + pc_offset);
jint next_pc_offset = CodeInstaller::pd_next_offset(inst, pc_offset, hotspot_method); jint next_pc_offset = CodeInstaller::pd_next_offset(inst, pc_offset, hotspot_method, CHECK);
if (debug_info != NULL) { if (debug_info.not_null()) {
_debug_recorder->add_safepoint(next_pc_offset, create_oop_map(debug_info)); OopMap *map = create_oop_map(debug_info, CHECK);
record_scope(next_pc_offset, debug_info); _debug_recorder->add_safepoint(next_pc_offset, map);
record_scope(next_pc_offset, debug_info, CHECK);
} }
if (foreign_call != NULL) { if (foreign_call.not_null()) {
jlong foreign_call_destination = HotSpotForeignCallTarget::address(foreign_call); jlong foreign_call_destination = HotSpotForeignCallTarget::address(foreign_call);
CodeInstaller::pd_relocate_ForeignCall(inst, foreign_call_destination); CodeInstaller::pd_relocate_ForeignCall(inst, foreign_call_destination, CHECK);
} else { // method != NULL } else { // method != NULL
assert(hotspot_method != NULL, "unexpected JavaMethod"); if (debug_info.is_null()) {
assert(debug_info != NULL, "debug info expected"); JVMCI_ERROR("debug info expected at call at %i", pc_offset);
}
TRACE_jvmci_3("method call"); TRACE_jvmci_3("method call");
CodeInstaller::pd_relocate_JavaMethod(hotspot_method, pc_offset); CodeInstaller::pd_relocate_JavaMethod(hotspot_method, pc_offset, CHECK);
if (_next_call_type == INVOKESTATIC || _next_call_type == INVOKESPECIAL) { if (_next_call_type == INVOKESTATIC || _next_call_type == INVOKESPECIAL) {
// Need a static call stub for transitions from compiled to interpreted. // Need a static call stub for transitions from compiled to interpreted.
CompiledStaticCall::emit_to_interp_stub(buffer, _instructions->start() + pc_offset); CompiledStaticCall::emit_to_interp_stub(buffer, _instructions->start() + pc_offset);
@ -953,38 +1049,45 @@ void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, oop site) {
_next_call_type = INVOKE_INVALID; _next_call_type = INVOKE_INVALID;
if (debug_info != NULL) { if (debug_info.not_null()) {
_debug_recorder->end_safepoint(next_pc_offset); _debug_recorder->end_safepoint(next_pc_offset);
} }
} }
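// Shape of the call-site split handled above: a target is either a
// HotSpotForeignCallTarget (runtime entry, relocated by absolute address)
// or a JavaMethod (relocated symbolically); for INVOKESTATIC/INVOKESPECIAL
// a compiled-to-interpreter stub is additionally emitted so the call can
// transition to the interpreter while the callee is not yet compiled.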
void CodeInstaller::site_DataPatch(CodeBuffer& buffer, jint pc_offset, oop site) { void CodeInstaller::site_DataPatch(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS) {
oop reference = CompilationResult_DataPatch::reference(site); Handle reference = CompilationResult_DataPatch::reference(site);
if (reference->is_a(CompilationResult_ConstantReference::klass())) { if (reference.is_null()) {
THROW(vmSymbols::java_lang_NullPointerException());
} else if (reference->is_a(CompilationResult_ConstantReference::klass())) {
Handle constant = CompilationResult_ConstantReference::constant(reference); Handle constant = CompilationResult_ConstantReference::constant(reference);
if (constant->is_a(HotSpotObjectConstantImpl::klass())) { if (constant.is_null()) {
pd_patch_OopConstant(pc_offset, constant); THROW(vmSymbols::java_lang_NullPointerException());
} else if (constant->is_a(HotSpotObjectConstantImpl::klass())) {
pd_patch_OopConstant(pc_offset, constant, CHECK);
} else if (constant->is_a(HotSpotMetaspaceConstantImpl::klass())) { } else if (constant->is_a(HotSpotMetaspaceConstantImpl::klass())) {
pd_patch_MetaspaceConstant(pc_offset, constant); pd_patch_MetaspaceConstant(pc_offset, constant, CHECK);
} else if (constant->is_a(HotSpotSentinelConstant::klass())) {
fatal("sentinel constant unsupported");
} else { } else {
fatal("unknown constant type in data patch"); JVMCI_ERROR("unknown constant type in data patch: %s", constant->klass()->signature_name());
} }
} else if (reference->is_a(CompilationResult_DataSectionReference::klass())) { } else if (reference->is_a(CompilationResult_DataSectionReference::klass())) {
int data_offset = CompilationResult_DataSectionReference::offset(reference); int data_offset = CompilationResult_DataSectionReference::offset(reference);
assert(0 <= data_offset && data_offset < _constants_size, "data offset 0x%X points outside data section (size 0x%X)", data_offset, _constants_size); if (0 <= data_offset && data_offset < _constants_size) {
pd_patch_DataSectionReference(pc_offset, data_offset); pd_patch_DataSectionReference(pc_offset, data_offset);
} else {
JVMCI_ERROR("data offset 0x%X points outside data section (size 0x%X)", data_offset, _constants_size);
}
} else { } else {
fatal("unknown data patch type"); JVMCI_ERROR("unknown data patch type: %s", reference->klass()->signature_name());
} }
} }
void CodeInstaller::site_Mark(CodeBuffer& buffer, jint pc_offset, oop site) { void CodeInstaller::site_Mark(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS) {
oop id_obj = CompilationResult_Mark::id(site); Handle id_obj = CompilationResult_Mark::id(site);
if (id_obj != NULL) { if (id_obj.not_null()) {
assert(java_lang_boxing_object::is_instance(id_obj, T_INT), "Integer id expected"); if (!java_lang_boxing_object::is_instance(id_obj(), T_INT)) {
JVMCI_ERROR("expected Integer id, got %s", id_obj->klass()->signature_name());
}
jint id = id_obj->int_field(java_lang_boxing_object::value_offset_in_bytes(T_INT)); jint id = id_obj->int_field(java_lang_boxing_object::value_offset_in_bytes(T_INT));
address pc = _instructions->start() + pc_offset; address pc = _instructions->start() + pc_offset;
@ -1017,7 +1120,7 @@ void CodeInstaller::site_Mark(CodeBuffer& buffer, jint pc_offset, oop site) {
case POLL_FAR: case POLL_FAR:
case POLL_RETURN_NEAR: case POLL_RETURN_NEAR:
case POLL_RETURN_FAR: case POLL_RETURN_FAR:
pd_relocate_poll(pc, id); pd_relocate_poll(pc, id, CHECK);
break; break;
case CARD_TABLE_SHIFT: case CARD_TABLE_SHIFT:
case CARD_TABLE_ADDRESS: case CARD_TABLE_ADDRESS:
@ -1027,7 +1130,7 @@ void CodeInstaller::site_Mark(CodeBuffer& buffer, jint pc_offset, oop site) {
case CRC_TABLE_ADDRESS: case CRC_TABLE_ADDRESS:
break; break;
default: default:
ShouldNotReachHere(); JVMCI_ERROR("invalid mark id: %d", id);
break; break;
} }
} }


@ -154,13 +154,13 @@ private:
static ConstantIntValue* _int_2_scope_value; static ConstantIntValue* _int_2_scope_value;
static LocationValue* _illegal_value; static LocationValue* _illegal_value;
jint pd_next_offset(NativeInstruction* inst, jint pc_offset, oop method); jint pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS);
void pd_patch_OopConstant(int pc_offset, Handle& constant); void pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS);
void pd_patch_MetaspaceConstant(int pc_offset, Handle& constant); void pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS);
void pd_patch_DataSectionReference(int pc_offset, int data_offset); void pd_patch_DataSectionReference(int pc_offset, int data_offset);
void pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination); void pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS);
void pd_relocate_JavaMethod(oop method, jint pc_offset); void pd_relocate_JavaMethod(Handle method, jint pc_offset, TRAPS);
void pd_relocate_poll(address pc, jint mark); void pd_relocate_poll(address pc, jint mark, TRAPS);
objArrayOop sites() { return (objArrayOop) JNIHandles::resolve(_sites_handle); } objArrayOop sites() { return (objArrayOop) JNIHandles::resolve(_sites_handle); }
arrayOop code() { return (arrayOop) JNIHandles::resolve(_code_handle); } arrayOop code() { return (arrayOop) JNIHandles::resolve(_code_handle); }
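// The pd_ prefix marks HotSpot's platform-dependent hooks, implemented once
// per CPU port (for example in a jvmciCodeInstaller_<arch>.cpp), which is
// why threading TRAPS through them changes every port's signature at once.
// A hypothetical per-port shape:
//   void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
//     /* arch-specific relocation; bad marks reported via JVMCI_ERROR */
//   }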
@ -177,33 +177,33 @@ public:
CodeInstaller() : _arena(mtCompiler) {} CodeInstaller() : _arena(mtCompiler) {}
JVMCIEnv::CodeInstallResult gather_metadata(Handle target, Handle& compiled_code, CodeMetadata& metadata); JVMCIEnv::CodeInstallResult gather_metadata(Handle target, Handle compiled_code, CodeMetadata& metadata, TRAPS);
JVMCIEnv::CodeInstallResult install(JVMCICompiler* compiler, Handle target, Handle& compiled_code, CodeBlob*& cb, Handle installed_code, Handle speculation_log); JVMCIEnv::CodeInstallResult install(JVMCICompiler* compiler, Handle target, Handle compiled_code, CodeBlob*& cb, Handle installed_code, Handle speculation_log, TRAPS);
static address runtime_call_target_address(oop runtime_call); static address runtime_call_target_address(oop runtime_call);
static VMReg get_hotspot_reg(jint jvmciRegisterNumber); static VMReg get_hotspot_reg(jint jvmciRegisterNumber, TRAPS);
static bool is_general_purpose_reg(VMReg hotspotRegister); static bool is_general_purpose_reg(VMReg hotspotRegister);
const OopMapSet* oopMapSet() const { return _debug_recorder->_oopmaps; } const OopMapSet* oopMapSet() const { return _debug_recorder->_oopmaps; }
protected: protected:
Location::Type get_oop_type(oop value); Location::Type get_oop_type(Handle value);
ScopeValue* get_scope_value(oop value, BasicType type, GrowableArray<ScopeValue*>* objects, ScopeValue* &second); ScopeValue* get_scope_value(Handle value, BasicType type, GrowableArray<ScopeValue*>* objects, ScopeValue* &second, TRAPS);
MonitorValue* get_monitor_value(oop value, GrowableArray<ScopeValue*>* objects); MonitorValue* get_monitor_value(Handle value, GrowableArray<ScopeValue*>* objects, TRAPS);
Metadata* record_metadata_reference(Handle& constant); Metadata* record_metadata_reference(Handle constant, TRAPS);
#ifdef _LP64 #ifdef _LP64
narrowKlass record_narrow_metadata_reference(Handle& constant); narrowKlass record_narrow_metadata_reference(Handle constant, TRAPS);
#endif #endif
// extract the fields of the CompilationResult // extract the fields of the CompilationResult
void initialize_fields(oop target, oop target_method); void initialize_fields(oop target, oop target_method, TRAPS);
void initialize_dependencies(oop target_method, OopRecorder* oop_recorder); void initialize_dependencies(oop target_method, OopRecorder* oop_recorder, TRAPS);
int estimate_stubs_size(); int estimate_stubs_size(TRAPS);
// perform data and call relocation on the CodeBuffer // perform data and call relocation on the CodeBuffer
JVMCIEnv::CodeInstallResult initialize_buffer(CodeBuffer& buffer); JVMCIEnv::CodeInstallResult initialize_buffer(CodeBuffer& buffer, TRAPS);
void assumption_NoFinalizableSubclass(Handle assumption); void assumption_NoFinalizableSubclass(Handle assumption);
void assumption_ConcreteSubtype(Handle assumption); void assumption_ConcreteSubtype(Handle assumption);
@ -211,19 +211,19 @@ protected:
void assumption_ConcreteMethod(Handle assumption); void assumption_ConcreteMethod(Handle assumption);
void assumption_CallSiteTargetValue(Handle assumption); void assumption_CallSiteTargetValue(Handle assumption);
void site_Safepoint(CodeBuffer& buffer, jint pc_offset, oop site); void site_Safepoint(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS);
void site_Infopoint(CodeBuffer& buffer, jint pc_offset, oop site); void site_Infopoint(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS);
void site_Call(CodeBuffer& buffer, jint pc_offset, oop site); void site_Call(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS);
void site_DataPatch(CodeBuffer& buffer, jint pc_offset, oop site); void site_DataPatch(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS);
void site_Mark(CodeBuffer& buffer, jint pc_offset, oop site); void site_Mark(CodeBuffer& buffer, jint pc_offset, Handle site, TRAPS);
OopMap* create_oop_map(oop debug_info); OopMap* create_oop_map(Handle debug_info, TRAPS);
void record_scope(jint pc_offset, oop debug_info); void record_scope(jint pc_offset, Handle debug_info, TRAPS);
void record_scope(jint pc_offset, oop code_pos, GrowableArray<ScopeValue*>* objects); void record_scope(jint pc_offset, Handle code_pos, GrowableArray<ScopeValue*>* objects, TRAPS);
void record_object_value(ObjectValue* sv, oop value, GrowableArray<ScopeValue*>* objects); void record_object_value(ObjectValue* sv, Handle value, GrowableArray<ScopeValue*>* objects, TRAPS);
GrowableArray<ScopeValue*>* record_virtual_objects(oop debug_info); GrowableArray<ScopeValue*>* record_virtual_objects(Handle debug_info, TRAPS);
void process_exception_handlers(); void process_exception_handlers();
int estimateStubSpace(int static_call_stubs); int estimateStubSpace(int static_call_stubs);


@ -112,7 +112,7 @@ void JVMCICompiler::bootstrap() {
_bootstrapping = false; _bootstrapping = false;
} }
void JVMCICompiler::compile_method(methodHandle method, int entry_bci, JVMCIEnv* env) { void JVMCICompiler::compile_method(const methodHandle& method, int entry_bci, JVMCIEnv* env) {
JVMCI_EXCEPTION_CONTEXT JVMCI_EXCEPTION_CONTEXT
bool is_osr = entry_bci != InvocationEntryBci; bool is_osr = entry_bci != InvocationEntryBci;


@ -71,7 +71,7 @@ public:
// Compilation entry point for methods // Compilation entry point for methods
virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci, DirectiveSet* directive); virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci, DirectiveSet* directive);
void compile_method(methodHandle target, int entry_bci, JVMCIEnv* env); void compile_method(const methodHandle& target, int entry_bci, JVMCIEnv* env);
virtual bool is_trivial(Method* method); virtual bool is_trivial(Method* method);


@ -670,7 +670,7 @@ C2V_VMENTRY(jint, installCode, (JNIEnv *jniEnv, jobject, jobject target, jobject
TraceTime install_time("installCode", JVMCICompiler::codeInstallTimer()); TraceTime install_time("installCode", JVMCICompiler::codeInstallTimer());
CodeInstaller installer; CodeInstaller installer;
JVMCIEnv::CodeInstallResult result = installer.install(compiler, target_handle, compiled_code_handle, cb, installed_code_handle, speculation_log_handle); JVMCIEnv::CodeInstallResult result = installer.install(compiler, target_handle, compiled_code_handle, cb, installed_code_handle, speculation_log_handle, CHECK_0);
if (PrintCodeCacheOnCompilation) { if (PrintCodeCacheOnCompilation) {
stringStream s; stringStream s;
@ -690,6 +690,7 @@ C2V_VMENTRY(jint, installCode, (JNIEnv *jniEnv, jobject, jobject target, jobject
assert(installed_code_handle->is_a(InstalledCode::klass()), "wrong type"); assert(installed_code_handle->is_a(InstalledCode::klass()), "wrong type");
CompilerToVM::invalidate_installed_code(installed_code_handle, CHECK_0); CompilerToVM::invalidate_installed_code(installed_code_handle, CHECK_0);
InstalledCode::set_address(installed_code_handle, (jlong) cb); InstalledCode::set_address(installed_code_handle, (jlong) cb);
InstalledCode::set_version(installed_code_handle, InstalledCode::version(installed_code_handle) + 1);
if (cb->is_nmethod()) { if (cb->is_nmethod()) {
InstalledCode::set_entryPoint(installed_code_handle, (jlong) cb->as_nmethod_or_null()->verified_entry_point()); InstalledCode::set_entryPoint(installed_code_handle, (jlong) cb->as_nmethod_or_null()->verified_entry_point());
} else { } else {
@ -726,7 +727,7 @@ C2V_VMENTRY(jint, getMetadata, (JNIEnv *jniEnv, jobject, jobject target, jobject
CodeBlob *cb = NULL; CodeBlob *cb = NULL;
CodeInstaller installer; CodeInstaller installer;
JVMCIEnv::CodeInstallResult result = installer.gather_metadata(target_handle, compiled_code_handle, code_metadata); //cb, pc_descs, nr_pc_descs, scopes_descs, scopes_size, reloc_buffer); JVMCIEnv::CodeInstallResult result = installer.gather_metadata(target_handle, compiled_code_handle, code_metadata, CHECK_0); //cb, pc_descs, nr_pc_descs, scopes_descs, scopes_size, reloc_buffer);
if (result != JVMCIEnv::ok) { if (result != JVMCIEnv::ok) {
return result; return result;
} }


@ -161,7 +161,7 @@ KlassHandle JVMCIEnv::get_klass_by_name_impl(KlassHandle& accessing_klass,
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
KlassHandle JVMCIEnv::get_klass_by_name(KlassHandle& accessing_klass, KlassHandle JVMCIEnv::get_klass_by_name(KlassHandle accessing_klass,
Symbol* klass_name, Symbol* klass_name,
bool require_local) { bool require_local) {
ResourceMark rm; ResourceMark rm;
@ -177,7 +177,7 @@ KlassHandle JVMCIEnv::get_klass_by_name(KlassHandle& accessing_klass,
KlassHandle JVMCIEnv::get_klass_by_index_impl(const constantPoolHandle& cpool, KlassHandle JVMCIEnv::get_klass_by_index_impl(const constantPoolHandle& cpool,
int index, int index,
bool& is_accessible, bool& is_accessible,
KlassHandle& accessor) { KlassHandle accessor) {
JVMCI_EXCEPTION_CONTEXT; JVMCI_EXCEPTION_CONTEXT;
KlassHandle klass (THREAD, ConstantPool::klass_at_if_loaded(cpool, index)); KlassHandle klass (THREAD, ConstantPool::klass_at_if_loaded(cpool, index));
Symbol* klass_name = NULL; Symbol* klass_name = NULL;
@ -218,7 +218,7 @@ KlassHandle JVMCIEnv::get_klass_by_index_impl(const constantPoolHandle& cpool,
KlassHandle JVMCIEnv::get_klass_by_index(const constantPoolHandle& cpool, KlassHandle JVMCIEnv::get_klass_by_index(const constantPoolHandle& cpool,
int index, int index,
bool& is_accessible, bool& is_accessible,
KlassHandle& accessor) { KlassHandle accessor) {
ResourceMark rm; ResourceMark rm;
KlassHandle result = get_klass_by_index_impl(cpool, index, is_accessible, accessor); KlassHandle result = get_klass_by_index_impl(cpool, index, is_accessible, accessor);
return result; return result;
@ -229,7 +229,7 @@ KlassHandle JVMCIEnv::get_klass_by_index(const constantPoolHandle& cpool,
// //
// Implementation note: the results of field lookups are cached // Implementation note: the results of field lookups are cached
// in the accessor klass. // in the accessor klass.
void JVMCIEnv::get_field_by_index_impl(instanceKlassHandle& klass, fieldDescriptor& field_desc, void JVMCIEnv::get_field_by_index_impl(instanceKlassHandle klass, fieldDescriptor& field_desc,
int index) { int index) {
JVMCI_EXCEPTION_CONTEXT; JVMCI_EXCEPTION_CONTEXT;
@ -270,7 +270,7 @@ void JVMCIEnv::get_field_by_index_impl(instanceKlassHandle& klass, fieldDescript
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// Get a field by index from a klass's constant pool. // Get a field by index from a klass's constant pool.
void JVMCIEnv::get_field_by_index(instanceKlassHandle& accessor, fieldDescriptor& fd, int index) { void JVMCIEnv::get_field_by_index(instanceKlassHandle accessor, fieldDescriptor& fd, int index) {
ResourceMark rm; ResourceMark rm;
return get_field_by_index_impl(accessor, fd, index); return get_field_by_index_impl(accessor, fd, index);
} }
@ -278,8 +278,8 @@ void JVMCIEnv::get_field_by_index(instanceKlassHandle& accessor, fieldDescriptor
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// Perform an appropriate method lookup based on accessor, holder, // Perform an appropriate method lookup based on accessor, holder,
// name, signature, and bytecode. // name, signature, and bytecode.
methodHandle JVMCIEnv::lookup_method(instanceKlassHandle& h_accessor, methodHandle JVMCIEnv::lookup_method(instanceKlassHandle h_accessor,
instanceKlassHandle& h_holder, instanceKlassHandle h_holder,
Symbol* name, Symbol* name,
Symbol* sig, Symbol* sig,
Bytecodes::Code bc) { Bytecodes::Code bc) {
@ -314,7 +314,7 @@ methodHandle JVMCIEnv::lookup_method(instanceKlassHandle& h_accessor,
// ------------------------------------------------------------------ // ------------------------------------------------------------------
methodHandle JVMCIEnv::get_method_by_index_impl(const constantPoolHandle& cpool, methodHandle JVMCIEnv::get_method_by_index_impl(const constantPoolHandle& cpool,
int index, Bytecodes::Code bc, int index, Bytecodes::Code bc,
instanceKlassHandle& accessor) { instanceKlassHandle accessor) {
if (bc == Bytecodes::_invokedynamic) { if (bc == Bytecodes::_invokedynamic) {
ConstantPoolCacheEntry* cpce = cpool->invokedynamic_cp_cache_entry_at(index); ConstantPoolCacheEntry* cpce = cpool->invokedynamic_cp_cache_entry_at(index);
bool is_resolved = !cpce->is_f1_null(); bool is_resolved = !cpce->is_f1_null();
@ -379,7 +379,7 @@ methodHandle JVMCIEnv::get_method_by_index_impl(const constantPoolHandle& cpool,
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------
instanceKlassHandle JVMCIEnv::get_instance_klass_for_declared_method_holder(KlassHandle& method_holder) { instanceKlassHandle JVMCIEnv::get_instance_klass_for_declared_method_holder(KlassHandle method_holder) {
// For the case of <array>.clone(), the method holder can be an ArrayKlass* // For the case of <array>.clone(), the method holder can be an ArrayKlass*
// instead of an InstanceKlass*. For that case simply pretend that the // instead of an InstanceKlass*. For that case simply pretend that the
// declared holder is Object.clone since that's where the call will bottom out. // declared holder is Object.clone since that's where the call will bottom out.
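// A sketch of that conversion (predicate names assumed for illustration):
//   if (method_holder->oop_is_instance())
//     return instanceKlassHandle(thread, method_holder());
//   if (method_holder->oop_is_array())
//     return instanceKlassHandle(thread, SystemDictionary::Object_klass());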
@ -397,7 +397,7 @@ instanceKlassHandle JVMCIEnv::get_instance_klass_for_declared_method_holder(Klas
// ------------------------------------------------------------------ // ------------------------------------------------------------------
methodHandle JVMCIEnv::get_method_by_index(const constantPoolHandle& cpool, methodHandle JVMCIEnv::get_method_by_index(const constantPoolHandle& cpool,
int index, Bytecodes::Code bc, int index, Bytecodes::Code bc,
instanceKlassHandle& accessor) { instanceKlassHandle accessor) {
ResourceMark rm; ResourceMark rm;
return get_method_by_index_impl(cpool, index, bc, accessor); return get_method_by_index_impl(cpool, index, bc, accessor);
} }
@ -452,7 +452,7 @@ JVMCIEnv::CodeInstallResult JVMCIEnv::check_for_system_dictionary_modification(D
// ------------------------------------------------------------------ // ------------------------------------------------------------------
JVMCIEnv::CodeInstallResult JVMCIEnv::register_method( JVMCIEnv::CodeInstallResult JVMCIEnv::register_method(
methodHandle& method, const methodHandle& method,
nmethod*& nm, nmethod*& nm,
int entry_bci, int entry_bci,
CodeOffsets* offsets, CodeOffsets* offsets,


@@ -78,7 +78,7 @@ public:
   // The CI treats a klass as loaded if it is consistently defined in
   // another loader, even if it hasn't yet been loaded in all loaders
   // that could potentially see it via delegation.
-  static KlassHandle get_klass_by_name(KlassHandle& accessing_klass,
+  static KlassHandle get_klass_by_name(KlassHandle accessing_klass,
                                        Symbol* klass_name,
                                        bool require_local);
@@ -86,12 +86,12 @@ public:
   static KlassHandle get_klass_by_index(const constantPoolHandle& cpool,
                                         int klass_index,
                                         bool& is_accessible,
-                                        KlassHandle& loading_klass);
-  static void get_field_by_index(instanceKlassHandle& loading_klass, fieldDescriptor& fd,
+                                        KlassHandle loading_klass);
+  static void get_field_by_index(instanceKlassHandle loading_klass, fieldDescriptor& fd,
                                  int field_index);
   static methodHandle get_method_by_index(const constantPoolHandle& cpool,
                                           int method_index, Bytecodes::Code bc,
-                                          instanceKlassHandle& loading_klass);
+                                          instanceKlassHandle loading_klass);
   JVMCIEnv(CompileTask* task, int system_dictionary_modification_counter);
@@ -112,17 +112,17 @@ private:
   static KlassHandle get_klass_by_index_impl(const constantPoolHandle& cpool,
                                              int klass_index,
                                              bool& is_accessible,
-                                             KlassHandle& loading_klass);
-  static void get_field_by_index_impl(instanceKlassHandle& loading_klass, fieldDescriptor& fd,
+                                             KlassHandle loading_klass);
+  static void get_field_by_index_impl(instanceKlassHandle loading_klass, fieldDescriptor& fd,
                                       int field_index);
   static methodHandle get_method_by_index_impl(const constantPoolHandle& cpool,
                                                int method_index, Bytecodes::Code bc,
-                                               instanceKlassHandle& loading_klass);
+                                               instanceKlassHandle loading_klass);
   // Helper methods
   static bool check_klass_accessibility(KlassHandle accessing_klass, KlassHandle resolved_klass);
-  static methodHandle lookup_method(instanceKlassHandle& accessor,
-                                    instanceKlassHandle& holder,
+  static methodHandle lookup_method(instanceKlassHandle accessor,
+                                    instanceKlassHandle holder,
                                     Symbol* name,
                                     Symbol* sig,
                                     Bytecodes::Code bc);
@@ -142,7 +142,7 @@ public:
   // Register the result of a compilation.
   static JVMCIEnv::CodeInstallResult register_method(
-      methodHandle& target,
+      const methodHandle& target,
       nmethod*& nm,
       int entry_bci,
       CodeOffsets* offsets,
@@ -166,7 +166,7 @@ public:
   // InstanceKlass*. This is needed since the holder of a method in
   // the bytecodes could be an array type. Basically this converts
   // array types into java/lang/Object and other types stay as they are.
-  static instanceKlassHandle get_instance_klass_for_declared_method_holder(KlassHandle& klass);
+  static instanceKlassHandle get_instance_klass_for_declared_method_holder(KlassHandle klass);
 };
 #endif // SHARE_VM_JVMCI_JVMCIENV_HPP
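The signature changes in this header follow a single rule: KlassHandle and
instanceKlassHandle parameters move from non-const reference to by-value, and
methodHandle moves to const reference. A handle is a one-word wrapper, so the
copy is trivial, and a by-value (or const-ref) parameter also accepts
temporaries, which a non-const reference cannot bind to. A minimal sketch of
that last point, with an illustrative type rather than HotSpot's:

    #include <cassert>

    // Illustrative stand-in for HotSpot's KlassHandle: one word wide,
    // so passing it by value copies a single pointer.
    struct HandleSketch {
      void* _value;
      explicit HandleSketch(void* v) : _value(v) {}
    };

    // By-value parameter: temporaries are accepted, the copy is trivial.
    static void by_value(HandleSketch h) { assert(h._value != nullptr); }

    // A non-const reference version could not take a temporary:
    //   void by_ref(HandleSketch& h);
    //   by_ref(HandleSketch(&x));   // would not compile

    int main() {
      int x = 0;
      by_value(HandleSketch(&x));    // fine: temporary binds by value
      return 0;
    }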


@@ -315,10 +315,10 @@ class name : AllStatic {
 #define FIELD(name, type, accessor, cast) \
   static int _##name##_offset; \
   static type name(oop obj) { check(obj, #name, _##name##_offset); return cast obj->accessor(_##name##_offset); } \
-  static type name(Handle& obj) { check(obj(), #name, _##name##_offset); return cast obj->accessor(_##name##_offset); } \
+  static type name(Handle obj) { check(obj(), #name, _##name##_offset); return cast obj->accessor(_##name##_offset); } \
   static type name(jobject obj) { check(JNIHandles::resolve(obj), #name, _##name##_offset); return cast JNIHandles::resolve(obj)->accessor(_##name##_offset); } \
   static void set_##name(oop obj, type x) { check(obj, #name, _##name##_offset); obj->accessor##_put(_##name##_offset, x); } \
-  static void set_##name(Handle& obj, type x) { check(obj(), #name, _##name##_offset); obj->accessor##_put(_##name##_offset, x); } \
+  static void set_##name(Handle obj, type x) { check(obj(), #name, _##name##_offset); obj->accessor##_put(_##name##_offset, x); } \
   static void set_##name(jobject obj, type x) { check(JNIHandles::resolve(obj), #name, _##name##_offset); JNIHandles::resolve(obj)->accessor##_put(_##name##_offset, x); }
 #define EMPTY_CAST


@@ -59,7 +59,11 @@ bool JVMCIRuntime::_shutdown_called = false;
 static const char* OPTION_PREFIX = "jvmci.option.";
 static const size_t OPTION_PREFIX_LEN = strlen(OPTION_PREFIX);
-BasicType JVMCIRuntime::kindToBasicType(jchar ch) {
+BasicType JVMCIRuntime::kindToBasicType(Handle kind, TRAPS) {
+  if (kind.is_null()) {
+    THROW_(vmSymbols::java_lang_NullPointerException(), T_ILLEGAL);
+  }
+  jchar ch = JavaKind::typeChar(kind);
   switch(ch) {
     case 'z': return T_BOOLEAN;
     case 'b': return T_BYTE;
@@ -72,10 +76,8 @@ BasicType JVMCIRuntime::kindToBasicType(jchar ch) {
     case 'a': return T_OBJECT;
     case '-': return T_ILLEGAL;
     default:
-      fatal("unexpected Kind: %c", ch);
-      break;
+      JVMCI_ERROR_(T_ILLEGAL, "unexpected Kind: %c", ch);
   }
-  return T_ILLEGAL;
 }
 // Simple helper to see if the caller of a runtime stub which


@@ -29,6 +29,17 @@
 #include "runtime/arguments.hpp"
 #include "runtime/deoptimization.hpp"
+#define JVMCI_ERROR(...) \
+  { Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::jdk_vm_ci_common_JVMCIError(), __VA_ARGS__); return; }
+#define JVMCI_ERROR_(ret, ...) \
+  { Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::jdk_vm_ci_common_JVMCIError(), __VA_ARGS__); return ret; }
+#define JVMCI_ERROR_0(...)    JVMCI_ERROR_(0, __VA_ARGS__)
+#define JVMCI_ERROR_NULL(...) JVMCI_ERROR_(NULL, __VA_ARGS__)
+#define JVMCI_ERROR_OK(...)   JVMCI_ERROR_(JVMCIEnv::ok, __VA_ARGS__)
+#define CHECK_OK              CHECK_(JVMCIEnv::ok)
 class ParseClosure : public StackObj {
   int _lineNo;
   char* _filename;
@@ -171,7 +182,7 @@ class JVMCIRuntime: public AllStatic {
   } \
   (void)(0
-  static BasicType kindToBasicType(jchar ch);
+  static BasicType kindToBasicType(Handle kind, TRAPS);
   // The following routines are all called from compiled JVMCI code
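The JVMCI_ERROR_ family above encodes one pattern: throw a Java-visible
JVMCIError via Exceptions::fthrow, then immediately return a caller-supplied
default value so the native frame unwinds cleanly instead of aborting the VM
with fatal(). A self-contained sketch of the same throw-and-return-default
shape; REPORT_ERR_ is a hypothetical stand-in for JVMCI_ERROR_:

    #include <cstdio>

    // Hypothetical stand-in for JVMCI_ERROR_: report the error, then
    // return 'ret' so the caller sees a well-defined default value.
    #define REPORT_ERR_(ret, msg) \
      { std::fprintf(stderr, "error: %s\n", msg); return ret; }

    static int parse_digit(char c) {
      if (c >= '0' && c <= '9') return c - '0';
      REPORT_ERR_(-1, "unexpected character");  // -1 plays the T_ILLEGAL role
    }

    int main() { return parse_digit('x') == -1 ? 0 : 1; }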


@@ -86,6 +86,7 @@
   template(jdk_vm_ci_code_VirtualObject,                 "jdk/vm/ci/code/VirtualObject") \
   template(jdk_vm_ci_code_RegisterSaveLayout,            "jdk/vm/ci/code/RegisterSaveLayout") \
   template(jdk_vm_ci_code_InvalidInstalledCodeException, "jdk/vm/ci/code/InvalidInstalledCodeException") \
+  template(jdk_vm_ci_common_JVMCIError,                  "jdk/vm/ci/common/JVMCIError") \
   template(compileMethod_name,                           "compileMethod") \
   template(compileMethod_signature,                      "(Ljdk/vm/ci/hotspot/HotSpotResolvedJavaMethod;IJI)V") \
   template(fromMetaspace_name,                           "fromMetaspace") \


@@ -47,7 +47,10 @@ CodeHeap::CodeHeap(const char* name, const int code_blob_type)
   _freelist_segments      = 0;
   _freelist_length        = 0;
   _max_allocated_capacity = 0;
-  _was_full               = false;
+  _blob_count             = 0;
+  _nmethod_count          = 0;
+  _adapter_count          = 0;
+  _full_count             = 0;
 }
@@ -185,6 +188,7 @@ void* CodeHeap::allocate(size_t instance_size) {
     assert(!block->free(), "must be marked free");
     DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
     _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
+    _blob_count++;
     return block->allocated_space();
   }
@@ -198,6 +202,7 @@ void* CodeHeap::allocate(size_t instance_size) {
     _next_segment += number_of_segments;
     DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
     _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
+    _blob_count++;
     return b->allocated_space();
   } else {
     return NULL;


@@ -100,7 +100,11 @@ class CodeHeap : public CHeapObj<mtCode> {
   const char* _name;                  // Name of the CodeHeap
   const int   _code_blob_type;        // CodeBlobType it contains
-  bool        _was_full;              // True if the code heap was full
+  int         _blob_count;            // Number of CodeBlobs
+  int         _nmethod_count;         // Number of nmethods
+  int         _adapter_count;         // Number of adapters
+  int         _full_count;            // Number of times the code heap was full
   enum { free_sentinel = 0xFF };
@@ -179,8 +183,13 @@ class CodeHeap : public CHeapObj<mtCode> {
   // Debugging / Profiling
   const char* name() const            { return _name; }
-  bool was_full()                     { return _was_full; }
-  void report_full()                  { _was_full = true; }
+  int  blob_count()                   { return _blob_count; }
+  int  nmethod_count()                { return _nmethod_count; }
+  void set_nmethod_count(int count)   { _nmethod_count = count; }
+  int  adapter_count()                { return _adapter_count; }
+  void set_adapter_count(int count)   { _adapter_count = count; }
+  int  full_count()                   { return _full_count; }
+  void report_full()                  { _full_count++; }
 private:
   size_t heap_unallocated_capacity() const;


@@ -579,12 +579,45 @@ bool Method::can_be_statically_bound() const {
 }
 bool Method::is_accessor() const {
+  return is_getter() || is_setter();
+}
+bool Method::is_getter() const {
   if (code_size() != 5) return false;
   if (size_of_parameters() != 1) return false;
-  if (java_code_at(0) != Bytecodes::_aload_0 ) return false;
+  if (java_code_at(0) != Bytecodes::_aload_0) return false;
   if (java_code_at(1) != Bytecodes::_getfield) return false;
-  if (java_code_at(4) != Bytecodes::_areturn &&
-      java_code_at(4) != Bytecodes::_ireturn ) return false;
+  switch (java_code_at(4)) {
+    case Bytecodes::_ireturn:
+    case Bytecodes::_lreturn:
+    case Bytecodes::_freturn:
+    case Bytecodes::_dreturn:
+    case Bytecodes::_areturn:
+      break;
+    default:
+      return false;
+  }
+  return true;
+}
+bool Method::is_setter() const {
+  if (code_size() != 6) return false;
+  if (java_code_at(0) != Bytecodes::_aload_0) return false;
+  switch (java_code_at(1)) {
+    case Bytecodes::_iload_1:
+    case Bytecodes::_aload_1:
+    case Bytecodes::_fload_1:
+      if (size_of_parameters() != 2) return false;
+      break;
+    case Bytecodes::_dload_1:
+    case Bytecodes::_lload_1:
+      if (size_of_parameters() != 3) return false;
+      break;
+    default:
+      return false;
+  }
+  if (java_code_at(2) != Bytecodes::_putfield) return false;
+  if (java_code_at(5) != Bytecodes::_return) return false;
   return true;
 }
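The new predicates are pure bytecode pattern matches. A getter body is exactly
aload_0; getfield #index; <t>return -- five bytes, because getfield carries a
two-byte constant-pool index -- and a setter is the six-byte
aload_0; <t>load_1; putfield #index; return shape, with size_of_parameters()
distinguishing one-slot from two-slot (long/double) values. A standalone
sketch of the getter match, using the JVM specification's real opcode
encodings but otherwise illustrative:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Opcode values as defined by the JVM specification.
    enum : uint8_t { ALOAD_0 = 0x2a, GETFIELD = 0xb4,
                     IRETURN = 0xac, ARETURN = 0xb0 };

    // Sketch of the is_getter() idea: a getter body is exactly
    //   aload_0; getfield <2-byte cp index>; <type>return  (5 bytes)
    static bool looks_like_getter(const uint8_t* code, size_t len) {
      if (len != 5) return false;
      if (code[0] != ALOAD_0 || code[1] != GETFIELD) return false;
      switch (code[4]) {            // code[2..3] hold the cp index
        case IRETURN: case ARETURN: // full set also has l/f/dreturn
          return true;
        default:
          return false;
      }
    }

    int main() {
      const uint8_t getter[5] = { ALOAD_0, GETFIELD, 0x00, 0x02, IRETURN };
      std::printf("%d\n", looks_like_getter(getter, 5) ? 1 : 0);  // prints 1
    }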


@@ -595,6 +595,12 @@ class Method : public Metadata {
   // returns true if the method is an accessor function (setter/getter).
   bool is_accessor() const;
+  // returns true if the method is a getter
+  bool is_getter() const;
+  // returns true if the method is a setter
+  bool is_setter() const;
   // returns true if the method does nothing but return a constant of primitive type
   bool is_constant_getter() const;


@@ -542,10 +542,11 @@ static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* work
     if (i == cfg->number_of_blocks()) {
       break;                    // Got 'em all
     }
-#ifndef PRODUCT
-    if( PrintOpto && Verbose )
+    if (PrintOpto && Verbose) {
       tty->print_cr("retripping live calc");
-#endif
+    }
     // Force the issue (expensively): recheck everybody
     for (i = 1; i < cfg->number_of_blocks(); i++) {
       worklist->push(cfg->get_block(i));


@@ -186,9 +186,9 @@
           "Maximum number of unrolls for main loop") \
           range(0, max_jint) \
           \
-  product(bool, SuperWordLoopUnrollAnalysis, false, \
+  product_pd(bool, SuperWordLoopUnrollAnalysis, \
           "Map number of unrolls for main loop via " \
           "Superword Level Parallelism analysis") \
           \
   notproduct(bool, TraceSuperWordLoopUnrollAnalysis, false, \
           "Trace what Superword Level Parallelism analysis applies") \


@@ -778,7 +778,7 @@ bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
   }
   if (is_CallJava() && as_CallJava()->method() != NULL) {
     ciMethod* meth = as_CallJava()->method();
-    if (meth->is_accessor()) {
+    if (meth->is_getter()) {
       return false;
     }
     // May modify (by reflection) if an boxing object is passed


@@ -270,7 +270,6 @@ class IfNode : public MultiBranchNode {
   virtual uint size_of() const { return sizeof(*this); }
 private:
-  ProjNode* range_check_trap_proj(int& flip, Node*& l, Node*& r);
   ProjNode* range_check_trap_proj() {
     int flip_test = 0;
     Node* l = NULL;
@@ -283,7 +282,7 @@ private:
   bool is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn);
   bool has_shared_region(ProjNode* proj, ProjNode*& success, ProjNode*& fail);
   bool has_only_uncommon_traps(ProjNode* proj, ProjNode*& success, ProjNode*& fail, PhaseIterGVN* igvn);
-  static void merge_uncommon_traps(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn);
+  Node* merge_uncommon_traps(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn);
   static void improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGVN* igvn);
   bool is_cmp_with_loadrange(ProjNode* proj);
   bool is_null_check(ProjNode* proj, PhaseIterGVN* igvn);
@@ -292,6 +291,12 @@ private:
   ProjNode* uncommon_trap_proj(CallStaticJavaNode*& call) const;
   bool fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn);
+protected:
+  ProjNode* range_check_trap_proj(int& flip, Node*& l, Node*& r);
+  Node* Ideal_common(PhaseGVN *phase, bool can_reshape);
+  Node* dominated_by(Node* prev_dom, PhaseIterGVN* igvn);
+  Node* search_identical(int dist);
 public:
   // Degrees of branch prediction probability by order of magnitude:
@@ -375,8 +380,6 @@ public:
   virtual const Type *Value( PhaseTransform *phase ) const;
   virtual int required_outcnt() const { return 2; }
   virtual const RegMask &out_RegMask() const;
-  void dominated_by(Node* prev_dom, PhaseIterGVN* igvn);
-  int is_range_check(Node* &range, Node* &index, jint &offset);
   Node* fold_compares(PhaseIterGVN* phase);
   static Node* up_one_dom(Node* curr, bool linear_only = false);
@@ -391,6 +394,20 @@ public:
 #endif
 };
+class RangeCheckNode : public IfNode {
+private:
+  int is_range_check(Node* &range, Node* &index, jint &offset);
+public:
+  RangeCheckNode(Node* control, Node *b, float p, float fcnt)
+    : IfNode(control, b, p, fcnt) {
+    init_class_id(Class_RangeCheck);
+  }
+  virtual int Opcode() const;
+  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
+};
 class IfProjNode : public CProjNode {
  public:
   IfProjNode(IfNode *ifnode, uint idx) : CProjNode(ifnode,idx) {}
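RangeCheckNode subclasses IfNode: it keeps its own Opcode() and Ideal() so
range checks can be optimized (and matched) specially, while the protected
Ideal_common()/dominated_by()/search_identical() helpers let both classes
share the generic If machinery. A compilable sketch of that shared-common-step
pattern, with illustrative types and a NodeSentinel-style marker rather than
HotSpot's classes:

    #include <cstdint>
    #include <cstdio>

    struct NodeS { virtual ~NodeS() {} };
    // Sentinel meaning "common step made no transformation; continue".
    static NodeS* const kSentinel = reinterpret_cast<NodeS*>(uintptr_t(-1));

    struct IfNodeS : NodeS {
      NodeS* Ideal_common() {
        // shared work: dead-region removal, split_if, ... (elided)
        return kSentinel;
      }
      virtual NodeS* Ideal() {
        NodeS* res = Ideal_common();
        if (res != kSentinel) return res;
        return nullptr;              // generic If transformations
      }
    };

    struct RangeCheckNodeS : IfNodeS {
      NodeS* Ideal() override {
        NodeS* res = Ideal_common();
        if (res != kSentinel) return res;
        return nullptr;              // range-check-only transformations
      }
    };

    int main() {
      RangeCheckNodeS rc;
      std::printf("%s\n", rc.Ideal() == nullptr ? "no progress" : "progress");
    }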


@@ -138,6 +138,7 @@ macro(Goto)
 macro(Halt)
 macro(HasNegatives)
 macro(If)
+macro(RangeCheck)
 macro(IfFalse)
 macro(IfTrue)
 macro(Initialize)


@@ -707,7 +707,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
     _replay_inline_data = ciReplay::load_inline_data(method(), entry_bci(), ci_env->comp_level());
   }
 #endif
-  set_print_inlining(directive->PrintInliningOption NOT_PRODUCT( || PrintOptoInlining));
+  set_print_inlining(directive->PrintInliningOption || PrintOptoInlining);
   set_print_intrinsics(directive->PrintIntrinsicsOption);
   set_has_irreducible_loop(true); // conservative until build_loop_tree() reset it
@@ -3181,6 +3181,13 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
       n->set_req(MemBarNode::Precedent, top());
     }
     break;
+  case Op_RangeCheck: {
+    RangeCheckNode* rc = n->as_RangeCheck();
+    Node* iff = new IfNode(rc->in(0), rc->in(1), rc->_prob, rc->_fcnt);
+    n->subsume_by(iff, this);
+    frc._tests.push(iff);
+    break;
+  }
   default:
     assert( !n->is_Call(), "" );
     assert( !n->is_Mem(), "" );
@@ -3189,8 +3196,9 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
   }
   // Collect CFG split points
-  if (n->is_MultiBranch())
+  if (n->is_MultiBranch() && !n->is_RangeCheck()) {
     frc._tests.push(n);
+  }
 }
 //------------------------------final_graph_reshaping_walk---------------------


@@ -45,7 +45,7 @@ void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMeth
   if (TraceTypeProfile || C->print_inlining()) {
     outputStream* out = tty;
     if (!C->print_inlining()) {
-      if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
+      if (!PrintOpto && !PrintCompilation) {
         method->print_short_name();
         tty->cr();
       }
@@ -426,12 +426,10 @@ void Parse::do_call() {
   // uncommon-trap when callee is unloaded, uninitialized or will not link
   // bailout when too many arguments for register representation
   if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
-#ifndef PRODUCT
     if (PrintOpto && (Verbose || WizardMode)) {
       method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
       orig_callee->print_name(); tty->cr();
     }
-#endif
     return;
   }
   assert(holder_klass->is_loaded(), "");
@@ -634,12 +632,10 @@ void Parse::do_call() {
   // If the return type of the method is not loaded, assert that the
   // value we got is a null.  Otherwise, we need to recompile.
   if (!rtype->is_loaded()) {
-#ifndef PRODUCT
     if (PrintOpto && (Verbose || WizardMode)) {
       method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
       cg->method()->print_name(); tty->cr();
     }
-#endif
     if (C->log() != NULL) {
       C->log()->elem("assert_null reason='return' klass='%d'",
                      C->log()->identify(rtype));
@@ -851,11 +847,9 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
     if (remaining == 1) {
       push_ex_oop(ex_node);        // Push exception oop for handler
-#ifndef PRODUCT
       if (PrintOpto && WizardMode) {
         tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
       }
-#endif
       merge_exception(handler_bci); // jump to handler
       return;                       // No more handling to be done here!
     }
@@ -882,13 +876,11 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
       assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
       Node* ex_oop = _gvn.transform(new CheckCastPPNode(control(), ex_node, tinst));
       push_ex_oop(ex_oop);      // Push exception oop for handler
-#ifndef PRODUCT
       if (PrintOpto && WizardMode) {
         tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
         klass->print_name();
         tty->cr();
       }
-#endif
       merge_exception(handler_bci);
     }
     set_control(not_subtype_ctrl);
@@ -1067,13 +1059,11 @@ ciMethod* Compile::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass*
     // such method can be changed when its class is redefined.
     ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
     if (exact_method != NULL) {
-#ifndef PRODUCT
       if (PrintOpto) {
         tty->print("  Calling method via exact type @%d --- ", bci);
         exact_method->print_name();
         tty->cr();
       }
-#endif
       return exact_method;
     }
   }


@@ -1457,7 +1457,11 @@ void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
 // factory methods in "int adr_idx"
 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                           int adr_idx,
-                          MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency, bool require_atomic_access) {
+                          MemNode::MemOrd mo,
+                          LoadNode::ControlDependency control_dependency,
+                          bool require_atomic_access,
+                          bool unaligned,
+                          bool mismatched) {
   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
   const TypePtr* adr_type = NULL; // debug-mode-only argument
   debug_only(adr_type = C->get_adr_type(adr_idx));
@@ -1470,6 +1474,12 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
   } else {
     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency);
   }
+  if (unaligned) {
+    ld->as_Load()->set_unaligned_access();
+  }
+  if (mismatched) {
+    ld->as_Load()->set_mismatched_access();
+  }
   ld = _gvn.transform(ld);
   if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) {
     // Improve graph before escape analysis and boxing elimination.
@@ -1481,7 +1491,9 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
                                 int adr_idx,
                                 MemNode::MemOrd mo,
-                                bool require_atomic_access) {
+                                bool require_atomic_access,
+                                bool unaligned,
+                                bool mismatched) {
   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
   const TypePtr* adr_type = NULL;
   debug_only(adr_type = C->get_adr_type(adr_idx));
@@ -1494,6 +1506,12 @@ Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
   } else {
     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   }
+  if (unaligned) {
+    st->as_Store()->set_unaligned_access();
+  }
+  if (mismatched) {
+    st->as_Store()->set_mismatched_access();
+  }
   st = _gvn.transform(st);
   set_memory(st, adr_idx);
   // Back-to-back stores can only remove intermediate store with DU info
@@ -1587,7 +1605,8 @@ Node* GraphKit::store_oop(Node* ctl,
                           const TypeOopPtr* val_type,
                           BasicType bt,
                           bool use_precise,
-                          MemNode::MemOrd mo) {
+                          MemNode::MemOrd mo,
+                          bool mismatched) {
   // Transformation of a value which could be NULL pointer (CastPP #NULL)
   // could be delayed during Parse (for example, in adjust_map_after_if()).
   // Execute transformation here to avoid barrier generation in such case.
@@ -1607,7 +1626,7 @@ Node* GraphKit::store_oop(Node* ctl,
               NULL /* pre_val */,
               bt);
-  Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo);
+  Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
   return store;
 }
@@ -1619,7 +1638,8 @@ Node* GraphKit::store_oop_to_unknown(Node* ctl,
                                      const TypePtr* adr_type,
                                      Node* val,
                                      BasicType bt,
-                                     MemNode::MemOrd mo) {
+                                     MemNode::MemOrd mo,
+                                     bool mismatched) {
   Compile::AliasType* at = C->alias_type(adr_type);
   const TypeOopPtr* val_type = NULL;
   if (adr_type->isa_instptr()) {
@@ -1638,7 +1658,7 @@ Node* GraphKit::store_oop_to_unknown(Node* ctl,
   if (val_type == NULL) {
     val_type = TypeInstPtr::BOTTOM;
   }
-  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
+  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
 }


@@ -513,23 +513,28 @@ class GraphKit : public Phase {
   // of volatile fields.
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
-                  bool require_atomic_access = false) {
+                  bool require_atomic_access = false, bool unaligned = false,
+                  bool mismatched = false) {
     // This version computes alias_index from bottom_type
     return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
-                     mo, control_dependency, require_atomic_access);
+                     mo, control_dependency, require_atomic_access,
+                     unaligned, mismatched);
   }
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
-                  bool require_atomic_access = false) {
+                  bool require_atomic_access = false, bool unaligned = false,
+                  bool mismatched = false) {
     // This version computes alias_index from an address type
     assert(adr_type != NULL, "use other make_load factory");
     return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
-                     mo, control_dependency, require_atomic_access);
+                     mo, control_dependency, require_atomic_access,
+                     unaligned, mismatched);
   }
   // This is the base version which is given an alias index.
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
-                  bool require_atomic_access = false);
+                  bool require_atomic_access = false, bool unaligned = false,
+                  bool mismatched = false);
   // Create & transform a StoreNode and store the effect into the
   // parser's memory state.
@@ -542,19 +547,24 @@ class GraphKit : public Phase {
   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                         const TypePtr* adr_type,
                         MemNode::MemOrd mo,
-                        bool require_atomic_access = false) {
+                        bool require_atomic_access = false,
+                        bool unaligned = false,
+                        bool mismatched = false) {
     // This version computes alias_index from an address type
     assert(adr_type != NULL, "use other store_to_memory factory");
     return store_to_memory(ctl, adr, val, bt,
                            C->get_alias_index(adr_type),
-                           mo, require_atomic_access);
+                           mo, require_atomic_access,
+                           unaligned, mismatched);
   }
   // This is the base version which is given alias index
   // Return the new StoreXNode
   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                         int adr_idx,
                         MemNode::MemOrd,
-                        bool require_atomic_access = false);
+                        bool require_atomic_access = false,
+                        bool unaligned = false,
+                        bool mismatched = false);
   // All in one pre-barrier, store, post_barrier
@@ -577,7 +587,8 @@ class GraphKit : public Phase {
                   const TypeOopPtr* val_type,
                   BasicType bt,
                   bool use_precise,
-                  MemNode::MemOrd mo);
+                  MemNode::MemOrd mo,
+                  bool mismatched = false);
   Node* store_oop_to_object(Node* ctl,
                             Node* obj,   // containing obj
@@ -608,7 +619,8 @@ class GraphKit : public Phase {
                              const TypePtr* adr_type,
                              Node* val,
                              BasicType bt,
-                             MemNode::MemOrd mo);
+                             MemNode::MemOrd mo,
+                             bool mismatched = false);
   // For the few case where the barriers need special help
   void pre_barrier(bool do_load, Node* ctl,
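All of these factory overloads thread the two new flags down to the single
base version, defaulting them to false so every pre-existing call site
compiles unchanged; only callers that know they generate an unaligned or
type-mismatched access (e.g. Unsafe intrinsics) opt in. A small sketch of that
defaulted-flag threading pattern, with hypothetical names:

    #include <cstdio>

    // Base version: receives every flag explicitly.
    static int base_load(int alias_idx, bool unaligned, bool mismatched) {
      std::printf("alias=%d unaligned=%d mismatched=%d\n",
                  alias_idx, (int)unaligned, (int)mismatched);
      return 0;
    }

    // Convenience overload: computes alias_idx, defaults the new flags,
    // and forwards everything to the base version.
    static int load(bool unaligned = false, bool mismatched = false) {
      int alias_idx = 1;             // stand-in for alias computation
      return base_load(alias_idx, unaligned, mismatched);
    }

    int main() {
      load();                        // old call sites: flags stay false
      load(true, true);              // e.g. a type-punned unsafe access
    }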


@@ -368,7 +368,8 @@ Node* IdealKit::load(Node* ctl,
 Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
                       int adr_idx,
-                      MemNode::MemOrd mo, bool require_atomic_access) {
+                      MemNode::MemOrd mo, bool require_atomic_access,
+                      bool mismatched) {
   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
   const TypePtr* adr_type = NULL;
   debug_only(adr_type = C->get_adr_type(adr_idx));
@@ -379,6 +380,9 @@ Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
   } else {
     st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   }
+  if (mismatched) {
+    st->as_Store()->set_mismatched_access();
+  }
   st = transform(st);
   set_memory(st, adr_idx);


@@ -229,7 +229,9 @@ class IdealKit: public StackObj {
               BasicType bt,
               int adr_idx,
               MemNode::MemOrd mo,
-              bool require_atomic_access = false);
+              bool require_atomic_access = false,
+              bool mismatched = false
+              );
   // Store a card mark ordered after store_oop
   Node* storeCM(Node* ctl,


@@ -23,6 +23,7 @@
  */
 #include "precompiled.hpp"
+#include "ci/ciTypeFlow.hpp"
 #include "memory/allocation.inline.hpp"
 #include "opto/addnode.hpp"
 #include "opto/castnode.hpp"
@@ -305,12 +306,16 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
   Node *b_c = phase->transform(new BoolNode(cmp_c,b->_test._test));
   Node *b_x = phase->transform(new BoolNode(cmp_x,b->_test._test));
   // Make the IfNode
-  IfNode *iff_c = new IfNode(region_c,b_c,iff->_prob,iff->_fcnt);
+  IfNode* iff_c = iff->clone()->as_If();
+  iff_c->set_req(0, region_c);
+  iff_c->set_req(1, b_c);
   igvn->set_type_bottom(iff_c);
   igvn->_worklist.push(iff_c);
   hook->init_req(2, iff_c);
-  IfNode *iff_x = new IfNode(region_x,b_x,iff->_prob, iff->_fcnt);
+  IfNode* iff_x = iff->clone()->as_If();
+  iff_x->set_req(0, region_x);
+  iff_x->set_req(1, b_x);
   igvn->set_type_bottom(iff_x);
   igvn->_worklist.push(iff_x);
   hook->init_req(3, iff_x);
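Replacing 'new IfNode(...)' with 'iff->clone()' matters once RangeCheckNode
exists: a virtual clone preserves the node's concrete class, so splitting a
range check yields two range checks rather than two plain ifs. A minimal
sketch of the idea, with illustrative types rather than HotSpot's:

    #include <iostream>

    struct IfS {
      virtual IfS* clone() const { return new IfS(*this); }
      virtual const char* name() const { return "If"; }
      virtual ~IfS() {}
    };
    struct RangeCheckS : IfS {
      RangeCheckS* clone() const override { return new RangeCheckS(*this); }
      const char* name() const override { return "RangeCheck"; }
    };

    int main() {
      IfS* iff = new RangeCheckS();
      IfS* copy = iff->clone();           // still a RangeCheck
      std::cout << copy->name() << "\n";  // prints "RangeCheck"
      delete copy; delete iff;
    }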
@@ -495,7 +500,7 @@ ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) {
 // Return 0 if not a range check. Return 1 if a range check and set index and
 // offset. Return 2 if we had to negate the test. Index is NULL if the check
 // is versus a constant.
-int IfNode::is_range_check(Node* &range, Node* &index, jint &offset) {
+int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) {
   int flip_test = 0;
   Node* l = NULL;
   Node* r = NULL;
@@ -723,7 +728,7 @@ bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) {
   return ctrl != NULL &&
          ctrl->is_Proj() &&
          ctrl->in(0) != NULL &&
-         ctrl->in(0)->is_If() &&
+         ctrl->in(0)->Opcode() == Op_If &&
          ctrl->in(0)->outcnt() == 2 &&
          ctrl->in(0)->as_If()->cmpi_folds(igvn) &&
          // Must compare same value
@@ -771,6 +776,11 @@ bool IfNode::has_only_uncommon_traps(ProjNode* proj, ProjNode*& success, ProjNod
   CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj(Deoptimization::Reason_none);
   if (otherproj->outcnt() == 1 && dom_unc != NULL) {
+    // We need to re-execute the folded Ifs after deoptimization from the merged traps
+    if (!dom_unc->jvms()->should_reexecute()) {
+      return false;
+    }
     CallStaticJavaNode* unc = NULL;
     ProjNode* unc_proj = uncommon_trap_proj(unc);
     if (unc_proj != NULL && unc_proj->outcnt() == 1) {
@@ -784,12 +794,37 @@ bool IfNode::has_only_uncommon_traps(ProjNode* proj, ProjNode*& success, ProjNod
       } else if (dom_unc->in(0) != otherproj || unc->in(0) != unc_proj) {
         return false;
       }
+      // Different methods and methods containing jsrs are not supported.
+      ciMethod* method = unc->jvms()->method();
+      ciMethod* dom_method = dom_unc->jvms()->method();
+      if (method != dom_method || method->has_jsrs()) {
+        return false;
+      }
+      // Check that both traps are in the same activation of the method (instead
+      // of two activations being inlined through different call sites) by verifying
+      // that the call stacks are equal for both JVMStates.
+      JVMState* dom_caller = dom_unc->jvms()->caller();
+      JVMState* caller = unc->jvms()->caller();
+      if (!dom_caller->same_calls_as(caller)) {
+        return false;
+      }
+      // Check that the bci of the dominating uncommon trap dominates the bci
+      // of the dominated uncommon trap. Otherwise we may not re-execute
+      // the dominated check after deoptimization from the merged uncommon trap.
+      ciTypeFlow* flow = dom_method->get_flow_analysis();
+      int bci = unc->jvms()->bci();
+      int dom_bci = dom_unc->jvms()->bci();
+      if (!flow->is_dominated_by(bci, dom_bci)) {
+        return false;
+      }
       // See merge_uncommon_traps: the reason of the uncommon trap
       // will be changed and the state of the dominating If will be
       // used. Checked that we didn't apply this transformation in a
       // previous compilation and it didn't cause too many traps
-      if (!igvn->C->too_many_traps(dom_unc->jvms()->method(), dom_unc->jvms()->bci(), Deoptimization::Reason_unstable_fused_if) &&
-          !igvn->C->too_many_traps(dom_unc->jvms()->method(), dom_unc->jvms()->bci(), Deoptimization::Reason_range_check)) {
+      if (!igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_unstable_fused_if) &&
+          !igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_range_check)) {
         success = unc_proj;
         fail = unc_proj->other_if_proj();
         return true;
@@ -941,8 +976,8 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f
       if (failtype->_lo > failtype->_hi) {
         // previous if determines the result of this if so
         // replace Bool with constant
-        igvn->hash_delete(this);
-        set_req(1, igvn->intcon(success->_con));
+        igvn->_worklist.push(in(1));
+        igvn->replace_input_of(this, 1, igvn->intcon(success->_con));
         return true;
       }
     }
@@ -961,7 +996,8 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f
   Node* newbool = igvn->transform(new BoolNode(newcmp, cond));
   igvn->replace_input_of(dom_iff, 1, igvn->intcon(proj->_con));
-  set_req(1, newbool);
+  igvn->_worklist.push(in(1));
+  igvn->replace_input_of(this, 1, newbool);
   return true;
 }
@@ -971,7 +1007,10 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f
 // Merge the branches that trap for this If and the dominating If into
 // a single region that branches to the uncommon trap for the
 // dominating If
-void IfNode::merge_uncommon_traps(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn) {
+Node* IfNode::merge_uncommon_traps(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn) {
+  Node* res = this;
+  assert(success->in(0) == this, "bad projection");
   ProjNode* otherproj = proj->other_if_proj();
   CallStaticJavaNode* unc = success->is_uncommon_trap_proj(Deoptimization::Reason_none);
@@ -1007,6 +1046,8 @@ void IfNode::merge_uncommon_traps(ProjNode* proj, ProjNode* success, ProjNode* f
     trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_range_check, action);
     improve_address_types(l, r, fail, igvn);
+    res = igvn->transform(new RangeCheckNode(in(0), in(1), _prob, _fcnt));
   } else if (unc != dom_unc) {
     // If we trap we won't know what CmpI would have caused the trap
     // so use a special trap reason to mark this pair of CmpI nodes as
@@ -1016,6 +1057,7 @@ void IfNode::merge_uncommon_traps(ProjNode* proj, ProjNode* success, ProjNode* f
     trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_unstable_fused_if, action);
   }
   igvn->replace_input_of(dom_unc, TypeFunc::Parms, igvn->intcon(trap_request));
+  return res;
 }
@@ -1209,8 +1251,7 @@ Node* IfNode::fold_compares(PhaseIterGVN* igvn) {
     if (has_only_uncommon_traps(dom_cmp, success, fail, igvn) &&
         // Next call modifies graph so must be last
         fold_compares_helper(dom_cmp, success, fail, igvn)) {
-      merge_uncommon_traps(dom_cmp, success, fail, igvn);
-      return this;
+      return merge_uncommon_traps(dom_cmp, success, fail, igvn);
     }
     return NULL;
   } else if (ctrl->in(0) != NULL &&
@@ -1229,8 +1270,7 @@ Node* IfNode::fold_compares(PhaseIterGVN* igvn) {
         // Next call modifies graph so must be last
         fold_compares_helper(dom_cmp, success, fail, igvn)) {
       reroute_side_effect_free_unc(other_cmp, dom_cmp, igvn);
-      merge_uncommon_traps(dom_cmp, success, fail, igvn);
-      return this;
+      return merge_uncommon_traps(dom_cmp, success, fail, igvn);
     }
   }
 }
@@ -1311,14 +1351,10 @@ struct RangeCheck {
   jint off;
 };
-//------------------------------Ideal------------------------------------------
-// Return a node which is more "ideal" than the current node.  Strip out
-// control copies
-Node *IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+Node* IfNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
   if (remove_dead_region(phase, can_reshape)) return this;
   // No Def-Use info?
   if (!can_reshape) return NULL;
-  PhaseIterGVN *igvn = phase->is_IterGVN();
   // Don't bother trying to transform a dead if
   if (in(0)->is_top()) return NULL;
@@ -1334,24 +1370,291 @@ Node *IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
   if (idt_if != NULL) return idt_if;
   // Try to split the IF
+  PhaseIterGVN *igvn = phase->is_IterGVN();
   Node *s = split_if(this, igvn);
   if (s != NULL) return s;
+  return NodeSentinel;
+}
+//------------------------------Ideal------------------------------------------
+// Return a node which is more "ideal" than the current node.  Strip out
+// control copies
+Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  Node* res = Ideal_common(phase, can_reshape);
+  if (res != NodeSentinel) {
+    return res;
+  }
   // Check for people making a useless boolean: things like
   // if( (x < y ? true : false) ) { ... }
   // Replace with if( x < y ) { ... }
   Node *bol2 = remove_useless_bool(this, phase);
   if( bol2 ) return bol2;
+  if (in(0) == NULL) return NULL;   // Dead loop?
+  PhaseIterGVN *igvn = phase->is_IterGVN();
+  Node* result = fold_compares(igvn);
+  if (result != NULL) {
+    return result;
+  }
+  // Scan for an equivalent test
+  Node *cmp;
+  int dist = 0;                     // Cutoff limit for search
+  int op = Opcode();
+  if( op == Op_If &&
+      (cmp=in(1)->in(1))->Opcode() == Op_CmpP ) {
+    if( cmp->in(2) != NULL &&       // make sure cmp is not already dead
+        cmp->in(2)->bottom_type() == TypePtr::NULL_PTR ) {
+      dist = 64;                    // Limit for null-pointer scans
+    } else {
+      dist = 4;                     // Do not bother for random pointer tests
+    }
+  } else {
+    dist = 4;                       // Limit for random junky scans
+  }
+  Node* prev_dom = search_identical(dist);
+  if (prev_dom == NULL) {
+    return NULL;
+  }
+  // Replace dominated IfNode
+  return dominated_by(prev_dom, igvn);
+}
+//------------------------------dominated_by-----------------------------------
+Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN *igvn) {
+#ifndef PRODUCT
+  if (TraceIterativeGVN) {
+    tty->print("   Removing IfNode: "); this->dump();
+  }
+  if (VerifyOpto && !igvn->allow_progress()) {
+    // Found an equivalent dominating test,
+    // we can not guarantee reaching a fix-point for these during iterativeGVN
+    // since intervening nodes may not change.
+    return NULL;
+  }
+#endif
+  igvn->hash_delete(this);          // Remove self to prevent spurious V-N
+  Node *idom = in(0);
+  // Need opcode to decide which way 'this' test goes
+  int prev_op = prev_dom->Opcode();
+  Node *top = igvn->C->top();       // Shortcut to top
+  // Loop predicates may have depending checks which should not
+  // be skipped. For example, range check predicate has two checks
+  // for lower and upper bounds.
+  ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
+  if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL)
+    prev_dom = idom;
+  // Now walk the current IfNode's projections.
+  // Loop ends when 'this' has no more uses.
+  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
+    Node *ifp = last_out(i);        // Get IfTrue/IfFalse
+    igvn->add_users_to_worklist(ifp);
+    // Check which projection it is and set target.
+    // Data-target is either the dominating projection of the same type
+    // or TOP if the dominating projection is of opposite type.
+    // Data-target will be used as the new control edge for the non-CFG
+    // nodes like Casts and Loads.
+    Node *data_target = (ifp->Opcode() == prev_op) ? prev_dom : top;
+    // Control-target is just the If's immediate dominator or TOP.
+    Node *ctrl_target = (ifp->Opcode() == prev_op) ? idom : top;
+    // For each child of an IfTrue/IfFalse projection, reroute.
+    // Loop ends when projection has no more uses.
+    for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) {
+      Node* s = ifp->last_out(j);   // Get child of IfTrue/IfFalse
+      if( !s->depends_only_on_test() ) {
+        // Find the control input matching this def-use edge.
+        // For Regions it may not be in slot 0.
+        uint l;
+        for( l = 0; s->in(l) != ifp; l++ ) { }
+        igvn->replace_input_of(s, l, ctrl_target);
+      } else {                      // Else, for control producers,
+        igvn->replace_input_of(s, 0, data_target); // Move child to data-target
+      }
+    } // End for each child of a projection
+    igvn->remove_dead_node(ifp);
+  } // End for each IfTrue/IfFalse child of If
+  // Kill the IfNode
+  igvn->remove_dead_node(this);
+  // Must return either the original node (now dead) or a new node
+  // (Do not return a top here, since that would break the uniqueness of top.)
+  return new ConINode(TypeInt::ZERO);
+}
+Node* IfNode::search_identical(int dist) {
   // Setup to scan up the CFG looking for a dominating test
-  Node *dom = in(0);
-  Node *prev_dom = this;
+  Node* dom = in(0);
+  Node* prev_dom = this;
+  int op = Opcode();
+  // Search up the dominator tree for an If with an identical test
+  while( dom->Opcode() != op    ||  // Not same opcode?
+         dom->in(1)    != in(1) ||  // Not same input 1?
+         (req() == 3 && dom->in(2) != in(2)) || // Not same input 2?
+         prev_dom->in(0) != dom ) { // One path of test does not dominate?
+    if( dist < 0 ) return NULL;
+    dist--;
+    prev_dom = dom;
+    dom = up_one_dom( dom );
+    if( !dom ) return NULL;
+  }
+  // Check that we did not follow a loop back to ourselves
+  if( this == dom )
+    return NULL;
+  if( dist > 2 )                    // Add to count of NULL checks elided
+    explicit_null_checks_elided++;
+  return prev_dom;
+}
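search_identical() walks up at most 'dist' immediate dominators looking for a
structurally identical test; the cutoff keeps the scan cheap, with a larger
budget spent only on null-pointer checks. An illustrative sketch of the
bounded scan over a plain dominator chain, not HotSpot code:

    #include <cstdio>

    struct TestNode {
      int opcode;
      int input;        // stand-in for the test's operands
      TestNode* idom;   // immediate dominator
    };

    // Walk up at most 'dist' dominators; return the first node that
    // performs an identical test, or nullptr if the budget runs out.
    static TestNode* search_identical(TestNode* n, int dist) {
      TestNode* dom = n->idom;
      while (dom != nullptr && dist-- > 0) {
        if (dom->opcode == n->opcode && dom->input == n->input)
          return dom;
        dom = dom->idom;
      }
      return nullptr;
    }

    int main() {
      TestNode a = {7, 42, nullptr};
      TestNode b = {9,  1, &a};
      TestNode c = {7, 42, &b};   // same test as 'a'
      std::printf("%d\n", search_identical(&c, 4) == &a ? 1 : 0);  // prints 1
    }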
+//------------------------------Identity---------------------------------------
+// If the test is constant & we match, then we are the input Control
+Node *IfProjNode::Identity(PhaseTransform *phase) {
+  // Can only optimize if cannot go the other way
+  const TypeTuple *t = phase->type(in(0))->is_tuple();
+  if (t == TypeTuple::IFNEITHER ||
+      // kill dead branch first otherwise the IfNode's control will
+      // have 2 control uses (the IfNode that doesn't go away because
+      // it still has uses and this branch of the
+      // If). Node::has_special_unique_user() will cause this node to
+      // be reprocessed once the dead branch is killed.
+      (always_taken(t) && in(0)->outcnt() == 1)) {
+    // IfNode control
+    return in(0)->in(0);
+  }
+  // no progress
+  return this;
+}
+#ifndef PRODUCT
+//-------------------------------related---------------------------------------
+// An IfProjNode's related node set consists of its input (an IfNode) including
+// the IfNode's condition, plus all of its outputs at level 1. In compact mode,
+// the restrictions for IfNode apply (see IfNode::rel).
+void IfProjNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
+  Node* ifNode = this->in(0);
+  in_rel->append(ifNode);
+  if (compact) {
+    ifNode->collect_nodes(in_rel, 3, false, true);
+  } else {
+    ifNode->collect_nodes_in_all_data(in_rel, false);
+  }
+  this->collect_nodes(out_rel, -1, false, false);
+}
+//------------------------------dump_spec--------------------------------------
+void IfNode::dump_spec(outputStream *st) const {
+  st->print("P=%f, C=%f", _prob, _fcnt);
+}
+//-------------------------------related---------------------------------------
+// For an IfNode, the set of related output nodes is just the output nodes till
+// depth 2, i.e, the IfTrue/IfFalse projection nodes plus the nodes they refer.
+// The related input nodes contain no control nodes, but all data nodes
+// pertaining to the condition. In compact mode, the input nodes are collected
+// up to a depth of 3.
+void IfNode::related(GrowableArray <Node *> *in_rel, GrowableArray <Node *> *out_rel, bool compact) const {
+  if (compact) {
+    this->collect_nodes(in_rel, 3, false, true);
+  } else {
+    this->collect_nodes_in_all_data(in_rel, false);
+  }
+  this->collect_nodes(out_rel, -2, false, false);
+}
+#endif
+//------------------------------idealize_test----------------------------------
+// Try to canonicalize tests better.  Peek at the Cmp/Bool/If sequence and
+// come up with a canonical sequence.  Bools getting 'eq', 'gt' and 'ge' forms
+// converted to 'ne', 'le' and 'lt' forms.  IfTrue/IfFalse get swapped as
+// needed.
+static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff) {
+  assert(iff->in(0) != NULL, "If must be live");
+  if (iff->outcnt() != 2) return NULL; // Malformed projections.
+  Node* old_if_f = iff->proj_out(false);
+  Node* old_if_t = iff->proj_out(true);
+  // CountedLoopEnds want the back-control test to be TRUE, irregardless of
+  // whether they are testing a 'gt' or 'lt' condition.  The 'gt' condition
+  // happens in count-down loops
+  if (iff->is_CountedLoopEnd())  return NULL;
+  if (!iff->in(1)->is_Bool())  return NULL; // Happens for partially optimized IF tests
+  BoolNode *b = iff->in(1)->as_Bool();
+  BoolTest bt = b->_test;
+  // Test already in good order?
+  if( bt.is_canonical() )
+    return NULL;
+  // Flip test to be canonical.  Requires flipping the IfFalse/IfTrue and
+  // cloning the IfNode.
+  Node* new_b = phase->transform( new BoolNode(b->in(1), bt.negate()) );
+  if( !new_b->is_Bool() ) return NULL;
+  b = new_b->as_Bool();
+  PhaseIterGVN *igvn = phase->is_IterGVN();
+  assert( igvn, "Test is not canonical in parser?" );
+  // The IF node never really changes, but it needs to be cloned
+  iff = iff->clone()->as_If();
+  iff->set_req(1, b);
+  iff->_prob = 1.0-iff->_prob;
+  Node *prior = igvn->hash_find_insert(iff);
+  if( prior ) {
+    igvn->remove_dead_node(iff);
+    iff = (IfNode*)prior;
+  } else {
+    // Cannot call transform on it just yet
+    igvn->set_type_bottom(iff);
+  }
+  igvn->_worklist.push(iff);
+  // Now handle projections.  Cloning not required.
+  Node* new_if_f = (Node*)(new IfFalseNode( iff ));
+  Node* new_if_t = (Node*)(new IfTrueNode ( iff ));
+  igvn->register_new_node_with_optimizer(new_if_f);
+  igvn->register_new_node_with_optimizer(new_if_t);
+  // Flip test, so flip trailing control
+  igvn->replace_node(old_if_f, new_if_t);
+  igvn->replace_node(old_if_t, new_if_f);
+  // Progress
+  return iff;
+}
+Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  Node* res = Ideal_common(phase, can_reshape);
+  if (res != NodeSentinel) {
+    return res;
+  }
+  PhaseIterGVN *igvn = phase->is_IterGVN();
+  // Setup to scan up the CFG looking for a dominating test
+  Node* prev_dom = this;
   // Check for range-check vs other kinds of tests
-  Node *index1, *range1;
+  Node* index1;
+  Node* range1;
   jint offset1;
-  int flip1 = is_range_check(range1, index1, offset1);
-  if( flip1 ) {
+  int flip1 = is_range_check(range1, index1, offset1);
+  if (flip1) {
+    Node* dom = in(0);
     // Try to remove extra range checks.  All 'up_one_dom' gives up at merges
     // so all checks we inspect post-dominate the top-most check we find.
     // If we are going to fail the current check and we reach the top check
@@ -1372,13 +1675,14 @@ Node *IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     // Scan for the top checks and collect range of offsets
     for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
-      if (dom->Opcode() == Op_If &&         // Not same opcode?
+      if (dom->Opcode() == Op_RangeCheck && // Not same opcode?
           prev_dom->in(0) == dom) {         // One path of test does dominate?
         if (dom == this) return NULL;       // dead loop
         // See if this is a range check
-        Node *index2, *range2;
+        Node* index2;
+        Node* range2;
         jint offset2;
-        int flip2 = dom->as_If()->is_range_check(range2, index2, offset2);
+        int flip2 = dom->as_RangeCheck()->is_range_check(range2, index2, offset2);
         // See if this is a _matching_ range check, checking against
         // the same array bounds.
         if (flip2 == flip1 && range2 == range1 && index2 == index1 &&
@ -1486,237 +1790,14 @@ Node *IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
prev_dom = rc0.ctl; prev_dom = rc0.ctl;
} }
} }
} else {
prev_dom = search_identical(4);
} else { // Scan for an equivalent test if (prev_dom == NULL) {
Node *cmp;
int dist = 0; // Cutoff limit for search
int op = Opcode();
if( op == Op_If &&
(cmp=in(1)->in(1))->Opcode() == Op_CmpP ) {
if( cmp->in(2) != NULL && // make sure cmp is not already dead
cmp->in(2)->bottom_type() == TypePtr::NULL_PTR ) {
dist = 64; // Limit for null-pointer scans
} else {
dist = 4; // Do not bother for random pointer tests
}
} else {
dist = 4; // Limit for random junky scans
}
// Normal equivalent-test check.
if( !dom ) return NULL; // Dead loop?
Node* result = fold_compares(igvn);
if (result != NULL) {
return result;
}
// Search up the dominator tree for an If with an identical test
while( dom->Opcode() != op || // Not same opcode?
dom->in(1) != in(1) || // Not same input 1?
(req() == 3 && dom->in(2) != in(2)) || // Not same input 2?
prev_dom->in(0) != dom ) { // One path of test does not dominate?
if( dist < 0 ) return NULL;
dist--;
prev_dom = dom;
dom = up_one_dom( dom );
if( !dom ) return NULL;
}
// Check that we did not follow a loop back to ourselves
if( this == dom )
return NULL; return NULL;
}
if( dist > 2 ) // Add to count of NULL checks elided
explicit_null_checks_elided++;
} // End of Else scan for an equivalent test
// Hit! Remove this IF
#ifndef PRODUCT
if( TraceIterativeGVN ) {
tty->print(" Removing IfNode: "); this->dump();
} }
if( VerifyOpto && !phase->allow_progress() ) {
// Found an equivalent dominating test,
// we can not guarantee reaching a fix-point for these during iterativeGVN
// since intervening nodes may not change.
return NULL;
}
#endif
// Replace dominated IfNode // Replace dominated IfNode
dominated_by( prev_dom, igvn ); return dominated_by(prev_dom, igvn);
// Must return either the original node (now dead) or a new node
// (Do not return a top here, since that would break the uniqueness of top.)
return new ConINode(TypeInt::ZERO);
}
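For intuition about what the hunk above enables: when a range check is dominated by an identical check of the same index against the same array bounds, the dominated test can never fail on the path where the dominating one passed. A standalone illustration (ordinary C++, not compiler code) of the redundancy being targeted:

    #include <cassert>

    // The second bounds test below is fully determined by the first one,
    // which dominates it; after the optimization only one test remains.
    static int sum_two(const int* a, int len, int i) {
      if (i >= 0 && i < len) {      // dominating range check
        int x = a[i];
        if (i >= 0 && i < len) {    // dominated: same index, same bounds, provably redundant
          x += a[i];
        }
        return x;
      }
      return 0;
    }

    int main() {
      int a[4] = {1, 2, 3, 4};
      assert(sum_two(a, 4, 2) == 6);
      assert(sum_two(a, 4, 9) == 0);
      return 0;
    }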
//------------------------------dominated_by-----------------------------------
void IfNode::dominated_by( Node *prev_dom, PhaseIterGVN *igvn ) {
  igvn->hash_delete(this);      // Remove self to prevent spurious V-N
  Node *idom = in(0);
  // Need opcode to decide which way 'this' test goes
  int prev_op = prev_dom->Opcode();
  Node *top = igvn->C->top();   // Shortcut to top

  // Loop predicates may have depending checks which should not
  // be skipped. For example, range check predicate has two checks
  // for lower and upper bounds.
  ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
  if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL)
    prev_dom = idom;

  // Now walk the current IfNode's projections.
  // Loop ends when 'this' has no more uses.
  for (DUIterator_Last imin, i = last_outs(imin); i >= imin; --i) {
    Node *ifp = last_out(i);    // Get IfTrue/IfFalse
    igvn->add_users_to_worklist(ifp);
    // Check which projection it is and set target.
    // Data-target is either the dominating projection of the same type
    // or TOP if the dominating projection is of opposite type.
    // Data-target will be used as the new control edge for the non-CFG
    // nodes like Casts and Loads.
    Node *data_target = (ifp->Opcode() == prev_op) ? prev_dom : top;
    // Control-target is just the If's immediate dominator or TOP.
    Node *ctrl_target = (ifp->Opcode() == prev_op) ? idom : top;

    // For each child of an IfTrue/IfFalse projection, reroute.
    // Loop ends when projection has no more uses.
    for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) {
      Node* s = ifp->last_out(j);   // Get child of IfTrue/IfFalse
      if( !s->depends_only_on_test() ) {
        // Find the control input matching this def-use edge.
        // For Regions it may not be in slot 0.
        uint l;
        for( l = 0; s->in(l) != ifp; l++ ) { }
        igvn->replace_input_of(s, l, ctrl_target);
      } else {                      // Else, for control producers,
        igvn->replace_input_of(s, 0, data_target); // Move child to data-target
      }
    } // End for each child of a projection

    igvn->remove_dead_node(ifp);
  } // End for each IfTrue/IfFalse child of If

  // Kill the IfNode
  igvn->remove_dead_node(this);
}
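The rerouting loop above points every user of the doomed projections at either the dominating projection or top, slot by slot. A standalone miniature of just the edge-rerouting step (illustrative names, not HotSpot's API):

    #include <cassert>
    #include <vector>

    // Minimal sketch: every input edge of every user that points at the
    // doomed projection is redirected, in place, to the surviving target.
    struct N {
      std::vector<N*> in;   // input edges; slot 0 is control by convention
    };

    static void reroute_users(std::vector<N*>& users, N* doomed, N* target) {
      for (N* u : users) {
        for (size_t l = 0; l < u->in.size(); l++) {
          if (u->in[l] == doomed) {
            u->in[l] = target;  // same slot, new def: mirrors replace_input_of
          }
        }
      }
    }

    int main() {
      N doomed, target, user;
      user.in = { &doomed, nullptr };
      std::vector<N*> users = { &user };
      reroute_users(users, &doomed, &target);
      assert(user.in[0] == &target);
      return 0;
    }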
//------------------------------Identity---------------------------------------
// If the test is constant & we match, then we are the input Control
Node *IfProjNode::Identity(PhaseTransform *phase) {
  // Can only optimize if cannot go the other way
  const TypeTuple *t = phase->type(in(0))->is_tuple();
  if (t == TypeTuple::IFNEITHER ||
      // kill dead branch first otherwise the IfNode's control will
      // have 2 control uses (the IfNode that doesn't go away because
      // it still has uses and this branch of the
      // If). Node::has_special_unique_user() will cause this node to
      // be reprocessed once the dead branch is killed.
      (always_taken(t) && in(0)->outcnt() == 1)) {
    // IfNode control
    return in(0)->in(0);
  }
  // no progress
  return this;
}

#ifndef PRODUCT
//-------------------------------related---------------------------------------
// An IfProjNode's related node set consists of its input (an IfNode) including
// the IfNode's condition, plus all of its outputs at level 1. In compact mode,
// the restrictions for IfNode apply (see IfNode::rel).
void IfProjNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  Node* ifNode = this->in(0);
  in_rel->append(ifNode);
  if (compact) {
    ifNode->collect_nodes(in_rel, 3, false, true);
  } else {
    ifNode->collect_nodes_in_all_data(in_rel, false);
  }
  this->collect_nodes(out_rel, -1, false, false);
}

//------------------------------dump_spec--------------------------------------
void IfNode::dump_spec(outputStream *st) const {
  st->print("P=%f, C=%f", _prob, _fcnt);
}

//-------------------------------related---------------------------------------
// For an IfNode, the set of related output nodes is just the output nodes till
// depth 2, i.e, the IfTrue/IfFalse projection nodes plus the nodes they refer.
// The related input nodes contain no control nodes, but all data nodes
// pertaining to the condition. In compact mode, the input nodes are collected
// up to a depth of 3.
void IfNode::related(GrowableArray <Node *> *in_rel, GrowableArray <Node *> *out_rel, bool compact) const {
  if (compact) {
    this->collect_nodes(in_rel, 3, false, true);
  } else {
    this->collect_nodes_in_all_data(in_rel, false);
  }
  this->collect_nodes(out_rel, -2, false, false);
}
#endif
//------------------------------idealize_test----------------------------------
// Try to canonicalize tests better.  Peek at the Cmp/Bool/If sequence and
// come up with a canonical sequence.  Bools getting 'eq', 'gt' and 'ge' forms
// converted to 'ne', 'le' and 'lt' forms.  IfTrue/IfFalse get swapped as
// needed.
static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff) {
  assert(iff->in(0) != NULL, "If must be live");

  if (iff->outcnt() != 2)  return NULL; // Malformed projections.
  Node* old_if_f = iff->proj_out(false);
  Node* old_if_t = iff->proj_out(true);

  // CountedLoopEnds want the back-control test to be TRUE, irregardless of
  // whether they are testing a 'gt' or 'lt' condition.  The 'gt' condition
  // happens in count-down loops
  if (iff->is_CountedLoopEnd())  return NULL;
  if (!iff->in(1)->is_Bool())  return NULL; // Happens for partially optimized IF tests
  BoolNode *b = iff->in(1)->as_Bool();
  BoolTest bt = b->_test;
  // Test already in good order?
  if( bt.is_canonical() )
    return NULL;

  // Flip test to be canonical.  Requires flipping the IfFalse/IfTrue and
  // cloning the IfNode.
  Node* new_b = phase->transform( new BoolNode(b->in(1), bt.negate()) );
  if( !new_b->is_Bool() ) return NULL;
  b = new_b->as_Bool();

  PhaseIterGVN *igvn = phase->is_IterGVN();
  assert( igvn, "Test is not canonical in parser?" );

  // The IF node never really changes, but it needs to be cloned
  iff = new IfNode( iff->in(0), b, 1.0-iff->_prob, iff->_fcnt);

  Node *prior = igvn->hash_find_insert(iff);
  if( prior ) {
    igvn->remove_dead_node(iff);
    iff = (IfNode*)prior;
  } else {
    // Cannot call transform on it just yet
    igvn->set_type_bottom(iff);
  }
  igvn->_worklist.push(iff);

  // Now handle projections.  Cloning not required.
  Node* new_if_f = (Node*)(new IfFalseNode( iff ));
  Node* new_if_t = (Node*)(new IfTrueNode ( iff ));
  igvn->register_new_node_with_optimizer(new_if_f);
  igvn->register_new_node_with_optimizer(new_if_t);
  // Flip test, so flip trailing control
  igvn->replace_node(old_if_f, new_if_t);
  igvn->replace_node(old_if_t, new_if_f);

  // Progress
  return iff;
}


@@ -238,7 +238,7 @@ class LibraryCallKit : public GraphKit {
   // Generates the guards that check whether the result of
   // Unsafe.getObject should be recorded in an SATB log buffer.
   void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
-  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
+  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool is_unaligned);
   static bool klass_needs_init_guard(Node* kls);
   bool inline_unsafe_allocate();
   bool inline_unsafe_copyMemory();
@@ -544,72 +544,72 @@ bool LibraryCallKit::try_to_inline(int predicate) {
   case vmIntrinsics::_inflateStringC:
   case vmIntrinsics::_inflateStringB:           return inline_string_copy(!is_compress);
-  case vmIntrinsics::_getObject:                return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,  !is_volatile);
-  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile);
-  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,    !is_volatile);
-  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile);
-  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile);
-  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile);
-  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile);
-  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,   !is_volatile);
-  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,  !is_volatile);
-  case vmIntrinsics::_putObject:                return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,  !is_volatile);
-  case vmIntrinsics::_putBoolean:               return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN, !is_volatile);
-  case vmIntrinsics::_putByte:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,    !is_volatile);
-  case vmIntrinsics::_putShort:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile);
-  case vmIntrinsics::_putChar:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile);
-  case vmIntrinsics::_putInt:                   return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile);
-  case vmIntrinsics::_putLong:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile);
-  case vmIntrinsics::_putFloat:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,   !is_volatile);
-  case vmIntrinsics::_putDouble:                return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,  !is_volatile);
-  case vmIntrinsics::_getByte_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE,    !is_volatile);
-  case vmIntrinsics::_getShort_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT,   !is_volatile);
-  case vmIntrinsics::_getChar_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR,    !is_volatile);
-  case vmIntrinsics::_getInt_raw:               return inline_unsafe_access( is_native_ptr, !is_store, T_INT,     !is_volatile);
-  case vmIntrinsics::_getLong_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_LONG,    !is_volatile);
-  case vmIntrinsics::_getFloat_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT,   !is_volatile);
-  case vmIntrinsics::_getDouble_raw:            return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE,  !is_volatile);
-  case vmIntrinsics::_getAddress_raw:           return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile);
-  case vmIntrinsics::_putByte_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_BYTE,    !is_volatile);
-  case vmIntrinsics::_putShort_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_SHORT,   !is_volatile);
-  case vmIntrinsics::_putChar_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_CHAR,    !is_volatile);
-  case vmIntrinsics::_putInt_raw:               return inline_unsafe_access( is_native_ptr,  is_store, T_INT,     !is_volatile);
-  case vmIntrinsics::_putLong_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_LONG,    !is_volatile);
-  case vmIntrinsics::_putFloat_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_FLOAT,   !is_volatile);
-  case vmIntrinsics::_putDouble_raw:            return inline_unsafe_access( is_native_ptr,  is_store, T_DOUBLE,  !is_volatile);
-  case vmIntrinsics::_putAddress_raw:           return inline_unsafe_access( is_native_ptr,  is_store, T_ADDRESS, !is_volatile);
-  case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   is_volatile);
-  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  is_volatile);
-  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     is_volatile);
-  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    is_volatile);
-  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     is_volatile);
-  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      is_volatile);
-  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     is_volatile);
-  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    is_volatile);
-  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   is_volatile);
-  case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   is_volatile);
-  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  is_volatile);
-  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     is_volatile);
-  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    is_volatile);
-  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     is_volatile);
-  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      is_volatile);
-  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     is_volatile);
-  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    is_volatile);
-  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   is_volatile);
-  case vmIntrinsics::_getShortUnaligned:        return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile);
-  case vmIntrinsics::_getCharUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile);
-  case vmIntrinsics::_getIntUnaligned:          return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile);
-  case vmIntrinsics::_getLongUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile);
-  case vmIntrinsics::_putShortUnaligned:        return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile);
-  case vmIntrinsics::_putCharUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile);
-  case vmIntrinsics::_putIntUnaligned:          return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile);
-  case vmIntrinsics::_putLongUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile);
+  case vmIntrinsics::_getObject:                return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,  !is_volatile, false);
+  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile, false);
+  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,    !is_volatile, false);
+  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile, false);
+  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile, false);
+  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile, false);
+  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile, false);
+  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,   !is_volatile, false);
+  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,  !is_volatile, false);
+  case vmIntrinsics::_putObject:                return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,  !is_volatile, false);
+  case vmIntrinsics::_putBoolean:               return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN, !is_volatile, false);
+  case vmIntrinsics::_putByte:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,    !is_volatile, false);
+  case vmIntrinsics::_putShort:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile, false);
+  case vmIntrinsics::_putChar:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile, false);
+  case vmIntrinsics::_putInt:                   return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile, false);
+  case vmIntrinsics::_putLong:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile, false);
+  case vmIntrinsics::_putFloat:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,   !is_volatile, false);
+  case vmIntrinsics::_putDouble:                return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,  !is_volatile, false);
+  case vmIntrinsics::_getByte_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE,    !is_volatile, false);
+  case vmIntrinsics::_getShort_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT,   !is_volatile, false);
+  case vmIntrinsics::_getChar_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR,    !is_volatile, false);
+  case vmIntrinsics::_getInt_raw:               return inline_unsafe_access( is_native_ptr, !is_store, T_INT,     !is_volatile, false);
+  case vmIntrinsics::_getLong_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_LONG,    !is_volatile, false);
+  case vmIntrinsics::_getFloat_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT,   !is_volatile, false);
+  case vmIntrinsics::_getDouble_raw:            return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE,  !is_volatile, false);
+  case vmIntrinsics::_getAddress_raw:           return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile, false);
+  case vmIntrinsics::_putByte_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_BYTE,    !is_volatile, false);
+  case vmIntrinsics::_putShort_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_SHORT,   !is_volatile, false);
+  case vmIntrinsics::_putChar_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_CHAR,    !is_volatile, false);
+  case vmIntrinsics::_putInt_raw:               return inline_unsafe_access( is_native_ptr,  is_store, T_INT,     !is_volatile, false);
+  case vmIntrinsics::_putLong_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_LONG,    !is_volatile, false);
+  case vmIntrinsics::_putFloat_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_FLOAT,   !is_volatile, false);
+  case vmIntrinsics::_putDouble_raw:            return inline_unsafe_access( is_native_ptr,  is_store, T_DOUBLE,  !is_volatile, false);
+  case vmIntrinsics::_putAddress_raw:           return inline_unsafe_access( is_native_ptr,  is_store, T_ADDRESS, !is_volatile, false);
+  case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   is_volatile, false);
+  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  is_volatile, false);
+  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     is_volatile, false);
+  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    is_volatile, false);
+  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     is_volatile, false);
+  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      is_volatile, false);
+  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     is_volatile, false);
+  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    is_volatile, false);
+  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   is_volatile, false);
+  case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   is_volatile, false);
+  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  is_volatile, false);
+  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     is_volatile, false);
+  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    is_volatile, false);
+  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     is_volatile, false);
+  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      is_volatile, false);
+  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     is_volatile, false);
+  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    is_volatile, false);
+  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   is_volatile, false);
+  case vmIntrinsics::_getShortUnaligned:        return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile, true);
+  case vmIntrinsics::_getCharUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile, true);
+  case vmIntrinsics::_getIntUnaligned:          return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile, true);
+  case vmIntrinsics::_getLongUnaligned:         return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile, true);
+  case vmIntrinsics::_putShortUnaligned:        return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile, true);
+  case vmIntrinsics::_putCharUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile, true);
+  case vmIntrinsics::_putIntUnaligned:          return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile, true);
+  case vmIntrinsics::_putLongUnaligned:         return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile, true);
   case vmIntrinsics::_compareAndSwapObject:     return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
   case vmIntrinsics::_compareAndSwapInt:        return inline_unsafe_load_store(T_INT,    LS_cmpxchg);
@@ -2385,7 +2385,7 @@ const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_
   return NULL;
 }

-bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) {
+bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool unaligned) {
   if (callee()->is_static())  return false;  // caller must have the capability!

 #ifndef PRODUCT
@@ -2527,7 +2527,24 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
   // of safe & unsafe memory.
   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);

+  assert(alias_type->adr_type() == TypeRawPtr::BOTTOM || alias_type->adr_type() == TypeOopPtr::BOTTOM ||
+         alias_type->field() != NULL || alias_type->element() != NULL, "field, array element or unknown");
+  bool mismatched = false;
+  if (alias_type->element() != NULL || alias_type->field() != NULL) {
+    BasicType bt;
+    if (alias_type->element() != NULL) {
+      const Type* element = alias_type->element();
+      bt = element->isa_narrowoop() ? T_OBJECT : element->array_element_basic_type();
+    } else {
+      bt = alias_type->field()->type()->basic_type();
+    }
+    if (bt != type) {
+      mismatched = true;
+    }
+  }
+  assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
+
   if (!is_store) {
     Node* p = NULL;
     // Try to constant fold a load from a constant field
     ciField* field = alias_type->field();
@@ -2543,7 +2560,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
     MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
     // To be valid, unsafe loads may depend on other conditions than
     // the one that guards them: pin the Load node
-    p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile);
+    p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
     // load value
     switch (type) {
     case T_BOOLEAN:
@@ -2590,12 +2607,12 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
     MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
     if (type != T_OBJECT ) {
-      (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
+      (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
     } else {
       // Possibly an oop being stored to Java heap or native memory
       if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
         // oop to Java heap.
-        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
+        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
       } else {
         // We can't tell at compile time if we are storing in the Java heap or outside
         // of it. So we need to emit code to conditionally do the proper type of
@@ -2607,11 +2624,11 @@ bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, Bas
         __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
           // Sync IdealKit and graphKit.
           sync_kit(ideal);
-          Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
+          Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
           // Update IdealKit memory.
           __ sync_kit(this);
         } __ else_(); {
-          __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile);
+          __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile, mismatched);
         } __ end_if();
         // Final sync IdealKit and GraphKit.
         final_sync(ideal);
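The new mismatched flag exists because Unsafe lets Java code read or write, say, a 32-bit value inside a byte[]: an access whose size and type disagree with the array's declared element type. A standalone C++ analogue of such an access (memcpy being the alignment-safe way to express it):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // The buffer is declared as bytes, but a 32-bit value is stored and read
    // at an odd offset: both mismatched (wrong element type) and unaligned.
    int main() {
      unsigned char buf[8] = {0};
      std::uint32_t v = 0x11223344u;
      std::memcpy(buf + 1, &v, sizeof v);       // store at an odd offset
      std::uint32_t back = 0;
      std::memcpy(&back, buf + 1, sizeof back); // read it back the same way
      assert(back == v);
      // A compiler tracking such accesses must not assume they line up with
      // the declared element type or size of the underlying array.
      return 0;
    }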


@@ -91,7 +91,8 @@ void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred)
 // The true projecttion (if_cont) of the new_iff is returned.
 // This code is also used to clone predicates to cloned loops.
 ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
-                                                      Deoptimization::DeoptReason reason) {
+                                                      Deoptimization::DeoptReason reason,
+                                                      int opcode) {
   assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!");
   IfNode* iff = cont_proj->in(0)->as_If();
@@ -133,8 +134,13 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node*
   }
   // Create new_iff
   IdealLoopTree* lp = get_loop(entry);
-  IfNode *new_iff = iff->clone()->as_If();
-  new_iff->set_req(0, entry);
+  IfNode* new_iff = NULL;
+  if (opcode == Op_If) {
+    new_iff = new IfNode(entry, iff->in(1), iff->_prob, iff->_fcnt);
+  } else {
+    assert(opcode == Op_RangeCheck, "no other if variant here");
+    new_iff = new RangeCheckNode(entry, iff->in(1), iff->_prob, iff->_fcnt);
+  }
   register_control(new_iff, lp, entry);
   Node *if_cont = new IfTrueNode(new_iff);
   Node *if_uct  = new IfFalseNode(new_iff);
@@ -183,7 +189,8 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node*
 //------------------------------create_new_if_for_predicate------------------------
 // Create a new if below new_entry for the predicate to be cloned (IGVN optimization)
 ProjNode* PhaseIterGVN::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
-                                                    Deoptimization::DeoptReason reason) {
+                                                    Deoptimization::DeoptReason reason,
+                                                    int opcode) {
   assert(new_entry != 0, "only used for clone predicate");
   assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!");
   IfNode* iff = cont_proj->in(0)->as_If();
@@ -208,8 +215,13 @@ ProjNode* PhaseIterGVN::create_new_if_for_predicate(ProjNode* cont_proj, Node* n
   }
   // Create new_iff in new location.
-  IfNode *new_iff = iff->clone()->as_If();
-  new_iff->set_req(0, new_entry);
+  IfNode* new_iff = NULL;
+  if (opcode == Op_If) {
+    new_iff = new IfNode(new_entry, iff->in(1), iff->_prob, iff->_fcnt);
+  } else {
+    assert(opcode == Op_RangeCheck, "no other if variant here");
+    new_iff = new RangeCheckNode(new_entry, iff->in(1), iff->_prob, iff->_fcnt);
+  }
   register_new_node_with_optimizer(new_iff);
   Node *if_cont = new IfTrueNode(new_iff);
@@ -249,9 +261,9 @@ ProjNode* PhaseIdealLoop::clone_predicate(ProjNode* predicate_proj, Node* new_en
                                           PhaseIterGVN* igvn) {
   ProjNode* new_predicate_proj;
   if (loop_phase != NULL) {
-    new_predicate_proj = loop_phase->create_new_if_for_predicate(predicate_proj, new_entry, reason);
+    new_predicate_proj = loop_phase->create_new_if_for_predicate(predicate_proj, new_entry, reason, Op_If);
   } else {
-    new_predicate_proj = igvn->create_new_if_for_predicate(predicate_proj, new_entry, reason);
+    new_predicate_proj = igvn->create_new_if_for_predicate(predicate_proj, new_entry, reason, Op_If);
   }
   IfNode* iff = new_predicate_proj->in(0)->as_If();
   Node* ctrl  = iff->in(0);
@@ -714,7 +726,8 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
     while (current_proj != head) {
       if (loop == get_loop(current_proj) && // still in the loop ?
           current_proj->is_Proj()        && // is a projection  ?
-          current_proj->in(0)->Opcode() == Op_If) { // is a if projection ?
+          (current_proj->in(0)->Opcode() == Op_If ||
+           current_proj->in(0)->Opcode() == Op_RangeCheck)) { // is a if projection ?
         if_proj_list.push(current_proj);
       }
       current_proj = idom(current_proj);
@@ -753,7 +766,8 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
     if (invar.is_invariant(bol)) {
       // Invariant test
       new_predicate_proj = create_new_if_for_predicate(predicate_proj, NULL,
-                                                       Deoptimization::Reason_predicate);
+                                                       Deoptimization::Reason_predicate,
+                                                       iff->Opcode());
       Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0);
       BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool();
@@ -797,8 +811,8 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
       // lower_bound test will dominate the upper bound test and all
       // cloned or created nodes will use the lower bound test as
       // their declared control.
-      ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate);
-      ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate);
+      ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate, iff->Opcode());
+      ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate, iff->Opcode());
       assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate");
       Node *ctrl = lower_bound_proj->in(0)->as_If()->in(0);
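The two back-to-back create_new_if_for_predicate calls above build the hoisted lower- and upper-bound tests of a range-check predicate. A standalone before/after sketch (plain C++, with deliberately simplified bounds logic) of the shape of that transformation:

    #include <cassert>

    // Before: a range check runs on every iteration.
    static long sum_checked(const int* a, int len, int n) {
      long s = 0;
      for (int i = 0; i < n; i++) {
        if (i >= 0 && i < len) {     // per-iteration range check
          s += a[i];
        }
      }
      return s;
    }

    // After: lower and upper bounds are tested once, before the loop,
    // and the body runs check-free; otherwise fall back to the checked form.
    static long sum_predicated(const int* a, int len, int n) {
      long s = 0;
      if (0 < n && n <= len) {       // hoisted predicate pair
        for (int i = 0; i < n; i++) {
          s += a[i];
        }
      } else {
        s = sum_checked(a, len, n);  // slow path keeps the checks
      }
      return s;
    }

    int main() {
      int a[5] = {1, 2, 3, 4, 5};
      assert(sum_checked(a, 5, 5) == sum_predicated(a, 5, 5));
      assert(sum_checked(a, 5, 9) == sum_predicated(a, 5, 9));
      return 0;
    }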


@@ -290,7 +290,7 @@ bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
   if (ctrl->is_top())
     return false;               // Found dead test on live IF?  No peeling!
   // Standard IF only has one input value to check for loop invariance
-  assert( test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
+  assert(test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd || test->Opcode() == Op_RangeCheck, "Check this code when new subtype is added");
   // Condition is not a member of this loop?
   if( !is_member(phase->get_loop(ctrl)) &&
       is_loop_exit(test) )
@@ -792,8 +792,10 @@ bool IdealLoopTree::policy_unroll(PhaseIdealLoop *phase) {
       return false;
     }
-    if(cl->do_unroll_only()) {
-      NOT_PRODUCT(if (TraceSuperWordLoopUnrollAnalysis) tty->print_cr("policy_unroll passed vector loop(vlen=%d,factor = %d)\n", slp_max_unroll_factor, future_unroll_ct));
+    if (cl->do_unroll_only()) {
+      if (TraceSuperWordLoopUnrollAnalysis) {
+        tty->print_cr("policy_unroll passed vector loop(vlen=%d,factor = %d)\n", slp_max_unroll_factor, future_unroll_ct);
+      }
     }
   // Unroll once!  (Each trip will soon do double iterations)
@@ -818,7 +820,9 @@ void IdealLoopTree::policy_unroll_slp_analysis(CountedLoopNode *cl, PhaseIdealLo
   if (slp_max_unroll_factor >= future_unroll_ct) {
     int new_limit = cl->node_count_before_unroll() * slp_max_unroll_factor;
     if (new_limit > LoopUnrollLimit) {
-      NOT_PRODUCT(if (TraceSuperWordLoopUnrollAnalysis) tty->print_cr("slp analysis unroll=%d, default limit=%d\n", new_limit, _local_loop_unroll_limit));
+      if (TraceSuperWordLoopUnrollAnalysis) {
+        tty->print_cr("slp analysis unroll=%d, default limit=%d\n", new_limit, _local_loop_unroll_limit);
+      }
       _local_loop_unroll_limit = new_limit;
     }
   }
@@ -856,7 +860,8 @@ bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
   // loop-invariant.
   for (uint i = 0; i < _body.size(); i++) {
     Node *iff = _body[i];
-    if (iff->Opcode() == Op_If) { // Test?
+    if (iff->Opcode() == Op_If ||
+        iff->Opcode() == Op_RangeCheck) { // Test?
       // Comparing trip+off vs limit
       Node *bol = iff->in(1);
@@ -2035,8 +2040,8 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
   // loop-invariant.
   for( uint i = 0; i < loop->_body.size(); i++ ) {
     Node *iff = loop->_body[i];
-    if( iff->Opcode() == Op_If ) { // Test?
+    if (iff->Opcode() == Op_If ||
+        iff->Opcode() == Op_RangeCheck) { // Test?
       // Test is an IfNode, has 2 projections.  If BOTH are in the loop
       // we need loop unswitching instead of iteration splitting.
       Node *exit = loop->is_loop_exit(iff);
@@ -2119,10 +2124,9 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
           conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck;
         }
       } else {
-#ifndef PRODUCT
-        if( PrintOpto )
+        if (PrintOpto) {
           tty->print_cr("missed RCE opportunity");
-#endif
+        }
         continue;             // In release mode, ignore it
       }
     } else {                  // Otherwise work on normal compares
@@ -2157,10 +2161,9 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
         }
         break;
       default:
-#ifndef PRODUCT
-        if( PrintOpto )
+        if (PrintOpto) {
           tty->print_cr("missed RCE opportunity");
-#endif
+        }
         continue;             // Unhandled case
       }
     }
@@ -2504,9 +2507,7 @@ bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_
     return false;
   }
   if (should_peel) {            // Should we peel?
-#ifndef PRODUCT
-    if (PrintOpto) tty->print_cr("should_peel");
-#endif
+    if (PrintOpto) { tty->print_cr("should_peel"); }
     phase->do_peeling(this,old_new);
   } else if (should_unswitch) {
     phase->do_unswitching(this, old_new);


@@ -132,7 +132,7 @@ void PhaseIdealLoop::do_unswitching (IdealLoopTree *loop, Node_List &old_new) {
     head->as_CountedLoop()->set_normal_loop();
   }

-  ProjNode* proj_true = create_slow_version_of_loop(loop, old_new);
+  ProjNode* proj_true = create_slow_version_of_loop(loop, old_new, unswitch_iff->Opcode());

 #ifdef ASSERT
   Node* uniqc = proj_true->unique_ctrl_out();
@@ -222,7 +222,8 @@ void PhaseIdealLoop::do_unswitching (IdealLoopTree *loop, Node_List &old_new) {
 // and inserting an if to select fast-slow versions.
 // Return control projection of the entry to the fast version.
 ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,
-                                                      Node_List &old_new) {
+                                                      Node_List &old_new,
+                                                      int opcode) {
   LoopNode* head  = loop->_head->as_Loop();
   bool counted_loop = head->is_CountedLoop();
   Node*     entry = head->in(LoopNode::EntryControl);
@@ -235,7 +236,8 @@ ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,
   register_node(opq, outer_loop, entry, dom_depth(entry));
   Node *bol = new Conv2BNode(opq);
   register_node(bol, outer_loop, entry, dom_depth(entry));
-  IfNode* iff = new IfNode(entry, bol, PROB_MAX, COUNT_UNKNOWN);
+  IfNode* iff = (opcode == Op_RangeCheck) ? new RangeCheckNode(entry, bol, PROB_MAX, COUNT_UNKNOWN) :
+                                            new IfNode(entry, bol, PROB_MAX, COUNT_UNKNOWN);
   register_node(iff, outer_loop, entry, dom_depth(entry));
   ProjNode* iffast = new IfTrueNode(iff);
   register_node(iffast, outer_loop, iff, dom_depth(iff));
@@ -359,16 +361,22 @@ bool CountedLoopReserveKit::create_reserve() {
   }

   if(!_lpt->_head->is_CountedLoop()) {
-    NOT_PRODUCT(if(TraceLoopOpts) {tty->print_cr("CountedLoopReserveKit::create_reserve: %d not counted loop", _lpt->_head->_idx);})
+    if (TraceLoopOpts) {
+      tty->print_cr("CountedLoopReserveKit::create_reserve: %d not counted loop", _lpt->_head->_idx);
+    }
     return false;
   }
   CountedLoopNode *cl = _lpt->_head->as_CountedLoop();
   if (!cl->is_valid_counted_loop()) {
-    NOT_PRODUCT(if(TraceLoopOpts) {tty->print_cr("CountedLoopReserveKit::create_reserve: %d not valid counted loop", cl->_idx);})
+    if (TraceLoopOpts) {
+      tty->print_cr("CountedLoopReserveKit::create_reserve: %d not valid counted loop", cl->_idx);
+    }
     return false; // skip malformed counted loop
   }
   if (!cl->is_main_loop()) {
-    NOT_PRODUCT(if(TraceLoopOpts) {tty->print_cr("CountedLoopReserveKit::create_reserve: %d not main loop", cl->_idx);})
+    if (TraceLoopOpts) {
+      tty->print_cr("CountedLoopReserveKit::create_reserve: %d not main loop", cl->_idx);
+    }
     return false; // skip normal, pre, and post loops
   }
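The NOT_PRODUCT(...) wrappers dropped throughout these hunks come from HotSpot's utilities/macros.hpp, which defines the wrapper roughly as below: the argument is compiled out of product builds entirely. Replacing it with a plain runtime if (TraceLoopOpts) test presumes the flag itself is available in product builds.

    // Roughly the definition in utilities/macros.hpp: in a PRODUCT build the
    // wrapped code vanishes at preprocessing time, so no runtime test is paid.
    #ifdef PRODUCT
    #define NOT_PRODUCT(code)
    #else
    #define NOT_PRODUCT(code) code
    #endif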


@@ -2397,11 +2397,9 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts)
   // After that switch predicates off and do more loop optimizations.
   if (!C->major_progress() && (C->predicate_count() > 0)) {
     C->cleanup_loop_predicates(_igvn);
-#ifndef PRODUCT
     if (TraceLoopOpts) {
       tty->print_cr("PredicatesOff");
     }
-#endif
     C->set_major_progress();
   }


@@ -916,7 +916,8 @@ public:
   // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
   ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
-                                        Deoptimization::DeoptReason reason);
+                                        Deoptimization::DeoptReason reason,
+                                        int opcode);
   void register_control(Node* n, IdealLoopTree *loop, Node* pred);

   // Clone loop predicates to cloned loops (peeled, unswitched)
@@ -966,7 +967,8 @@ public:
   // Create a slow version of the loop by cloning the loop
   // and inserting an if to select fast-slow versions.
   ProjNode* create_slow_version_of_loop(IdealLoopTree *loop,
-                                        Node_List &old_new);
+                                        Node_List &old_new,
+                                        int opcode);

   // Clone a loop and return the clone head (clone_loop_head).
   // Added nodes include int(1), int(0) - disconnected, If, IfTrue, IfFalse,


@@ -199,14 +199,11 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
 // IGVN worklist for later cleanup. Move control-dependent data Nodes on the
 // live path up to the dominating control.
 void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exclude_loop_predicate ) {
-#ifndef PRODUCT
-  if (VerifyLoopOptimizations && PrintOpto) tty->print_cr("dominating test");
-#endif
+  if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }

   // prevdom is the dominating projection of the dominating test.
   assert( iff->is_If(), "" );
-  assert( iff->Opcode() == Op_If || iff->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
+  assert(iff->Opcode() == Op_If || iff->Opcode() == Op_CountedLoopEnd || iff->Opcode() == Op_RangeCheck, "Check this code when new subtype is added");
   int pop = prevdom->Opcode();
   assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
   if (flip) {
@@ -617,9 +614,7 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) {
     }
   }
   if (phi == NULL)  break;
-#ifndef PRODUCT
-  if (PrintOpto && VerifyLoopOptimizations) tty->print_cr("CMOV");
-#endif
+  if (PrintOpto && VerifyLoopOptimizations) { tty->print_cr("CMOV"); }
   // Move speculative ops
   for (uint j = 1; j < region->req(); j++) {
     Node *proj = region->in(j);
@@ -963,10 +958,9 @@ static bool merge_point_too_heavy(Compile* C, Node* region) {
   }
   int nodes_left = C->max_node_limit() - C->live_nodes();
   if (weight * 8 > nodes_left) {
-#ifndef PRODUCT
-    if (PrintOpto)
+    if (PrintOpto) {
       tty->print_cr("*** Split-if bails out:  %d nodes, region weight %d", C->unique(), weight);
-#endif
+    }
     return true;
   } else {
     return false;
@@ -1123,7 +1117,8 @@ void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) {
   int n_op = n->Opcode();

   // Check for an IF being dominated by another IF same test
-  if (n_op == Op_If) {
+  if (n_op == Op_If ||
+      n_op == Op_RangeCheck) {
     Node *bol = n->in(1);
     uint max = bol->outcnt();
     // Check for same test used more than once?
@@ -1489,14 +1484,12 @@ void PhaseIdealLoop::sink_use( Node *use, Node *post_loop ) {
 void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd,
                                  Node* side_by_side_idom) {
-#ifndef PRODUCT
   if (C->do_vector_loop() && PrintOpto) {
     const char* mname = C->method()->name()->as_quoted_ascii();
     if (mname != NULL) {
       tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname);
     }
   }
-#endif

   CloneMap& cm = C->clone_map();
   Dict* dict = cm.dict();
@@ -1945,7 +1938,10 @@ ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTes
   BoolNode* bol = new BoolNode(cmp, relop);
   register_node(bol, loop, proj2, ddepth);

-  IfNode* new_if = new IfNode(proj2, bol, iff->_prob, iff->_fcnt);
+  int opcode = iff->Opcode();
+  assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode");
+  IfNode* new_if = (opcode == Op_If) ? new IfNode(proj2, bol, iff->_prob, iff->_fcnt):
+                                       new RangeCheckNode(proj2, bol, iff->_prob, iff->_fcnt);
   register_node(new_if, loop, proj2, ddepth);
   proj->set_req(0, new_if); // reattach


@@ -1569,13 +1569,11 @@ Node *Matcher::Label_Root( const Node *n, State *svec, Node *control, const Node
         // Can NOT include the match of a subtree when its memory state
         // is used by any of the other subtrees
         (input_mem == NodeSentinel) ) {
-#ifndef PRODUCT
       // Print when we exclude matching due to different memory states at input-loads
-      if( PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
-        && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ) {
+      if (PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
+          && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem)) {
         tty->print_cr("invalid input_mem");
       }
-#endif
       // Switch to a register-only opcode; this value must be in a register
       // and cannot be subsumed as part of a larger instruction.
       s->DFA( m->ideal_reg(), m );


@@ -269,6 +269,10 @@ public:
   // should generate this one.
   static const bool match_rule_supported(int opcode);

+  // identify extra cases that we might want to provide match rules for
+  // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
+  static const bool match_rule_supported_vector(int opcode, int vlen);
+
   // Some uarchs have different sized float register resources
   static const int float_pressure(int default_pressure_threshold);
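The new hook lets each architecture veto a vector rule for a given vector length even when the scalar rule exists. A standalone sketch of the kind of guard a port might implement; the opcodes, names, and width limits here are hypothetical, not any particular port's values:

    #include <cassert>

    enum { Op_AddVI = 1, Op_MulVL = 2 };  // hypothetical opcode tags

    static bool example_match_rule_supported(int opcode) {
      return opcode == Op_AddVI || opcode == Op_MulVL; // base rule exists at all
    }

    static bool example_match_rule_supported_vector(int opcode, int vlen) {
      if (!example_match_rule_supported(opcode)) {
        return false;                   // never emit what the matcher lacks
      }
      switch (opcode) {
        case Op_MulVL: return vlen <= 2;  // e.g. only a 128-bit long multiply
        default:       return vlen <= 8;  // generic width limit for this sketch
      }
    }

    int main() {
      assert(example_match_rule_supported_vector(Op_AddVI, 4));
      assert(!example_match_rule_supported_vector(Op_MulVL, 4));
      return 0;
    }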


@@ -72,8 +72,15 @@ void MemNode::dump_spec(outputStream *st) const {
   dump_adr_type(this, _adr_type, st);

   Compile* C = Compile::current();
-  if( C->alias_type(_adr_type)->is_volatile() )
+  if (C->alias_type(_adr_type)->is_volatile()) {
     st->print(" Volatile!");
+  }
+  if (_unaligned_access) {
+    st->print(" unaligned");
+  }
+  if (_mismatched_access) {
+    st->print(" mismatched");
+  }
 }

 void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) {
@@ -2393,7 +2400,8 @@ Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
          st->Opcode() == Op_StoreVector ||
          Opcode() == Op_StoreVector ||
          phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw ||
-         (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI), // expanded ClearArrayNode
+         (Opcode() == Op_StoreL && st->Opcode() == Op_StoreI) || // expanded ClearArrayNode
+         (is_mismatched_access() || st->as_Store()->is_mismatched_access()),
          "no mismatched stores, except on raw memory: %s %s", NodeClassNames[Opcode()], NodeClassNames[st->Opcode()]);

   if (st->in(MemNode::Address)->eqv_uncast(address) &&
@@ -3213,6 +3221,9 @@ bool InitializeNode::detect_init_independence(Node* n, int& count) {
 // within the initialized memory.
 intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape) {
   const int FAIL = 0;
+  if (st->is_unaligned_access()) {
+    return FAIL;
+  }
   if (st->req() != MemNode::ValueIn + 1)
     return FAIL;                // an inscrutable StoreNode (card mark?)
   Node* ctl = st->in(MemNode::Control);


@@ -39,11 +39,14 @@ class PhaseTransform;
 //------------------------------MemNode----------------------------------------
 // Load or Store, possibly throwing a NULL pointer exception
 class MemNode : public Node {
+private:
+  bool _unaligned_access; // Unaligned access from unsafe
+  bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance
 protected:
 #ifdef ASSERT
   const TypePtr* _adr_type;     // What kind of memory is being addressed?
 #endif
-  virtual uint size_of() const; // Size is bigger (ASSERT only)
+  virtual uint size_of() const;
 public:
   enum { Control,               // When is it safe to do this load?
          Memory,                // Chunk of memory is being loaded from
@@ -57,17 +60,17 @@ public:
   } MemOrd;
 protected:
   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
-    : Node(c0,c1,c2   ) {
+    : Node(c0,c1,c2   ), _unaligned_access(false), _mismatched_access(false) {
     init_class_id(Class_Mem);
     debug_only(_adr_type=at; adr_type();)
   }
   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
-    : Node(c0,c1,c2,c3) {
+    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) {
     init_class_id(Class_Mem);
     debug_only(_adr_type=at; adr_type();)
   }
   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
-    : Node(c0,c1,c2,c3,c4) {
+    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
     init_class_id(Class_Mem);
     debug_only(_adr_type=at; adr_type();)
   }
@@ -127,6 +130,11 @@ public:
   // the given memory state?  (The state may or may not be in(Memory).)
   Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
 
+  void set_unaligned_access() { _unaligned_access = true; }
+  bool is_unaligned_access() const { return _unaligned_access; }
+  void set_mismatched_access() { _mismatched_access = true; }
+  bool is_mismatched_access() const { return _mismatched_access; }
+
 #ifndef PRODUCT
   static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
   virtual void dump_spec(outputStream *st) const;
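
Aside: the two new flags follow a simple pattern that can be reproduced standalone. MiniMemNode below is an invented toy, not the real MemNode; it shows only the default-false flags, the sticky setters, and how an unsafe intrinsic might tag a mismatched access (e.g. Unsafe reading a byte out of an int[]).

#include <cassert>

// Invented toy class mirroring the flag pattern added to MemNode above.
class MiniMemNode {
private:
  bool _unaligned_access;   // unaligned access from unsafe
  bool _mismatched_access;  // e.g. byte read from an int[] via Unsafe
public:
  MiniMemNode() : _unaligned_access(false), _mismatched_access(false) {}
  // Sticky setters: once tagged, a node stays tagged.
  void set_unaligned_access()       { _unaligned_access = true; }
  bool is_unaligned_access() const  { return _unaligned_access; }
  void set_mismatched_access()      { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }
};

int main() {
  MiniMemNode load;              // flags default to false
  load.set_mismatched_access();  // an intrinsic parser would tag the node here
  assert(load.is_mismatched_access());
  assert(!load.is_unaligned_access());
  return 0;
}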

View file

@@ -230,9 +230,7 @@ Node *CMoveINode::Ideal(PhaseGVN *phase, bool can_reshape) {
 
   // Convert to a bool (flipped)
   // Build int->bool conversion
-#ifndef PRODUCT
-  if( PrintOpto ) tty->print_cr("CMOV to I2B");
-#endif
+  if (PrintOpto) { tty->print_cr("CMOV to I2B"); }
   Node *n = new Conv2BNode( cmp->in(1) );
   if( flip )
     n = new XorINode( phase->transform(n), phase->intcon(1) );
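
Aside: this CMOV-to-I2B rewrite replaces a CMoveI selecting between 0 and 1 with Conv2B, adding XorI(..., 1) when the sense is flipped. A scalar sketch (plain C++, names invented, not HotSpot code) checks that the three forms agree on every input:

#include <cassert>

// conv2b: 0 maps to 0, every non-zero value maps to 1 (Conv2BNode's job).
static int conv2b(int x) { return x != 0; }

int main() {
  for (int x = -3; x <= 3; ++x) {
    int cmove   = (x != 0) ? 1 : 0;       // the original CMoveI shape
    int flipped = (x != 0) ? 0 : 1;       // the flipped CMoveI shape
    assert(conv2b(x) == cmove);           // Conv2B replaces the plain form
    assert((conv2b(x) ^ 1) == flipped);   // XorI(Conv2B, 1) handles the flip
  }
  return 0;
}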

View file

@@ -44,14 +44,14 @@ Node *MultiNode::match( const ProjNode *proj, const Matcher *m ) { return proj->
 //------------------------------proj_out---------------------------------------
 // Get a named projection
 ProjNode* MultiNode::proj_out(uint which_proj) const {
-  assert(Opcode() != Op_If || which_proj == (uint)true || which_proj == (uint)false, "must be 1 or 0");
-  assert(Opcode() != Op_If || outcnt() == 2, "bad if #1");
+  assert((Opcode() != Op_If && Opcode() != Op_RangeCheck) || which_proj == (uint)true || which_proj == (uint)false, "must be 1 or 0");
+  assert((Opcode() != Op_If && Opcode() != Op_RangeCheck) || outcnt() == 2, "bad if #1");
   for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) {
     Node *p = fast_out(i);
     if (p->is_Proj()) {
       ProjNode *proj = p->as_Proj();
       if (proj->_con == which_proj) {
-        assert(Opcode() != Op_If || proj->Opcode() == (which_proj?Op_IfTrue:Op_IfFalse), "bad if #2");
+        assert((Opcode() != Op_If && Opcode() != Op_RangeCheck) || proj->Opcode() == (which_proj ? Op_IfTrue : Op_IfFalse), "bad if #2");
         return proj;
       }
     } else {
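
Aside: proj_out() is a linear scan of the node's users for the projection carrying a matching constant; the asserts above merely extend the If sanity checks to the new RangeCheck opcode. A stripped-down sketch of the same lookup (Proj and Multi are invented toys, not the C2 classes):

#include <cassert>
#include <vector>

struct Proj {
  unsigned con;  // which projection this is (e.g. 1 = true path, 0 = false path)
};

struct Multi {
  std::vector<Proj*> outs;  // users, as walked by fast_outs() in the real code

  // Scan the users for the projection whose constant matches.
  Proj* proj_out(unsigned which_proj) const {
    for (Proj* p : outs) {
      if (p->con == which_proj) return p;
    }
    return nullptr;
  }
};

int main() {
  Proj t{1}, f{0};
  Multi iff;
  iff.outs = {&t, &f};  // an If (or RangeCheck) has exactly two projections
  assert(iff.proj_out(1) == &t);
  assert(iff.proj_out(0) == &f);
  return 0;
}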

View file

@@ -125,6 +125,7 @@ class PhaseValues;
 class PhiNode;
 class Pipeline;
 class ProjNode;
+class RangeCheckNode;
 class RegMask;
 class RegionNode;
 class RootNode;
@@ -584,6 +585,7 @@ public:
       DEFINE_CLASS_ID(Jump, PCTable, 1)
     DEFINE_CLASS_ID(If, MultiBranch, 1)
       DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
+      DEFINE_CLASS_ID(RangeCheck, If, 1)
     DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
   DEFINE_CLASS_ID(Start, Multi, 2)
   DEFINE_CLASS_ID(MemBar, Multi, 3)
@@ -758,6 +760,7 @@ public:
   DEFINE_CLASS_QUERY(FastLock)
   DEFINE_CLASS_QUERY(FastUnlock)
   DEFINE_CLASS_QUERY(If)
+  DEFINE_CLASS_QUERY(RangeCheck)
   DEFINE_CLASS_QUERY(IfFalse)
   DEFINE_CLASS_QUERY(IfTrue)
   DEFINE_CLASS_QUERY(Initialize)
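
Aside: DEFINE_CLASS_ID encodes the class hierarchy into bit patterns so that "is this an If, including subclasses like the new RangeCheck?" becomes a mask-and-compare rather than a virtual call. The sketch below invents its own (much narrower) bit layout to show the idea; the real macro computes the patterns and masks differently.

#include <cassert>

// Invented bit layout: the low nibble identifies the If family, a higher
// bit distinguishes the RangeCheck subclass, loosely modelling
// DEFINE_CLASS_ID(RangeCheck, If, 1).
enum : unsigned {
  Class_If         = 0x01,
  Class_RangeCheck = 0x01 | (1u << 4),  // extends its parent's pattern
  ClassMask_If     = 0x0F               // bits shared by the whole If family
};

static bool is_If(unsigned id)         { return (id & ClassMask_If) == Class_If; }
static bool is_RangeCheck(unsigned id) { return id == Class_RangeCheck; }

int main() {
  assert(is_If(Class_If));
  assert(is_If(Class_RangeCheck));   // a RangeCheck is-an If, via the shared bits
  assert(!is_RangeCheck(Class_If));  // but not the other way around
  return 0;
}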

View file

@@ -91,13 +91,10 @@ void Compile::Output() {
   }
 
   // Break before main entry point
-  if( (_method && C->directive()->BreakAtExecuteOption)
-#ifndef PRODUCT
-    ||(OptoBreakpoint && is_method_compilation())
-    ||(OptoBreakpointOSR && is_osr_compilation())
-    ||(OptoBreakpointC2R && !_method)
-#endif
-    ) {
+  if ((_method && C->directive()->BreakAtExecuteOption) ||
+      (OptoBreakpoint && is_method_compilation()) ||
+      (OptoBreakpointOSR && is_osr_compilation()) ||
+      (OptoBreakpointC2R && !_method)) {
     // checking for _method means that OptoBreakpoint does not apply to
     // runtime stubs or frame converters
     _cfg->insert( entry, 1, new MachBreakpointNode() );
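
Aside: the #ifndef PRODUCT guard can presumably be dropped because development-only flags such as OptoBreakpoint are defined as compile-time constants (false) in product builds, so the guarded tests fold away anyway. A sketch of that mechanism (the flag name is reused for illustration only; the #ifdef plumbing here is invented, not HotSpot's):

#include <cstdio>

#ifdef PRODUCT
// In a product-style build the flag is a constant, so any branch guarded
// by it is eliminated at compile time - no #ifndef around the use needed.
const bool OptoBreakpoint = false;
#else
// In a debug-style build it is a real, settable variable.
bool OptoBreakpoint = false;
#endif

void maybe_breakpoint(bool is_method_compilation) {
  if (OptoBreakpoint && is_method_compilation) {  // folds away when constant false
    std::puts("would insert MachBreakpointNode");
  }
}

int main() {
  maybe_breakpoint(true);
  return 0;
}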

View file

@@ -958,12 +958,10 @@ void Parse::do_exits() {
       PPC64_ONLY(wrote_volatile() ||)
       (AlwaysSafeConstructors && wrote_fields()))) {
     _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
-#ifndef PRODUCT
     if (PrintOpto && (Verbose || WizardMode)) {
       method()->print_name();
       tty->print_cr(" writes finals and needs a memory barrier");
     }
-#endif
   }
 
   // Any method can write a @Stable field; insert memory barriers after
@@ -971,12 +969,10 @@ void Parse::do_exits() {
   // barrier there.
   if (wrote_stable()) {
     _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
-#ifndef PRODUCT
     if (PrintOpto && (Verbose || WizardMode)) {
       method()->print_name();
       tty->print_cr(" writes @Stable and needs a memory barrier");
     }
-#endif
   }
 
   for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
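
Aside: the MemBarRelease inserted at the exits orders the constructor's final-field (and @Stable) writes before the object reference can become visible to other threads. Its closest C++ analogue is a release fence between initialization and publication (a sketch only; Box and g_published are invented names, not part of any real API):

#include <atomic>

struct Box { int final_field; };

static Box g_box;
static std::atomic<Box*> g_published{nullptr};

void construct_and_publish() {
  g_box.final_field = 42;                               // constructor writes a final
  std::atomic_thread_fence(std::memory_order_release);  // ~ the MemBarRelease at method exit
  g_published.store(&g_box, std::memory_order_relaxed); // publish the reference
}

int main() {
  construct_and_publish();
  return 0;
}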

Some files were not shown because too many files have changed in this diff.