Vladimir Kozlov 2012-06-14 14:59:52 -07:00
commit 7dd9d23eb1
51 changed files with 609 additions and 567 deletions

View file

@@ -54,72 +54,72 @@ jprt.sync.push=false
 # Define the Solaris platforms we want for the various releases
 jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk7u4=${jprt.my.solaris.sparc.jdk7}
+jprt.my.solaris.sparc.jdk7u6=${jprt.my.solaris.sparc.jdk7}
 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
 jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk7u4=${jprt.my.solaris.sparcv9.jdk7}
+jprt.my.solaris.sparcv9.jdk7u6=${jprt.my.solaris.sparcv9.jdk7}
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
 jprt.my.solaris.i586.jdk8=solaris_i586_5.10
 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
-jprt.my.solaris.i586.jdk7u4=${jprt.my.solaris.i586.jdk7}
+jprt.my.solaris.i586.jdk7u6=${jprt.my.solaris.i586.jdk7}
 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
 jprt.my.solaris.x64.jdk8=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
-jprt.my.solaris.x64.jdk7u4=${jprt.my.solaris.x64.jdk7}
+jprt.my.solaris.x64.jdk7u6=${jprt.my.solaris.x64.jdk7}
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
 jprt.my.linux.i586.jdk8=linux_i586_2.6
 jprt.my.linux.i586.jdk7=linux_i586_2.6
-jprt.my.linux.i586.jdk7u4=${jprt.my.linux.i586.jdk7}
+jprt.my.linux.i586.jdk7u6=${jprt.my.linux.i586.jdk7}
 jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
 jprt.my.linux.x64.jdk8=linux_x64_2.6
 jprt.my.linux.x64.jdk7=linux_x64_2.6
-jprt.my.linux.x64.jdk7u4=${jprt.my.linux.x64.jdk7}
+jprt.my.linux.x64.jdk7u6=${jprt.my.linux.x64.jdk7}
 jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
 jprt.my.linux.ppc.jdk8=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7=linux_ppc_2.6
-jprt.my.linux.ppc.jdk7u4=${jprt.my.linux.ppc.jdk7}
+jprt.my.linux.ppc.jdk7u6=${jprt.my.linux.ppc.jdk7}
 jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
 jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.jdk7u4=${jprt.my.linux.ppcv2.jdk7}
+jprt.my.linux.ppcv2.jdk7u6=${jprt.my.linux.ppcv2.jdk7}
 jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
 jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.jdk7u4=${jprt.my.linux.ppcsflt.jdk7}
+jprt.my.linux.ppcsflt.jdk7u6=${jprt.my.linux.ppcsflt.jdk7}
 jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
 jprt.my.linux.armvfp.jdk8=linux_armvfp_2.6
 jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6
-jprt.my.linux.armvfp.jdk7u4=${jprt.my.linux.armvfp.jdk7}
+jprt.my.linux.armvfp.jdk7u6=${jprt.my.linux.armvfp.jdk7}
 jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}}
 jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
-jprt.my.linux.armsflt.jdk7u4=${jprt.my.linux.armsflt.jdk7}
+jprt.my.linux.armsflt.jdk7u6=${jprt.my.linux.armsflt.jdk7}
 jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
 jprt.my.macosx.x64.jdk8=macosx_x64_10.7
 jprt.my.macosx.x64.jdk7=macosx_x64_10.7
-jprt.my.macosx.x64.jdk7u4=${jprt.my.macosx.x64.jdk7}
+jprt.my.macosx.x64.jdk7u6=${jprt.my.macosx.x64.jdk7}
 jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
 jprt.my.windows.i586.jdk8=windows_i586_5.1
 jprt.my.windows.i586.jdk7=windows_i586_5.1
-jprt.my.windows.i586.jdk7u4=${jprt.my.windows.i586.jdk7}
+jprt.my.windows.i586.jdk7u6=${jprt.my.windows.i586.jdk7}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 jprt.my.windows.x64.jdk8=windows_x64_5.2
 jprt.my.windows.x64.jdk7=windows_x64_5.2
-jprt.my.windows.x64.jdk7u4=${jprt.my.windows.x64.jdk7}
+jprt.my.windows.x64.jdk7u6=${jprt.my.windows.x64.jdk7}
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}

 # Standard list of jprt build targets for this source tree
@@ -154,7 +154,7 @@ jprt.build.targets.all=${jprt.build.targets.standard}, \
 jprt.build.targets.jdk8=${jprt.build.targets.all}
 jprt.build.targets.jdk7=${jprt.build.targets.all}
-jprt.build.targets.jdk7u4=${jprt.build.targets.all}
+jprt.build.targets.jdk7u6=${jprt.build.targets.all}
 jprt.build.targets=${jprt.build.targets.${jprt.tools.default.release}}

 # Subset lists of test targets for this source tree
@@ -447,7 +447,7 @@ jprt.test.targets.embedded= \
 jprt.test.targets.jdk8=${jprt.test.targets.standard}
 jprt.test.targets.jdk7=${jprt.test.targets.standard}
-jprt.test.targets.jdk7u4=${jprt.test.targets.jdk7}
+jprt.test.targets.jdk7u6=${jprt.test.targets.jdk7}
 jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}

 # The default test/Makefile targets that should be run
@@ -507,7 +507,7 @@ jprt.make.rule.test.targets.embedded = \
 jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk7u4=${jprt.make.rule.test.targets.jdk7}
+jprt.make.rule.test.targets.jdk7u6=${jprt.make.rule.test.targets.jdk7}
 jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}

 # 7155453: Work-around to prevent popups on OSX from blocking test completion

View file

@@ -644,30 +644,6 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
 }

-void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
-  assert(x->number_of_arguments() == 3, "wrong type");
-  LIRItem obj       (x->argument_at(0), this);  // AtomicLong object
-  LIRItem cmp_value (x->argument_at(1), this);  // value to compare with field
-  LIRItem new_value (x->argument_at(2), this);  // replace field with new_value if it matches cmp_value
-
-  obj.load_item();
-  cmp_value.load_item();
-  new_value.load_item();
-
-  // generate compare-and-swap and produce zero condition if swap occurs
-  int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
-  LIR_Opr addr = FrameMap::O7_opr;
-  __ add(obj.result(), LIR_OprFact::intConst(value_offset), addr);
-  LIR_Opr t1 = FrameMap::G1_opr;  // temp for 64-bit value
-  LIR_Opr t2 = FrameMap::G3_opr;  // temp for 64-bit value
-  __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2);
-
-  // generate conditional move of boolean result
-  LIR_Opr result = rlock_result(x);
-  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);
-}
-
 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
   assert(x->number_of_arguments() == 4, "wrong type");
   LIRItem obj   (x->argument_at(0), this);  // object
@@ -989,10 +965,10 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   if (!x->klass()->is_loaded() || PatchALot) {
     patching_info = state_for(x, x->state_before());

-    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
-    // clone all handlers.  This is handled transparently in other
-    // places by the CodeEmitInfo cloning logic but is handled
-    // specially here because a stub isn't being used.
+    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
+    // clone all handlers (NOTE: Usually this is handled transparently
+    // by the CodeEmitInfo cloning logic in CodeStub constructors but
+    // is done explicitly here because a stub isn't being used).
     x->set_exception_handlers(new XHandlers(x->exception_handlers()));
   }
   CodeEmitInfo* info = state_for(x, x->state());

View file

@@ -827,7 +827,6 @@ void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int te
     // a Load
     // inputs are (0:control, 1:memory, 2:address)
     if (!(n->ideal_Opcode()==ld_op)       && // Following are special cases
-        !(n->ideal_Opcode()==Op_LoadLLocked && ld_op==Op_LoadI) &&
         !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
         !(n->ideal_Opcode()==Op_LoadI     && ld_op==Op_LoadF) &&
         !(n->ideal_Opcode()==Op_LoadF     && ld_op==Op_LoadI) &&
@@ -7306,17 +7305,6 @@ instruct loadPLocked(iRegP dst, memory mem) %{
   ins_pipe(iload_mem);
 %}

-// LoadL-locked.  Same as a regular long load when used with a compare-swap
-instruct loadLLocked(iRegL dst, memory mem) %{
-  match(Set dst (LoadLLocked mem));
-  ins_cost(MEMORY_REF_COST);
-  size(4);
-  format %{ "LDX    $mem,$dst\t! long" %}
-  opcode(Assembler::ldx_op3);
-  ins_encode(simple_form3_mem_reg( mem, dst ) );
-  ins_pipe(iload_mem);
-%}

 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
   match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
   effect( KILL newval );

View file

@@ -6927,21 +6927,42 @@ void MacroAssembler::pow_exp_core_encoding() {
   addptr(rsp,sizeof(jdouble));
 }

+void MacroAssembler::increase_precision() {
+  subptr(rsp, BytesPerWord);
+  fnstcw(Address(rsp, 0));
+  movl(rax, Address(rsp, 0));
+  orl(rax, 0x300);
+  push(rax);
+  fldcw(Address(rsp, 0));
+  pop(rax);
+}
+
+void MacroAssembler::restore_precision() {
+  fldcw(Address(rsp, 0));
+  addptr(rsp, BytesPerWord);
+}
+
 void MacroAssembler::fast_pow() {
   // computes X^Y = 2^(Y * log2(X))
   // if fast computation is not possible, result is NaN. Requires
   // fallback from user of this macro.
+  // increase precision for intermediate steps of the computation
+  increase_precision();
   fyl2x();                 // Stack: (Y*log2(X)) ...
   pow_exp_core_encoding(); // Stack: exp(X) ...
+  restore_precision();
 }

 void MacroAssembler::fast_exp() {
   // computes exp(X) = 2^(X * log2(e))
   // if fast computation is not possible, result is NaN. Requires
   // fallback from user of this macro.
+  // increase precision for intermediate steps of the computation
+  increase_precision();
   fldl2e();                // Stack: log2(e) X ...
   fmulp(1);                // Stack: (X*log2(e)) ...
   pow_exp_core_encoding(); // Stack: exp(X) ...
+  restore_precision();
 }

 void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
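Note: the new increase_precision() ORs 0x300 into the x87 FPU control word. Bits 8-9 are the precision-control (PC) field, and 11b selects 64-bit extended precision, so the fyl2x/2^x intermediate steps keep a full 64-bit significand instead of the 53-bit double precision the VM normally runs the FPU in; restore_precision() reloads the control word that was saved on the stack. A minimal host-side C++ sketch of the control-word edit (illustration only, not HotSpot code):

    #include <cstdint>

    // Set the x87 precision-control field (bits 8-9) to 11b = 64-bit
    // extended precision; all other control-word bits are preserved.
    static inline uint16_t with_extended_precision(uint16_t fpu_cw) {
      return static_cast<uint16_t>(fpu_cw | 0x300);
    }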

View file

@@ -2395,6 +2395,8 @@ class MacroAssembler: public Assembler {
   // runtime call.
   void fast_pow();
   void fast_exp();
+  void increase_precision();
+  void restore_precision();

   // computes exp(x). Fallback to runtime call included.
   void exp_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(true, num_fpu_regs_in_use); }

View file

@@ -2673,7 +2673,7 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
 #endif // _LP64
       }
     } else {
-      ShouldNotReachHere();
+      fatal(err_msg("unexpected type: %s", basictype_to_str(c->type())));
     }
     // cpu register - address
   } else if (opr2->is_address()) {

View file

@@ -718,35 +718,6 @@ void LIRGenerator::do_CompareOp(CompareOp* x) {
 }

-void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
-  assert(x->number_of_arguments() == 3, "wrong type");
-  LIRItem obj       (x->argument_at(0), this);  // AtomicLong object
-  LIRItem cmp_value (x->argument_at(1), this);  // value to compare with field
-  LIRItem new_value (x->argument_at(2), this);  // replace field with new_value if it matches cmp_value
-
-  // compare value must be in rdx,eax (hi,lo); may be destroyed by cmpxchg8 instruction
-  cmp_value.load_item_force(FrameMap::long0_opr);
-
-  // new value must be in rcx,ebx (hi,lo)
-  new_value.load_item_force(FrameMap::long1_opr);
-
-  // object pointer register is overwritten with field address
-  obj.load_item();
-
-  // generate compare-and-swap; produces zero condition if swap occurs
-  int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
-  LIR_Opr addr = new_pointer_register();
-  __ leal(LIR_OprFact::address(new LIR_Address(obj.result(), value_offset, T_LONG)), addr);
-  LIR_Opr t1 = LIR_OprFact::illegalOpr;  // no temp needed
-  LIR_Opr t2 = LIR_OprFact::illegalOpr;  // no temp needed
-  __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2);
-
-  // generate conditional move of boolean result
-  LIR_Opr result = rlock_result(x);
-  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);
-}
-
 void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
   assert(x->number_of_arguments() == 4, "wrong type");
   LIRItem obj   (x->argument_at(0), this);  // object
@@ -1116,10 +1087,10 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   if (!x->klass()->is_loaded() || PatchALot) {
     patching_info = state_for(x, x->state_before());

-    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
-    // clone all handlers.  This is handled transparently in other
-    // places by the CodeEmitInfo cloning logic but is handled
-    // specially here because a stub isn't being used.
+    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
+    // clone all handlers (NOTE: Usually this is handled transparently
+    // by the CodeEmitInfo cloning logic in CodeStub constructors but
+    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
   }
   CodeEmitInfo* info = state_for(x, x->state());

View file

@@ -5555,8 +5555,9 @@ instruct bytes_reverse_long(eRegL dst) %{
   ins_pipe( ialu_reg_reg);
 %}

-instruct bytes_reverse_unsigned_short(eRegI dst) %{
+instruct bytes_reverse_unsigned_short(eRegI dst, eFlagsReg cr) %{
   match(Set dst (ReverseBytesUS dst));
+  effect(KILL cr);

   format %{ "BSWAP  $dst\n\t"
             "SHR    $dst,16\n\t" %}
@@ -5567,8 +5568,9 @@ instruct bytes_reverse_unsigned_short(eRegI dst) %{
   ins_pipe( ialu_reg );
 %}

-instruct bytes_reverse_short(eRegI dst) %{
+instruct bytes_reverse_short(eRegI dst, eFlagsReg cr) %{
   match(Set dst (ReverseBytesS dst));
+  effect(KILL cr);

   format %{ "BSWAP  $dst\n\t"
             "SAR    $dst,16\n\t" %}
@@ -5729,9 +5731,10 @@ instruct countTrailingZerosL(eRegI dst, eRegL src, eFlagsReg cr) %{

 //---------- Population Count Instructions -------------------------------------

-instruct popCountI(eRegI dst, eRegI src) %{
+instruct popCountI(eRegI dst, eRegI src, eFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountI src));
+  effect(KILL cr);

   format %{ "POPCNT $dst, $src" %}
   ins_encode %{
@@ -5740,9 +5743,10 @@ instruct popCountI(eRegI dst, eRegI src) %{
   ins_pipe(ialu_reg);
 %}

-instruct popCountI_mem(eRegI dst, memory mem) %{
+instruct popCountI_mem(eRegI dst, memory mem, eFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountI (LoadI mem)));
+  effect(KILL cr);

   format %{ "POPCNT $dst, $mem" %}
   ins_encode %{
@@ -7796,50 +7800,6 @@ instruct loadPLocked(eRegP dst, memory mem) %{
   ins_pipe( ialu_reg_mem );
 %}

-// LoadLong-locked - same as a volatile long load when used with compare-swap
-instruct loadLLocked(stackSlotL dst, memory mem) %{
-  predicate(UseSSE<=1);
-  match(Set dst (LoadLLocked mem));
-  ins_cost(200);
-  format %{ "FILD   $mem\t# Atomic volatile long load\n\t"
-            "FISTp  $dst" %}
-  ins_encode(enc_loadL_volatile(mem,dst));
-  ins_pipe( fpu_reg_mem );
-%}
-
-instruct loadLX_Locked(stackSlotL dst, memory mem, regD tmp) %{
-  predicate(UseSSE>=2);
-  match(Set dst (LoadLLocked mem));
-  effect(TEMP tmp);
-  ins_cost(180);
-  format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"
-            "MOVSD  $dst,$tmp" %}
-  ins_encode %{
-    __ movdbl($tmp$$XMMRegister, $mem$$Address);
-    __ movdbl(Address(rsp, $dst$$disp), $tmp$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct loadLX_reg_Locked(eRegL dst, memory mem, regD tmp) %{
-  predicate(UseSSE>=2);
-  match(Set dst (LoadLLocked mem));
-  effect(TEMP tmp);
-  ins_cost(160);
-  format %{ "MOVSD  $tmp,$mem\t# Atomic volatile long load\n\t"
-            "MOVD   $dst.lo,$tmp\n\t"
-            "PSRLQ  $tmp,32\n\t"
-            "MOVD   $dst.hi,$tmp" %}
-  ins_encode %{
-    __ movdbl($tmp$$XMMRegister, $mem$$Address);
-    __ movdl($dst$$Register, $tmp$$XMMRegister);
-    __ psrlq($tmp$$XMMRegister, 32);
-    __ movdl(HIGH_FROM_LOW($dst$$Register), $tmp$$XMMRegister);
-  %}
-  ins_pipe( pipe_slow );
-%}

 // Conditional-store of the updated heap-top.
 // Used during allocation of the shared heap.
 // Sets flags (EQ) on success.  Implemented with a CMPXCHG on Intel.
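Note: the new eFlagsReg cr operand with effect(KILL cr) on the byte-reverse and popcount instructs records that their expansions (the SHR/SAR following BSWAP, and POPCNT itself) write EFLAGS. Without the declared kill, the scheduler is free to place one of these between a compare and its dependent branch, silently clobbering the condition codes.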

View file

@@ -6417,14 +6417,14 @@ instruct bytes_reverse_long(rRegL dst) %{
   match(Set dst (ReverseBytesL dst));

   format %{ "bswapq  $dst" %}

   opcode(0x0F, 0xC8); /* Opcode 0F /C8 */
   ins_encode( REX_reg_wide(dst), OpcP, opc2_reg(dst) );
   ins_pipe( ialu_reg);
 %}

-instruct bytes_reverse_unsigned_short(rRegI dst) %{
+instruct bytes_reverse_unsigned_short(rRegI dst, rFlagsReg cr) %{
   match(Set dst (ReverseBytesUS dst));
+  effect(KILL cr);

   format %{ "bswapl  $dst\n\t"
             "shrl    $dst,16\n\t" %}
@@ -6435,8 +6435,9 @@ instruct bytes_reverse_unsigned_short(rRegI dst) %{
   ins_pipe( ialu_reg );
 %}

-instruct bytes_reverse_short(rRegI dst) %{
+instruct bytes_reverse_short(rRegI dst, rFlagsReg cr) %{
   match(Set dst (ReverseBytesS dst));
+  effect(KILL cr);

   format %{ "bswapl  $dst\n\t"
             "sar     $dst,16\n\t" %}
@@ -6564,9 +6565,10 @@ instruct countTrailingZerosL(rRegI dst, rRegL src, rFlagsReg cr) %{

 //---------- Population Count Instructions -------------------------------------

-instruct popCountI(rRegI dst, rRegI src) %{
+instruct popCountI(rRegI dst, rRegI src, rFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountI src));
+  effect(KILL cr);

   format %{ "popcnt  $dst, $src" %}
   ins_encode %{
@@ -6575,9 +6577,10 @@ instruct popCountI(rRegI dst, rRegI src) %{
   ins_pipe(ialu_reg);
 %}

-instruct popCountI_mem(rRegI dst, memory mem) %{
+instruct popCountI_mem(rRegI dst, memory mem, rFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountI (LoadI mem)));
+  effect(KILL cr);

   format %{ "popcnt  $dst, $mem" %}
   ins_encode %{
@@ -6587,9 +6590,10 @@ instruct popCountI_mem(rRegI dst, memory mem) %{
 %}

 // Note: Long.bitCount(long) returns an int.
-instruct popCountL(rRegI dst, rRegL src) %{
+instruct popCountL(rRegI dst, rRegL src, rFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountL src));
+  effect(KILL cr);

   format %{ "popcnt  $dst, $src" %}
   ins_encode %{
@@ -6599,9 +6603,10 @@ instruct popCountL(rRegI dst, rRegL src) %{
 %}

 // Note: Long.bitCount(long) returns an int.
-instruct popCountL_mem(rRegI dst, memory mem) %{
+instruct popCountL_mem(rRegI dst, memory mem, rFlagsReg cr) %{
   predicate(UsePopCountInstruction);
   match(Set dst (PopCountL (LoadL mem)));
+  effect(KILL cr);

   format %{ "popcnt  $dst, $mem" %}
   ins_encode %{
@@ -7492,18 +7497,6 @@ instruct loadPLocked(rRegP dst, memory mem)
   ins_pipe(ialu_reg_mem); // XXX
 %}

-// LoadL-locked - same as a regular LoadL when used with compare-swap
-instruct loadLLocked(rRegL dst, memory mem)
-%{
-  match(Set dst (LoadLLocked mem));
-
-  ins_cost(125); // XXX
-  format %{ "movq    $dst, $mem\t# long locked" %}
-  opcode(0x8B);
-  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
-  ins_pipe(ialu_reg_mem); // XXX
-%}

 // Conditional-store of the updated heap-top.
 // Used during allocation of the shared heap.
 // Sets flags (EQ) on success.  Implemented with a CMPXCHG on Intel.

View file

@@ -261,7 +261,6 @@ Form::DataType Form::is_load_from_memory(const char *opType) const {
   if( strcmp(opType,"LoadL")==0 )  return Form::idealL;
   if( strcmp(opType,"LoadL_unaligned")==0 )  return Form::idealL;
   if( strcmp(opType,"LoadPLocked")==0 )  return Form::idealP;
-  if( strcmp(opType,"LoadLLocked")==0 )  return Form::idealL;
   if( strcmp(opType,"LoadP")==0 )  return Form::idealP;
   if( strcmp(opType,"LoadN")==0 )  return Form::idealN;
   if( strcmp(opType,"LoadRange")==0 )  return Form::idealI;

View file

@@ -3387,7 +3387,7 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
     "Load4I" ,"Load2I" ,"Load2L" ,"Load2D" ,"Load4F" ,"Load2F" ,"Load16B" ,
     "Load8B" ,"Load4B" ,"Load8C" ,"Load4C" ,"Load2C" ,"Load8S", "Load4S","Load2S",
     "LoadRange", "LoadKlass", "LoadNKlass", "LoadL_unaligned", "LoadD_unaligned",
-    "LoadPLocked", "LoadLLocked",
+    "LoadPLocked",
     "StorePConditional", "StoreIConditional", "StoreLConditional",
     "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
     "StoreCM",

View file

@@ -42,6 +42,11 @@ void Canonicalizer::set_canonical(Value x) {
   // the instruction stream (because the instruction list is embedded
   // in the instructions).
   if (canonical() != x) {
+#ifndef PRODUCT
+    if (!x->has_printable_bci()) {
+      x->set_printable_bci(bci());
+    }
+#endif
     if (PrintCanonicalization) {
       PrintValueVisitor do_print_value;
       canonical()->input_values_do(&do_print_value);
@@ -451,6 +456,28 @@ void Canonicalizer::do_Intrinsic (Intrinsic* x) {
     }
     break;
   }
+  case vmIntrinsics::_isInstance          : {
+    assert(x->number_of_arguments() == 2, "wrong type");
+
+    InstanceConstant* c = x->argument_at(0)->type()->as_InstanceConstant();
+    if (c != NULL && !c->value()->is_null_object()) {
+      // ciInstance::java_mirror_type() returns non-NULL only for Java mirrors
+      ciType* t = c->value()->as_instance()->java_mirror_type();
+      if (t->is_klass()) {
+        // substitute cls.isInstance(obj) of a constant Class into
+        // an InstanceOf instruction
+        InstanceOf* i = new InstanceOf(t->as_klass(), x->argument_at(1), x->state_before());
+        set_canonical(i);
+        // and try to canonicalize even further
+        do_InstanceOf(i);
+      } else {
+        assert(t->is_primitive_type(), "should be a primitive type");
+        // cls.isInstance(obj) always returns false for primitive classes
+        set_constant(0);
+      }
+    }
+    break;
+  }
   }
 }
@@ -677,8 +704,8 @@ void Canonicalizer::do_If(If* x) {
           return;
         }
       }
-      set_canonical(canon);
       set_bci(cmp->state_before()->bci());
+      set_canonical(canon);
     }
   }
 } else if (l->as_InstanceOf() != NULL) {
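Note: this canonicalization strength-reduces Class.isInstance when the receiver is a compile-time constant Class: a reference-type mirror turns the call into an InstanceOf node (which do_InstanceOf may fold further), while a primitive mirror such as int.class can never have instances, so the result folds to the constant 0 (false).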

View file

@@ -3170,6 +3170,7 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
     break;

   case vmIntrinsics::_getClass      :
+  case vmIntrinsics::_isInstance    :
     if (!InlineClassNatives) return false;
     preserves_state = true;
     break;
@@ -3194,13 +3195,6 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
     preserves_state = true;
     break;

-  // sun/misc/AtomicLong.attemptUpdate
-  case vmIntrinsics::_attemptUpdate :
-    if (!VM_Version::supports_cx8()) return false;
-    if (!InlineAtomicLong) return false;
-    preserves_state = true;
-    break;
-
   // Use special nodes for Unsafe instructions so we can more easily
   // perform an address-mode optimization on the raw variants
   case vmIntrinsics::_getObject : return append_unsafe_get_obj(callee, T_OBJECT, false);

View file

@@ -302,8 +302,6 @@ class Instruction: public CompilationResourceObj {
   void update_exception_state(ValueStack* state);

-  bool has_printable_bci() const { return NOT_PRODUCT(_printable_bci != -99) PRODUCT_ONLY(false); }
-
  protected:
   void set_type(ValueType* type) {
     assert(type != NULL, "type must exist");
@@ -392,8 +390,9 @@ class Instruction: public CompilationResourceObj {
   // accessors
   int id() const                                 { return _id; }
 #ifndef PRODUCT
+  bool has_printable_bci() const                 { return _printable_bci != -99; }
   int printable_bci() const                      { assert(has_printable_bci(), "_printable_bci should have been set"); return _printable_bci; }
-  void set_printable_bci(int bci)                { NOT_PRODUCT(_printable_bci = bci;) }
+  void set_printable_bci(int bci)                { _printable_bci = bci; }
 #endif
   int use_count() const                          { return _use_count; }
   int pin_state() const                          { return _pin_state; }
@@ -576,6 +575,7 @@ LEAF(Phi, Instruction)
   , _block(b)
   , _index(index)
   {
+    NOT_PRODUCT(set_printable_bci(Value(b)->printable_bci()));
     if (type->is_illegal()) {
       make_illegal();
     }
@@ -631,7 +631,9 @@ LEAF(Local, Instruction)
     : Instruction(type)
     , _java_index(index)
     , _declared_type(declared)
-  {}
+  {
+    NOT_PRODUCT(set_printable_bci(-1));
+  }

   // accessors
   int java_index() const                         { return _java_index; }
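Note: has_printable_bci() moves inside the existing #ifndef PRODUCT block next to printable_bci(), and set_printable_bci() drops its NOT_PRODUCT wrapper because the whole declaration is now debug-only. Phi and Local pick up an explicit printable bci at construction, presumably because neither originates from a single bytecode and would otherwise trip the assert in printable_bci().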

View file

@@ -1242,6 +1242,36 @@ void LIRGenerator::do_Reference_get(Intrinsic* x) {
               NULL /* info */);
 }

+// Example: clazz.isInstance(object)
+void LIRGenerator::do_isInstance(Intrinsic* x) {
+  assert(x->number_of_arguments() == 2, "wrong type");
+
+  // TODO could try to substitute this node with an equivalent InstanceOf
+  // if clazz is known to be a constant Class. This will pick up newly found
+  // constants after HIR construction. I'll leave this to a future change.
+
+  // as a first cut, make a simple leaf call to runtime to stay platform independent.
+  // could follow the aastore example in a future change.
+
+  LIRItem clazz(x->argument_at(0), this);
+  LIRItem object(x->argument_at(1), this);
+  clazz.load_item();
+  object.load_item();
+  LIR_Opr result = rlock_result(x);
+
+  // need to perform null check on clazz
+  if (x->needs_null_check()) {
+    CodeEmitInfo* info = state_for(x);
+    __ null_check(clazz.result(), info);
+  }
+
+  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
+                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
+                                     x->type(),
+                                     NULL); // NULL CodeEmitInfo results in a leaf call
+  __ move(call_result, result);
+}
+
 // Example: object.getClass ()
 void LIRGenerator::do_getClass(Intrinsic* x) {
   assert(x->number_of_arguments() == 1, "wrong type");
@@ -2777,31 +2807,29 @@ void LIRGenerator::do_Invoke(Invoke* x) {
       int index = bcs.get_method_index();
       size_t call_site_offset = cpcache->get_f1_offset(index);

+      // Load CallSite object from constant pool cache.
+      LIR_Opr call_site = new_register(objectType);
+      __ oop2reg(cpcache->constant_encoding(), call_site);
+      __ move_wide(new LIR_Address(call_site, call_site_offset, T_OBJECT), call_site);
+
       // If this invokedynamic call site hasn't been executed yet in
       // the interpreter, the CallSite object in the constant pool
       // cache is still null and we need to deoptimize.
       if (cpcache->is_f1_null_at(index)) {
-        // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
-        // clone all handlers.  This is handled transparently in other
-        // places by the CodeEmitInfo cloning logic but is handled
-        // specially here because a stub isn't being used.
-        x->set_exception_handlers(new XHandlers(x->exception_handlers()));
+        // Only deoptimize if the CallSite object is still null; we don't
+        // recompile methods in C1 after deoptimization so this call site
+        // might be resolved the next time we execute it after OSR.
         DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
-        __ jump(deopt_stub);
+        __ cmp(lir_cond_equal, call_site, LIR_OprFact::oopConst(NULL));
+        __ branch(lir_cond_equal, T_OBJECT, deopt_stub);
       }

       // Use the receiver register for the synthetic MethodHandle
       // argument.
       receiver = LIR_Assembler::receiverOpr();
-      LIR_Opr tmp = new_register(objectType);
-
-      // Load CallSite object from constant pool cache.
-      __ oop2reg(cpcache->constant_encoding(), tmp);
-      __ move_wide(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);

       // Load target MethodHandle from CallSite object.
-      __ load(new LIR_Address(tmp, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
+      __ load(new LIR_Address(call_site, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);

       __ call_dynamic(target, receiver, result_register,
                       SharedRuntime::get_resolve_opt_virtual_call_stub(),
@@ -2809,7 +2837,7 @@ void LIRGenerator::do_Invoke(Invoke* x) {
     break;
   }
   default:
-    ShouldNotReachHere();
+    fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
     break;
   }
@@ -2951,6 +2979,7 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
     break;

   case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
+  case vmIntrinsics::_isInstance:     do_isInstance(x);        break;
   case vmIntrinsics::_getClass:       do_getClass(x);          break;
   case vmIntrinsics::_currentThread:  do_currentThread(x);     break;
@@ -2978,11 +3007,6 @@ void LIRGenerator::do_Intrinsic(Intrinsic* x) {
     do_CompareAndSwap(x, longType);
     break;

-  // sun.misc.AtomicLongCSImpl.attemptUpdate
-  case vmIntrinsics::_attemptUpdate:
-    do_AttemptUpdate(x);
-    break;
-
   case vmIntrinsics::_Reference_get:
     do_Reference_get(x);
     break;
@@ -3223,4 +3247,3 @@ void LIRGenerator::do_MemBar(MemBar* x) {
     }
   }
 }
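Note: the invokedynamic path previously emitted an unconditional jump to the deoptimization stub whenever the CallSite's f1 slot was still null at compile time. It now always loads the CallSite oop and tests it at runtime, deoptimizing only if it is still null; as the new comment explains, C1 does not recompile after deoptimization, so an OSR execution may reach this site after it has been resolved.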

View file

@@ -238,12 +238,12 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
   LIR_Opr getThreadPointer();

   void do_RegisterFinalizer(Intrinsic* x);
+  void do_isInstance(Intrinsic* x);
   void do_getClass(Intrinsic* x);
   void do_currentThread(Intrinsic* x);
   void do_MathIntrinsic(Intrinsic* x);
   void do_ArrayCopy(Intrinsic* x);
   void do_CompareAndSwap(Intrinsic* x, ValueType* type);
-  void do_AttemptUpdate(Intrinsic* x);
   void do_NIOCheckIndex(Intrinsic* x);
   void do_FPIntrinsics(Intrinsic* x);
   void do_Reference_get(Intrinsic* x);

View file

@@ -294,6 +294,7 @@ const char* Runtime1::name_for_address(address entry) {
   FUNCTION_CASE(entry, SharedRuntime::lrem);
   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
+  FUNCTION_CASE(entry, is_instance_of);
   FUNCTION_CASE(entry, trace_block_entry);
 #ifdef TRACE_HAVE_INTRINSICS
   FUNCTION_CASE(entry, TRACE_TIME_METHOD);
@@ -1270,6 +1271,19 @@ JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
 JRT_END

+JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
+  // had to return int instead of bool, otherwise there may be a mismatch
+  // between the C calling convention and the Java one.
+  // e.g., on x86, GCC may clear only %al when returning a bool false, but
+  // JVM takes the whole %eax as the return value, which may misinterpret
+  // the return value as a boolean true.
+
+  assert(mirror != NULL, "should null-check on mirror before calling");
+  klassOop k = java_lang_Class::as_klassOop(mirror);
+  return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
+JRT_END
+
 #ifndef PRODUCT
 void Runtime1::print_statistics() {
   tty->print_cr("C1 Runtime statistics:");
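Note on the int-instead-of-bool return above: on x86 a callee returning a C++ bool may set only the low byte (%al) and leave the upper bits of %eax undefined, while compiled Java code reads the full register. A hedged C++ sketch of the hazard (illustrative, not HotSpot code; the function names are made up):

    // Suppose this is compiled separately and returns 'false' by
    // clearing only %al, leaving bits 8-31 of %eax as garbage.
    extern "C" bool callee_returning_bool();

    // A caller that reads the full 32-bit return register, as the JVM's
    // Java calling convention does, could then see a nonzero 'true'.
    // Returning a full int, as is_instance_of does, removes the mismatch.
    extern "C" int callee_returning_int() {
      return callee_returning_bool() ? 1 : 0;
    }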

View file

@@ -186,6 +186,7 @@ class Runtime1: public AllStatic {
   static int  arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length);
   static void primitive_arraycopy(HeapWord* src, HeapWord* dst, int length);
   static void oop_arraycopy(HeapWord* src, HeapWord* dst, int length);
+  static int  is_instance_of(oopDesc* mirror, oopDesc* obj);

   static void print_statistics() PRODUCT_RETURN;
 };

View file

@@ -141,8 +141,11 @@ class ValueNumberingVisitor: public InstructionVisitor {
   // visitor functions
   void do_StoreField     (StoreField*      x) {
-    if (x->is_init_point()) {
-      // putstatic is an initialization point so treat it as a wide kill
+    if (x->is_init_point() ||  // putstatic is an initialization point so treat it as a wide kill
+        // This is actually too strict and the JMM doesn't require
+        // this in all cases (e.g. load a; volatile store b; load a)
+        // but possible future optimizations might require this.
+        x->field()->is_volatile()) {
       kill_memory();
     } else {
       kill_field(x->field());
@@ -160,8 +163,8 @@ class ValueNumberingVisitor: public InstructionVisitor {
   void do_Local          (Local*           x) { /* nothing to do */ }
   void do_Constant       (Constant*        x) { /* nothing to do */ }
   void do_LoadField      (LoadField*       x) {
-    if (x->is_init_point()) {
-      // getstatic is an initialization point so treat it as a wide kill
+    if (x->is_init_point() ||         // getstatic is an initialization point so treat it as a wide kill
+        x->field()->is_volatile()) {  // the JMM requires this
       kill_memory();
     }
   }

View file

@@ -2919,7 +2919,6 @@ int java_lang_AssertionStatusDirectives::packages_offset;
 int java_lang_AssertionStatusDirectives::packageEnabled_offset;
 int java_lang_AssertionStatusDirectives::deflt_offset;
 int java_nio_Buffer::_limit_offset;
-int sun_misc_AtomicLongCSImpl::_value_offset;
 int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset = 0;
 int sun_reflect_ConstantPool::_cp_oop_offset;
 int sun_reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
@@ -2979,21 +2978,6 @@ void java_nio_Buffer::compute_offsets() {
   compute_offset(_limit_offset, k, vmSymbols::limit_name(), vmSymbols::int_signature());
 }

-// Support for intrinsification of sun.misc.AtomicLongCSImpl.attemptUpdate
-int sun_misc_AtomicLongCSImpl::value_offset() {
-  assert(SystemDictionary::AtomicLongCSImpl_klass() != NULL, "can't call this");
-  return _value_offset;
-}
-
-void sun_misc_AtomicLongCSImpl::compute_offsets() {
-  klassOop k = SystemDictionary::AtomicLongCSImpl_klass();
-  // If this class is not present, its value field offset won't be referenced.
-  if (k != NULL) {
-    compute_offset(_value_offset, k, vmSymbols::value_name(), vmSymbols::long_signature());
-  }
-}
-
 void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) {
   if (_owner_offset != 0) return;
@@ -3098,7 +3082,6 @@ void JavaClasses::compute_offsets() {
     sun_reflect_ConstantPool::compute_offsets();
     sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets();
   }
-  sun_misc_AtomicLongCSImpl::compute_offsets();

   // generated interpreter code wants to know about the offsets we just computed:
   AbstractAssembler::update_delayed_values();

View file

@@ -1383,15 +1383,6 @@ class java_nio_Buffer: AllStatic {
   static void compute_offsets();
 };

-class sun_misc_AtomicLongCSImpl: AllStatic {
- private:
-  static int _value_offset;
-
- public:
-  static int  value_offset();
-  static void compute_offsets();
-};
-
 class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
  private:
   static int  _owner_offset;

View file

@@ -170,9 +170,6 @@ class SymbolPropertyTable;
   /* It's okay if this turns out to be NULL in non-1.4 JDKs. */              \
   template(nio_Buffer_klass,             java_nio_Buffer,            Opt)    \
                                                                              \
-  /* If this class isn't present, it won't be referenced. */                 \
-  template(AtomicLongCSImpl_klass,       sun_misc_AtomicLongCSImpl,  Opt)    \
-                                                                             \
   template(DownloadManager_klass,        sun_jkernel_DownloadManager, Opt_Kernel) \
                                                                              \
   template(PostVMInitHook_klass,         sun_misc_PostVMInitHook,    Opt)    \

View file

@@ -722,15 +722,6 @@
   /* java/lang/ref/Reference */                                              \
   do_intrinsic(_Reference_get, java_lang_ref_Reference, get_name, void_object_signature, F_R) \
                                                                              \
-                                                                             \
-  do_class(sun_misc_AtomicLongCSImpl, "sun/misc/AtomicLongCSImpl")           \
-  do_intrinsic(_get_AtomicLong, sun_misc_AtomicLongCSImpl, get_name, void_long_signature, F_R) \
-  /*   (symbols get_name and void_long_signature defined above) */           \
-                                                                             \
-  do_intrinsic(_attemptUpdate, sun_misc_AtomicLongCSImpl, attemptUpdate_name, attemptUpdate_signature, F_R) \
-   do_name(     attemptUpdate_name,      "attemptUpdate")                    \
-   do_signature(attemptUpdate_signature, "(JJ)Z")                            \
-                                                                             \
   /* support for sun.misc.Unsafe */                                          \
   do_class(sun_misc_Unsafe, "sun/misc/Unsafe")                               \
                                                                              \

View file

@@ -844,6 +844,14 @@ nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, addr
     int bci = method->bci_from(fr.interpreter_frame_bcp());
     nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
   }
+#ifndef PRODUCT
+  if (TraceOnStackReplacement) {
+    if (nm != NULL) {
+      tty->print("OSR entry @ pc: " INTPTR_FORMAT ": ", nm->osr_entry());
+      nm->print();
+    }
+  }
+#endif
   return nm;
 }

View file

@@ -70,11 +70,11 @@ address methodOopDesc::get_c2i_unverified_entry() {
   return _adapter->get_c2i_unverified_entry();
 }

-char* methodOopDesc::name_and_sig_as_C_string() {
+char* methodOopDesc::name_and_sig_as_C_string() const {
   return name_and_sig_as_C_string(Klass::cast(constants()->pool_holder()), name(), signature());
 }

-char* methodOopDesc::name_and_sig_as_C_string(char* buf, int size) {
+char* methodOopDesc::name_and_sig_as_C_string(char* buf, int size) const {
   return name_and_sig_as_C_string(Klass::cast(constants()->pool_holder()), name(), signature(), buf, size);
 }
@@ -177,7 +177,8 @@ void methodOopDesc::mask_for(int bci, InterpreterOopMap* mask) {
 int methodOopDesc::bci_from(address bcp) const {
-  assert(is_native() && bcp == code_base() || contains(bcp) || is_error_reported(), "bcp doesn't belong to this method");
+  assert(is_native() && bcp == code_base() || contains(bcp) || is_error_reported(),
+         err_msg("bcp doesn't belong to this method: bcp: " INTPTR_FORMAT ", method: %s", bcp, name_and_sig_as_C_string()));
   return bcp - code_base();
 }
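Note: the two name_and_sig_as_C_string() overloads become const because the new assert message in bci_from(), itself a const member function, needs to call them on this.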

View file

@@ -196,8 +196,8 @@ class methodOopDesc : public oopDesc {
   // C string, for the purpose of providing more useful NoSuchMethodErrors
   // and fatal error handling. The string is allocated in resource
   // area if a buffer is not provided by the caller.
-  char* name_and_sig_as_C_string();
-  char* name_and_sig_as_C_string(char* buf, int size);
+  char* name_and_sig_as_C_string() const;
+  char* name_and_sig_as_C_string(char* buf, int size) const;
   // Static routine in the situations we don't have a methodOop
   static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);

View file

@@ -147,7 +147,6 @@ macro(LoadNKlass)
 macro(LoadL)
 macro(LoadL_unaligned)
 macro(LoadPLocked)
-macro(LoadLLocked)
 macro(LoadP)
 macro(LoadN)
 macro(LoadRange)

View file

@@ -2297,7 +2297,6 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
   case Op_LoadL:
   case Op_LoadL_unaligned:
   case Op_LoadPLocked:
-  case Op_LoadLLocked:
   case Op_LoadP:
   case Op_LoadN:
   case Op_LoadRange:

View file

@@ -284,9 +284,14 @@ static Node* long_by_long_mulhi(PhaseGVN* phase, Node* dividend, jlong magic_con
   const int N = 64;

+  // Dummy node to keep intermediate nodes alive during construction
+  Node* hook = new (phase->C, 4) Node(4);
+
   // u0 = u & 0xFFFFFFFF;  u1 = u >> 32;
   Node* u0 = phase->transform(new (phase->C, 3) AndLNode(dividend, phase->longcon(0xFFFFFFFF)));
   Node* u1 = phase->transform(new (phase->C, 3) RShiftLNode(dividend, phase->intcon(N / 2)));
+  hook->init_req(0, u0);
+  hook->init_req(1, u1);

   // v0 = v & 0xFFFFFFFF;  v1 = v >> 32;
   Node* v0 = phase->longcon(magic_const & 0xFFFFFFFF);
@@ -299,19 +304,14 @@ static Node* long_by_long_mulhi(PhaseGVN* phase, Node* dividend, jlong magic_con
   Node* u1v0 = phase->transform(new (phase->C, 3) MulLNode(u1, v0));
   Node* temp = phase->transform(new (phase->C, 3) URShiftLNode(w0, phase->intcon(N / 2)));
   Node* t    = phase->transform(new (phase->C, 3) AddLNode(u1v0, temp));
+  hook->init_req(2, t);

   // w1 = t & 0xFFFFFFFF;
-  Node* w1 = new (phase->C, 3) AndLNode(t, phase->longcon(0xFFFFFFFF));
+  Node* w1 = phase->transform(new (phase->C, 3) AndLNode(t, phase->longcon(0xFFFFFFFF)));
+  hook->init_req(3, w1);

   // w2 = t >> 32;
-  Node* w2 = new (phase->C, 3) RShiftLNode(t, phase->intcon(N / 2));
-
-  // 6732154: Construct both w1 and w2 before transforming, so t
-  // doesn't go dead prematurely.
-  // 6837011: We need to transform w2 before w1 because the
-  // transformation of w1 could return t.
-  w2 = phase->transform(w2);
-  w1 = phase->transform(w1);
+  Node* w2 = phase->transform(new (phase->C, 3) RShiftLNode(t, phase->intcon(N / 2)));

   // w1 = u0*v1 + w1;
   Node* u0v1 = phase->transform(new (phase->C, 3) MulLNode(u0, v1));
@@ -322,6 +322,16 @@ static Node* long_by_long_mulhi(PhaseGVN* phase, Node* dividend, jlong magic_con
   Node* temp1 = phase->transform(new (phase->C, 3) AddLNode(u1v1, w2));
   Node* temp2 = phase->transform(new (phase->C, 3) RShiftLNode(w1, phase->intcon(N / 2)));

+  // Remove the bogus extra edges used to keep things alive
+  PhaseIterGVN* igvn = phase->is_IterGVN();
+  if (igvn != NULL) {
+    igvn->remove_dead_node(hook);
+  } else {
+    for (int i = 0; i < 4; i++) {
+      hook->set_req(i, NULL);
+    }
+  }
+
   return new (phase->C, 3) AddLNode(temp1, temp2);
 }
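Note: the node graph built here is the standard high-half 64x64-bit signed multiply from Hacker's Delight, with a temporary hook node pinning u0, u1, t and w1 so GVN cannot reclaim them mid-construction. A plain C++ rendering of the arithmetic (a sketch for readability, not HotSpot code):

    #include <cstdint>

    // High 64 bits of the signed 128-bit product u * v, combining
    // 32-bit partial products the same way long_by_long_mulhi does.
    int64_t mulhi64(int64_t u, int64_t v) {
      uint64_t u0 = (uint32_t)u;   // u & 0xFFFFFFFF
      int64_t  u1 = u >> 32;       // arithmetic shift keeps the sign
      uint64_t v0 = (uint32_t)v;
      int64_t  v1 = v >> 32;

      uint64_t w0 = u0 * v0;                           // low partial product
      int64_t  t  = u1 * (int64_t)v0 + (int64_t)(w0 >> 32);
      int64_t  w1 = (int64_t)(uint32_t)t;              // t & 0xFFFFFFFF
      int64_t  w2 = t >> 32;

      w1 = (int64_t)u0 * v1 + w1;
      return u1 * v1 + w2 + (w1 >> 32);
    }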

View file

@@ -465,15 +465,11 @@ void PhaseIdealLoop::Dominators() {
       // Kill dead input path
       assert( !visited.test(whead->in(i)->_idx),
               "input with no loop must be dead" );
-      _igvn.hash_delete(whead);
-      whead->del_req(i);
-      _igvn._worklist.push(whead);
+      _igvn.delete_input_of(whead, i);
       for (DUIterator_Fast jmax, j = whead->fast_outs(jmax); j < jmax; j++) {
         Node* p = whead->fast_out(j);
         if( p->is_Phi() ) {
-          _igvn.hash_delete(p);
-          p->del_req(i);
-          _igvn._worklist.push(p);
+          _igvn.delete_input_of(p, i);
         }
       }
       i--;                      // Rerun same iteration
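Note: delete_input_of() here, and replace_input_of()/rehash_node_delayed() in the next file, bundle the three-step pattern they replace. Presumably they amount to the following (a sketch of the intent, not the exact HotSpot declarations):

    // rehash_node_delayed(n): unhash n before edge surgery, since a
    // node's hash depends on its inputs, and queue it for another
    // IGVN pass:  hash_delete(n); _worklist.push(n);
    //
    // delete_input_of(n, i):      rehash_node_delayed(n); n->del_req(i);
    // replace_input_of(n, i, in): rehash_node_delayed(n); n->set_req(i, in);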

View file

@@ -338,8 +338,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
   Node *phi_f = NULL;     // do not construct unless needed
   for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) {
     Node* v = phi->last_out(i2);// User of the phi
-    igvn->hash_delete(v);       // Have to fixup other Phi users
-    igvn->_worklist.push(v);
+    igvn->rehash_node_delayed(v); // Have to fixup other Phi users
     uint vop = v->Opcode();
     Node *proj = NULL;
     if( vop == Op_Phi ) {       // Remote merge point
@@ -552,9 +551,8 @@ static void adjust_check(Node* proj, Node* range, Node* index,
   if( new_cmp == cmp ) return;
   // Else, adjust existing check
   Node *new_bol = gvn->transform( new (gvn->C, 2) BoolNode( new_cmp, bol->as_Bool()->_test._test ) );
-  igvn->hash_delete( iff );
+  igvn->rehash_node_delayed( iff );
   iff->set_req_X( 1, new_bol, igvn );
-  igvn->_worklist.push( iff );
 }

 //------------------------------up_one_dom-------------------------------------
@@ -732,9 +730,7 @@ Node* IfNode::fold_compares(PhaseGVN* phase) {
       Node* adjusted = phase->transform(new (phase->C, 3) SubINode(n, phase->intcon(failtype->_lo)));
       Node* newcmp = phase->transform(new (phase->C, 3) CmpUNode(adjusted, phase->intcon(bound)));
       Node* newbool = phase->transform(new (phase->C, 2) BoolNode(newcmp, cond));
-      phase->hash_delete(dom_iff);
-      dom_iff->set_req(1, phase->intcon(ctrl->as_Proj()->_con));
-      phase->is_IterGVN()->_worklist.push(dom_iff);
+      phase->is_IterGVN()->replace_input_of(dom_iff, 1, phase->intcon(ctrl->as_Proj()->_con));
       phase->hash_delete(this);
       set_req(1, newbool);
       return this;
@@ -1042,17 +1038,15 @@ void IfNode::dominated_by( Node *prev_dom, PhaseIterGVN *igvn ) {
     // Loop ends when projection has no more uses.
     for (DUIterator_Last jmin, j = ifp->last_outs(jmin); j >= jmin; --j) {
       Node* s = ifp->last_out(j);   // Get child of IfTrue/IfFalse
-      igvn->hash_delete(s);         // Yank from hash table before edge hacking
       if( !s->depends_only_on_test() ) {
         // Find the control input matching this def-use edge.
         // For Regions it may not be in slot 0.
         uint l;
         for( l = 0; s->in(l) != ifp; l++ ) { }
-        s->set_req(l, ctrl_target);
+        igvn->replace_input_of(s, l, ctrl_target);
       } else {                      // Else, for control producers,
-        s->set_req(0, data_target); // Move child to data-target
+        igvn->replace_input_of(s, 0, data_target); // Move child to data-target
       }
-      igvn->_worklist.push(s);      // Revisit collapsed Phis
     } // End for each child of a projection
     igvn->remove_dead_node(ifp);
@@ -192,8 +192,6 @@ class LibraryCallKit : public GraphKit {
   void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
   bool inline_native_clone(bool is_virtual);
   bool inline_native_Reflection_getCallerClass();
-  bool inline_native_AtomicLong_get();
-  bool inline_native_AtomicLong_attemptUpdate();
   bool is_method_invoke_or_aux_frame(JVMState* jvms);
   // Helper function for inlining native object hash method
   bool inline_native_hashcode(bool is_virtual, bool is_static);
@@ -331,11 +329,6 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
     // We do not intrinsify this.  The optimizer does fine with it.
     return NULL;

-  case vmIntrinsics::_get_AtomicLong:
-  case vmIntrinsics::_attemptUpdate:
-    if (!InlineAtomicLong)  return NULL;
-    break;
-
   case vmIntrinsics::_getCallerClass:
     if (!UseNewReflection)  return NULL;
     if (!InlineReflectionGetCallerClass)  return NULL;
@@ -711,11 +704,6 @@ bool LibraryCallKit::try_to_inline() {
   case vmIntrinsics::_reverseBytes_c:
     return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());

-  case vmIntrinsics::_get_AtomicLong:
-    return inline_native_AtomicLong_get();
-  case vmIntrinsics::_attemptUpdate:
-    return inline_native_AtomicLong_attemptUpdate();
-
   case vmIntrinsics::_getCallerClass:
     return inline_native_Reflection_getCallerClass();
@@ -4006,113 +3994,6 @@ bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) {
   return false;
 }

-static int value_field_offset = -1;  // offset of the "value" field of AtomicLongCSImpl.  This is needed by
-                                     // inline_native_AtomicLong_attemptUpdate() but it has no way of
-                                     // computing it since there is no lookup field by name function in the
-                                     // CI interface.  This is computed and set by inline_native_AtomicLong_get().
-                                     // Using a static variable here is safe even if we have multiple compilation
-                                     // threads because the offset is constant.  At worst the same offset will be
-                                     // computed and stored multiple times.
-
-bool LibraryCallKit::inline_native_AtomicLong_get() {
-  // Restore the stack and pop off the argument
-  _sp+=1;
-  Node *obj = pop();
-
-  // get the offset of the "value" field. Since the CI interfaces
-  // does not provide a way to look up a field by name, we scan the bytecodes
-  // to get the field index.  We expect the first 2 instructions of the method
-  // to be:
-  //   0 aload_0
-  //   1 getfield "value"
-  ciMethod* method = callee();
-  if (value_field_offset == -1)
-  {
-    ciField* value_field;
-    ciBytecodeStream iter(method);
-    Bytecodes::Code bc = iter.next();
-
-    if ((bc != Bytecodes::_aload_0) &&
-        ((bc != Bytecodes::_aload) || (iter.get_index() != 0)))
-      return false;
-    bc = iter.next();
-    if (bc != Bytecodes::_getfield)
-      return false;
-    bool ignore;
-    value_field = iter.get_field(ignore);
-    value_field_offset = value_field->offset_in_bytes();
-  }
-
-  // Null check without removing any arguments.
-  _sp++;
-  obj = do_null_check(obj, T_OBJECT);
-  _sp--;
-  // Check for locking null object
-  if (stopped()) return true;
-
-  Node *adr = basic_plus_adr(obj, obj, value_field_offset);
-  const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
-  int alias_idx = C->get_alias_index(adr_type);
-
-  Node *result = _gvn.transform(new (C, 3) LoadLLockedNode(control(), memory(alias_idx), adr));
-  push_pair(result);
-
-  return true;
-}
-
-bool LibraryCallKit::inline_native_AtomicLong_attemptUpdate() {
-  // Restore the stack and pop off the arguments
-  _sp+=5;
-  Node *newVal = pop_pair();
-  Node *oldVal = pop_pair();
-  Node *obj = pop();
-
-  // we need the offset of the "value" field which was computed when
-  // inlining the get() method.  Give up if we don't have it.
-  if (value_field_offset == -1)
-    return false;
-
-  // Null check without removing any arguments.
-  _sp+=5;
-  obj = do_null_check(obj, T_OBJECT);
-  _sp-=5;
-  // Check for locking null object
-  if (stopped()) return true;
-
-  Node *adr = basic_plus_adr(obj, obj, value_field_offset);
-  const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
-  int alias_idx = C->get_alias_index(adr_type);
-
-  Node *cas = _gvn.transform(new (C, 5) StoreLConditionalNode(control(), memory(alias_idx), adr, newVal, oldVal));
-  Node *store_proj = _gvn.transform( new (C, 1) SCMemProjNode(cas));
-  set_memory(store_proj, alias_idx);
-  Node *bol = _gvn.transform( new (C, 2) BoolNode( cas, BoolTest::eq ) );
-
-  Node *result;
-  // CMove node is not used to be able fold a possible check code
-  // after attemptUpdate() call. This code could be transformed
-  // into CMove node by loop optimizations.
-  {
-    RegionNode *r = new (C, 3) RegionNode(3);
-    result = new (C, 3) PhiNode(r, TypeInt::BOOL);
-
-    Node *iff = create_and_xform_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
-    Node *iftrue = opt_iff(r, iff);
-    r->init_req(1, iftrue);
-    result->init_req(1, intcon(1));
-    result->init_req(2, intcon(0));
-
-    set_control(_gvn.transform(r));
-    record_for_igvn(r);
-
-    C->set_has_split_ifs(true); // Has chance for split-if optimization
-  }
-
-  push(_gvn.transform(result));
-  return true;
-}
-
 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
   // restore the arguments
   _sp += arg_size();
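The two intrinsics deleted above implemented sun.misc.AtomicLongCSImpl.get() and attemptUpdate() directly in the ideal graph, pairing a LoadLLocked with a StoreLConditional (both types are touched further down in memnode.hpp and vmStructs.cpp). Semantically, attemptUpdate() is a plain 64-bit compare-and-swap on the object's "value" field, with the hand-built Region/Phi diamond turning the CAS condition into a 0/1 result. A standalone C++ sketch of that contract, not HotSpot code, with std::atomic standing in for the load-locked/store-conditional pair:

    #include <atomic>
    #include <cstdio>

    int main() {
      std::atomic<long long> value{0};  // the AtomicLongCSImpl "value" field
      long long expected = 0;
      // attemptUpdate(oldVal, newVal): store newVal only if value still
      // equals oldVal, and report whether the update won.
      bool swapped = value.compare_exchange_strong(expected, 42);
      std::printf("swapped=%d value=%lld\n", (int)swapped, value.load());
      return 0;
    }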
@@ -212,9 +212,8 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node*
     Node* use = rgn->fast_out(i);
     if (use->is_Phi() && use->outcnt() > 0) {
       assert(use->in(0) == rgn, "");
-      _igvn.hash_delete(use);
+      _igvn.rehash_node_delayed(use);
       use->add_req(use->in(proj_index));
-      _igvn._worklist.push(use);
       has_phi = true;
     }
   }
@@ -284,9 +283,8 @@ ProjNode* PhaseIterGVN::create_new_if_for_predicate(ProjNode* cont_proj, Node* n
   for (DUIterator_Fast imax, i = rgn->fast_outs(imax); i < imax; i++) {
     Node* use = rgn->fast_out(i);
     if (use->is_Phi() && use->outcnt() > 0) {
-      hash_delete(use);
+      rehash_node_delayed(use);
       use->add_req(use->in(proj_index));
-      _worklist.push(use);
       has_phi = true;
     }
   }
@@ -961,9 +961,7 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
   set_loop(zer_iff, loop->_parent);

   // Plug in the false-path, taken if we need to skip post-loop
-  _igvn.hash_delete( main_exit );
-  main_exit->set_req(0, zer_iff);
-  _igvn._worklist.push(main_exit);
+  _igvn.replace_input_of(main_exit, 0, zer_iff);
   set_idom(main_exit, zer_iff, dd_main_exit);
   set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
   // Make the true-path, must enter the post loop
@@ -1956,9 +1954,7 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
       C->set_major_progress();
       Node *kill_con = _igvn.intcon( 1-flip );
       set_ctrl(kill_con, C->root());
-      _igvn.hash_delete(iff);
-      iff->set_req(1, kill_con);
-      _igvn._worklist.push(iff);
+      _igvn.replace_input_of(iff, 1, kill_con);
       // Find surviving projection
       assert(iff->is_If(), "");
       ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip);
@@ -1966,11 +1962,9 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
       for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
         Node* cd = dp->fast_out(i); // Control-dependent node
         if( cd->is_Load() ) {       // Loads can now float around in the loop
-          _igvn.hash_delete(cd);
           // Allow the load to float around in the loop, or before it
           // but NOT before the pre-loop.
-          cd->set_req(0, ctrl);   // ctrl, not NULL
-          _igvn._worklist.push(cd);
+          _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
           --i;
           --imax;
         }
@@ -2029,14 +2023,10 @@ void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
     main_bol->set_req(1,main_cmp);
   }
   // Hack the now-private loop bounds
-  _igvn.hash_delete(main_cmp);
-  main_cmp->set_req(2, main_limit);
-  _igvn._worklist.push(main_cmp);
+  _igvn.replace_input_of(main_cmp, 2, main_limit);
   // The OpaqueNode is unshared by design
-  _igvn.hash_delete(opqzm);
   assert( opqzm->outcnt() == 1, "cannot hack shared node" );
-  opqzm->set_req(1,main_limit);
-  _igvn._worklist.push(opqzm);
+  _igvn.replace_input_of(opqzm, 1, main_limit);
 }

 //------------------------------DCE_loop_body----------------------------------
@@ -2178,9 +2168,7 @@ bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
     Node* cmp = cl->loopexit()->cmp_node();
     assert(cl->limit() == cmp->in(2), "sanity");
     phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist
-    phase->_igvn.hash_delete(cmp);
-    cmp->set_req(2, exact_limit);
-    phase->_igvn._worklist.push(cmp); // put cmp on worklist
+    phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist
   }
   // Note: the final value after increment should not overflow since
   // counted loop has limit check predicate.
@@ -174,27 +174,21 @@ void PhaseIdealLoop::do_unswitching (IdealLoopTree *loop, Node_List &old_new) {
       Node* use = worklist.pop();
       Node* nuse = use->clone();
       nuse->set_req(0, invar_proj);
-      _igvn.hash_delete(use);
-      use->set_req(1, nuse);
-      _igvn._worklist.push(use);
+      _igvn.replace_input_of(use, 1, nuse);
       register_new_node(nuse, invar_proj);
       // Same for the clone
       Node* use_clone = old_new[use->_idx];
-      _igvn.hash_delete(use_clone);
-      use_clone->set_req(1, nuse);
-      _igvn._worklist.push(use_clone);
+      _igvn.replace_input_of(use_clone, 1, nuse);
     }
   }

   // Hardwire the control paths in the loops into if(true) and if(false)
-  _igvn.hash_delete(unswitch_iff);
+  _igvn.rehash_node_delayed(unswitch_iff);
   short_circuit_if(unswitch_iff, proj_true);
-  _igvn._worklist.push(unswitch_iff);

   IfNode* unswitch_iff_clone = old_new[unswitch_iff->_idx]->as_If();
-  _igvn.hash_delete(unswitch_iff_clone);
+  _igvn.rehash_node_delayed(unswitch_iff_clone);
   short_circuit_if(unswitch_iff_clone, proj_false);
-  _igvn._worklist.push(unswitch_iff_clone);

   // Reoptimize loops
   loop->record_for_igvn();
@@ -224,8 +218,7 @@ ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,
   LoopNode* head  = loop->_head->as_Loop();
   bool counted_loop = head->is_CountedLoop();
   Node*     entry = head->in(LoopNode::EntryControl);
-  _igvn.hash_delete(entry);
-  _igvn._worklist.push(entry);
+  _igvn.rehash_node_delayed(entry);
   IdealLoopTree* outer_loop = loop->_parent;

   Node *cont      = _igvn.intcon(1);
@@ -249,18 +242,14 @@ ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,

   // Fast (true) control
   Node* iffast_pred = clone_loop_predicates(entry, iffast, !counted_loop);
-  _igvn.hash_delete(head);
-  head->set_req(LoopNode::EntryControl, iffast_pred);
+  _igvn.replace_input_of(head, LoopNode::EntryControl, iffast_pred);
   set_idom(head, iffast_pred, dom_depth(head));
-  _igvn._worklist.push(head);

   // Slow (false) control
   Node* ifslow_pred = clone_loop_predicates(entry, ifslow, !counted_loop);
   LoopNode* slow_head = old_new[head->_idx]->as_Loop();
-  _igvn.hash_delete(slow_head);
-  slow_head->set_req(LoopNode::EntryControl, ifslow_pred);
+  _igvn.replace_input_of(slow_head, LoopNode::EntryControl, ifslow_pred);
   set_idom(slow_head, ifslow_pred, dom_depth(slow_head));
-  _igvn._worklist.push(slow_head);

   recompute_dom_depth();
@@ -1129,8 +1129,7 @@ void IdealLoopTree::split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt ) {
       // I'm mid-iteration over the Region's uses.
       for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) {
         Node* use = old_phi->last_out(i);
-        igvn.hash_delete(use);
-        igvn._worklist.push(use);
+        igvn.rehash_node_delayed(use);
         uint uses_found = 0;
         for (uint j = 0; j < use->len(); j++) {
           if (use->in(j) == old_phi) {
@@ -1186,10 +1185,8 @@ void IdealLoopTree::split_outer_loop( PhaseIdealLoop *phase ) {
     phi->init_req(LoopNode::LoopBackControl, old_phi->in(outer_idx));
     phi = igvn.register_new_node_with_optimizer(phi, old_phi);
     // Make old Phi point to new Phi on the fall-in path
-    igvn.hash_delete(old_phi);
-    old_phi->set_req(LoopNode::EntryControl, phi);
+    igvn.replace_input_of(old_phi, LoopNode::EntryControl, phi);
     old_phi->del_req(outer_idx);
-    igvn._worklist.push(old_phi);
   }
 }
@@ -1992,9 +1989,7 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts)
   // we do it here.
   for( uint i = 1; i < C->root()->req(); i++ ) {
     if( !_nodes[C->root()->in(i)->_idx] ) { // Dead path into Root?
-      _igvn.hash_delete(C->root());
-      C->root()->del_req(i);
-      _igvn._worklist.push(C->root());
+      _igvn.delete_input_of(C->root(), i);
       i--;                      // Rerun same iteration on compressed edges
     }
   }
@@ -216,9 +216,7 @@ void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exc
   Node *con = _igvn.makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
   set_ctrl(con, C->root()); // Constant gets a new use
   // Hack the dominated test
-  _igvn.hash_delete(iff);
-  iff->set_req(1, con);
-  _igvn._worklist.push(iff);
+  _igvn.replace_input_of(iff, 1, con);

   // If I dont have a reachable TRUE and FALSE path following the IfNode then
   // I can assume this path reaches an infinite loop.  In this case it's not
@@ -245,10 +243,8 @@ void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exc
     Node* cd = dp->fast_out(i); // Control-dependent node
     if (cd->depends_only_on_test()) {
       assert(cd->in(0) == dp, "");
-      _igvn.hash_delete(cd);
-      cd->set_req(0, prevdom);
+      _igvn.replace_input_of(cd, 0, prevdom);
       set_early_ctrl(cd);
-      _igvn._worklist.push(cd);
       IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
       if (old_loop != new_loop) {
         if (!old_loop->_child) old_loop->_body.yank(cd);
@@ -952,8 +948,7 @@ void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) {
       if (!n->is_Load() || late_load_ctrl != n_ctrl) {
         for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; ) {
           Node *u = n->last_out(j); // Clone private computation per use
-          _igvn.hash_delete(u);
-          _igvn._worklist.push(u);
+          _igvn.rehash_node_delayed(u);
           Node *x = n->clone(); // Clone computation
           Node *x_ctrl = NULL;
           if( u->is_Phi() ) {
@@ -1089,9 +1084,7 @@ BoolNode *PhaseIdealLoop::clone_iff( PhiNode *phi, IdealLoopTree *loop ) {
   for( i = 1; i < phi->req(); i++ ) {
     Node *b = phi->in(i);
     if( b->is_Phi() ) {
-      _igvn.hash_delete(phi);
-      _igvn._worklist.push(phi);
-      phi->set_req(i, clone_iff( b->as_Phi(), loop ));
+      _igvn.replace_input_of(phi, i, clone_iff( b->as_Phi(), loop ));
     } else {
       assert( b->is_Bool(), "" );
     }
@@ -1161,9 +1154,7 @@ CmpNode *PhaseIdealLoop::clone_bool( PhiNode *phi, IdealLoopTree *loop ) {
   for( i = 1; i < phi->req(); i++ ) {
     Node *b = phi->in(i);
     if( b->is_Phi() ) {
-      _igvn.hash_delete(phi);
-      _igvn._worklist.push(phi);
-      phi->set_req(i, clone_bool( b->as_Phi(), loop ));
+      _igvn.replace_input_of(phi, i, clone_bool( b->as_Phi(), loop ));
     } else {
       assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
     }
@@ -1347,8 +1338,7 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
       // The original user of 'use' uses 'r' instead.
       for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) {
         Node* useuse = use->last_out(l);
-        _igvn.hash_delete(useuse);
-        _igvn._worklist.push(useuse);
+        _igvn.rehash_node_delayed(useuse);
         uint uses_found = 0;
         if( useuse->in(0) == use ) {
           useuse->set_req(0, r);
@@ -1435,9 +1425,7 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
       if( use->is_Phi() )            // Phi use is in prior block
         cfg = prev->in(idx);         // NOT in block of Phi itself
       if (cfg->is_top()) {           // Use is dead?
-        _igvn.hash_delete(use);
-        _igvn._worklist.push(use);
-        use->set_req(idx, C->top());
+        _igvn.replace_input_of(use, idx, C->top());
         continue;
       }
@@ -1487,9 +1475,7 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
         set_ctrl(phi, prev);
       }
       // Make 'use' use the Phi instead of the old loop body exit value
-      _igvn.hash_delete(use);
-      _igvn._worklist.push(use);
-      use->set_req(idx, phi);
+      _igvn.replace_input_of(use, idx, phi);
       if( use->_idx >= new_counter ) { // If updating new phis
         // Not needed for correctness, but prevents a weak assert
         // in AddPNode from tripping (when we end up with different
@@ -1517,9 +1503,7 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
       Node *iff = split_if_set->pop();
       if( iff->in(1)->is_Phi() ) {
         BoolNode *b = clone_iff( iff->in(1)->as_Phi(), loop );
-        _igvn.hash_delete(iff);
-        _igvn._worklist.push(iff);
-        iff->set_req(1, b);
+        _igvn.replace_input_of(iff, 1, b);
       }
     }
   }
@@ -1529,9 +1513,7 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd
       Node *phi = b->in(1);
       assert( phi->is_Phi(), "" );
       CmpNode *cmp = clone_bool( (PhiNode*)phi, loop );
-      _igvn.hash_delete(b);
-      _igvn._worklist.push(b);
-      b->set_req(1, cmp);
+      _igvn.replace_input_of(b, 1, cmp);
     }
   }
   if( split_cex_set ) {
@@ -1686,10 +1668,8 @@ ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTes
   ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
   int ddepth = dom_depth(proj);

-  _igvn.hash_delete(iff);
-  _igvn._worklist.push(iff);
-  _igvn.hash_delete(proj);
-  _igvn._worklist.push(proj);
+  _igvn.rehash_node_delayed(iff);
+  _igvn.rehash_node_delayed(proj);

   proj->set_req(0, NULL);  // temporary disconnect
   ProjNode* proj2 = proj_clone(proj, iff);
@@ -1745,10 +1725,8 @@ RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
   ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj();
   int ddepth = dom_depth(proj);

-  _igvn.hash_delete(iff);
-  _igvn._worklist.push(iff);
-  _igvn.hash_delete(proj);
-  _igvn._worklist.push(proj);
+  _igvn.rehash_node_delayed(iff);
+  _igvn.rehash_node_delayed(proj);

   proj->set_req(0, NULL);  // temporary disconnect
   ProjNode* proj2 = proj_clone(proj, iff);
@@ -1970,9 +1948,7 @@ void PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, N
     // clone "n" and insert it between the inputs of "n" and the use outside the loop
     Node* n_clone = n->clone();
-    _igvn.hash_delete(use);
-    use->set_req(j, n_clone);
-    _igvn._worklist.push(use);
+    _igvn.replace_input_of(use, j, n_clone);
     Node* use_c;
     if (!use->is_Phi()) {
       use_c = has_ctrl(use) ? get_ctrl(use) : use->in(0);
@@ -2028,8 +2004,7 @@ void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Nod
 #endif
     while( worklist.size() ) {
       Node *use = worklist.pop();
-      _igvn.hash_delete(use);
-      _igvn._worklist.push(use);
+      _igvn.rehash_node_delayed(use);
       for (uint j = 1; j < use->req(); j++) {
         if (use->in(j) == n) {
           use->set_req(j, n_clone);
@@ -2055,9 +2030,7 @@ void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_va
     _igvn.remove_dead_node(phi);
     phi = hit;
   }
-  _igvn.hash_delete(use);
-  _igvn._worklist.push(use);
-  use->set_req(idx, phi);
+  _igvn.replace_input_of(use, idx, phi);
 }

 #ifdef ASSERT
@@ -2630,9 +2603,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
         // use is in loop
         if (old_new[use->_idx] != NULL) { // null for dead code
           Node* use_clone = old_new[use->_idx];
-          _igvn.hash_delete(use);
-          use->set_req(j, C->top());
-          _igvn._worklist.push(use);
+          _igvn.replace_input_of(use, j, C->top());
           insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone );
         }
       } else {
@@ -2667,46 +2638,35 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) {
     if (!n->is_CFG()           && n->in(0) != NULL        &&
         not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) {
       Node* n_clone = old_new[n->_idx];
-      _igvn.hash_delete(n_clone);
-      n_clone->set_req(0, new_head_clone);
-      _igvn._worklist.push(n_clone);
+      _igvn.replace_input_of(n_clone, 0, new_head_clone);
     }
   }

   // Backedge of the surviving new_head (the clone) is original last_peel
-  _igvn.hash_delete(new_head_clone);
-  new_head_clone->set_req(LoopNode::LoopBackControl, last_peel);
-  _igvn._worklist.push(new_head_clone);
+  _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel);

   // Cut first node in original not_peel set
-  _igvn.hash_delete(new_head);
-  new_head->set_req(LoopNode::EntryControl, C->top());
-  new_head->set_req(LoopNode::LoopBackControl, C->top());
-  _igvn._worklist.push(new_head);
+  _igvn.rehash_node_delayed(new_head);                     // Multiple edge updates:
+  new_head->set_req(LoopNode::EntryControl, C->top());     // use rehash_node_delayed / set_req instead of
+  new_head->set_req(LoopNode::LoopBackControl, C->top());  // multiple replace_input_of calls

   // Copy head_clone back-branch info to original head
   // and remove original head's loop entry and
   // clone head's back-branch
-  _igvn.hash_delete(head);
-  _igvn.hash_delete(head_clone);
+  _igvn.rehash_node_delayed(head); // Multiple edge updates
   head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl));
   head->set_req(LoopNode::LoopBackControl, C->top());
-  head_clone->set_req(LoopNode::LoopBackControl, C->top());
-  _igvn._worklist.push(head);
-  _igvn._worklist.push(head_clone);
+  _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top());

   // Similarly modify the phis
   for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) {
     Node* use = head->fast_out(k);
     if (use->is_Phi() && use->outcnt() > 0) {
       Node* use_clone = old_new[use->_idx];
-      _igvn.hash_delete(use);
-      _igvn.hash_delete(use_clone);
+      _igvn.rehash_node_delayed(use); // Multiple edge updates
       use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl));
       use->set_req(LoopNode::LoopBackControl, C->top());
-      use_clone->set_req(LoopNode::LoopBackControl, C->top());
-      _igvn._worklist.push(use);
-      _igvn._worklist.push(use_clone);
+      _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top());
     }
   }
@@ -2792,8 +2752,7 @@ void PhaseIdealLoop::reorg_offsets(IdealLoopTree *loop) {
       set_ctrl(neg_stride, C->root());
       Node *post = new (C, 3) AddINode( opaq, neg_stride);
       register_new_node( post, u_ctrl );
-      _igvn.hash_delete(use);
-      _igvn._worklist.push(use);
+      _igvn.rehash_node_delayed(use);
       for (uint j = 1; j < use->req(); j++) {
         if (use->in(j) == phi)
           use->set_req(j, post);
@@ -1447,9 +1447,8 @@ void PhaseMacroExpand::expand_allocate_common(
   if (!always_slow && _memproj_fallthrough != NULL) {
     for (DUIterator_Fast imax, i = _memproj_fallthrough->fast_outs(imax); i < imax; i++) {
       Node *use = _memproj_fallthrough->fast_out(i);
-      _igvn.hash_delete(use);
+      _igvn.rehash_node_delayed(use);
       imax -= replace_input(use, _memproj_fallthrough, result_phi_rawmem);
-      _igvn._worklist.push(use);
       // back up iterator
       --i;
     }
@@ -1463,9 +1462,8 @@ void PhaseMacroExpand::expand_allocate_common(
     }
     for (DUIterator_Fast imax, i = _memproj_catchall->fast_outs(imax); i < imax; i++) {
       Node *use = _memproj_catchall->fast_out(i);
-      _igvn.hash_delete(use);
+      _igvn.rehash_node_delayed(use);
       imax -= replace_input(use, _memproj_catchall, _memproj_fallthrough);
-      _igvn._worklist.push(use);
       // back up iterator
       --i;
     }
@@ -1481,9 +1479,8 @@ void PhaseMacroExpand::expand_allocate_common(
   if (_ioproj_fallthrough != NULL) {
     for (DUIterator_Fast imax, i = _ioproj_fallthrough->fast_outs(imax); i < imax; i++) {
       Node *use = _ioproj_fallthrough->fast_out(i);
-      _igvn.hash_delete(use);
+      _igvn.rehash_node_delayed(use);
       imax -= replace_input(use, _ioproj_fallthrough, result_phi_i_o);
-      _igvn._worklist.push(use);
       // back up iterator
       --i;
     }
@@ -1497,9 +1494,8 @@ void PhaseMacroExpand::expand_allocate_common(
     }
     for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) {
       Node *use = _ioproj_catchall->fast_out(i);
-      _igvn.hash_delete(use);
+      _igvn.rehash_node_delayed(use);
       imax -= replace_input(use, _ioproj_catchall, _ioproj_fallthrough);
-      _igvn._worklist.push(use);
       // back up iterator
       --i;
     }
@@ -1857,18 +1853,16 @@ void PhaseMacroExpand::mark_eliminated_box(Node* oldbox, Node* obj) {
       if (alock->box_node() == oldbox && alock->obj_node()->eqv_uncast(obj)) {
         // Replace Box and mark eliminated all related locks and unlocks.
         alock->set_non_esc_obj();
-        _igvn.hash_delete(alock);
+        _igvn.rehash_node_delayed(alock);
         alock->set_box_node(newbox);
-        _igvn._worklist.push(alock);
         next_edge = false;
       }
     }
     if (u->is_FastLock() && u->as_FastLock()->obj_node()->eqv_uncast(obj)) {
       FastLockNode* flock = u->as_FastLock();
       assert(flock->box_node() == oldbox, "sanity");
-      _igvn.hash_delete(flock);
+      _igvn.rehash_node_delayed(flock);
       flock->set_box_node(newbox);
-      _igvn._worklist.push(flock);
       next_edge = false;
     }
@@ -1886,9 +1880,7 @@ void PhaseMacroExpand::mark_eliminated_box(Node* oldbox, Node* obj) {
         Node* box_node = sfn->monitor_box(jvms, idx);
         if (box_node == oldbox && obj_node->eqv_uncast(obj)) {
           int j = jvms->monitor_box_offset(idx);
-          _igvn.hash_delete(u);
-          u->set_req(j, newbox);
-          _igvn._worklist.push(u);
+          _igvn.replace_input_of(u, j, newbox);
           next_edge = false;
         }
       }
@@ -717,6 +717,22 @@ Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) {
       adr = adr->in(1);
       continue;

+    case Op_EncodeP:
+      // EncodeP node's control edge could be set by this method
+      // when EncodeP node depends on CastPP node.
+      //
+      // Use its control edge for memory op because EncodeP may go away
+      // later when it is folded with following or preceding DecodeN node.
+      if (adr->in(0) == NULL) {
+        // Keep looking for cast nodes.
+        adr = adr->in(1);
+        continue;
+      }
+      ccp->hash_delete(n);
+      n->set_req(MemNode::Control, adr->in(0));
+      ccp->hash_insert(n);
+      return n;
+
     case Op_CastPP:
       // If the CastPP is useless, just peek on through it.
       if( ccp->type(adr) == ccp->type(adr->in(1)) ) {
@@ -636,17 +636,6 @@ public:
   virtual bool depends_only_on_test() const { return true; }
 };

-//------------------------------LoadLLockedNode---------------------------------
-// Load-locked a pointer from memory (either object or array).
-// On Sparc & Intel this is implemented as a normal long load.
-class LoadLLockedNode : public LoadLNode {
-public:
-  LoadLLockedNode( Node *c, Node *mem, Node *adr )
-    : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
-  virtual int Opcode() const;
-  virtual int store_Opcode() const { return Op_StoreLConditional; }
-};
-
 //------------------------------SCMemProjNode---------------------------------------
 // This class defines a projection of the memory state of a store conditional node.
 // These nodes return a value, but also update memory.
@@ -527,6 +527,9 @@ class Parse : public GraphKit {
   int repush_if_args();
   void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                            Block* path, Block* other_path);
+  void sharpen_type_after_if(BoolTest::mask btest,
+                             Node* con, const Type* tcon,
+                             Node* val, const Type* tval);
   IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
   Node* jump_if_join(Node* iffalse, Node* iftrue);
   void jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, int prof_table_index);
@@ -1233,6 +1233,71 @@ void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
   if (!have_con)                        // remaining adjustments need a con
     return;

+  sharpen_type_after_if(btest, con, tcon, val, tval);
+}
+
+
+static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
+  Node* ldk;
+  if (n->is_DecodeN()) {
+    if (n->in(1)->Opcode() != Op_LoadNKlass) {
+      return NULL;
+    } else {
+      ldk = n->in(1);
+    }
+  } else if (n->Opcode() != Op_LoadKlass) {
+    return NULL;
+  } else {
+    ldk = n;
+  }
+  assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");
+
+  Node* adr = ldk->in(MemNode::Address);
+  intptr_t off = 0;
+  Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
+  if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
+    return NULL;
+  const TypePtr* tp = gvn->type(obj)->is_ptr();
+  if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
+    return NULL;
+
+  return obj;
+}
+
+void Parse::sharpen_type_after_if(BoolTest::mask btest,
+                                  Node* con, const Type* tcon,
+                                  Node* val, const Type* tval) {
+  // Look for opportunities to sharpen the type of a node
+  // whose klass is compared with a constant klass.
+  if (btest == BoolTest::eq && tcon->isa_klassptr()) {
+    Node* obj = extract_obj_from_klass_load(&_gvn, val);
+    const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
+    if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) {
+       // Found:
+       //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
+       // or the narrowOop equivalent.
+       const Type* obj_type = _gvn.type(obj);
+       const TypeOopPtr* tboth = obj_type->join(con_type)->isa_oopptr();
+       if (tboth != NULL && tboth != obj_type && tboth->higher_equal(obj_type)) {
+          // obj has to be of the exact type Foo if the CmpP succeeds.
+          assert(tboth->klass_is_exact(), "klass should be exact");
+          int obj_in_map = map()->find_edge(obj);
+          JVMState* jvms = this->jvms();
+          if (obj_in_map >= 0 &&
+              (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
+            TypeNode* ccast = new (C, 2) CheckCastPPNode(control(), obj, tboth);
+            const Type* tcc = ccast->as_Type()->type();
+            assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
+            // Delay transform() call to allow recovery of pre-cast value
+            // at the control merge.
+            _gvn.set_type_bottom(ccast);
+            record_for_igvn(ccast);
+            // Here's the payoff.
+            replace_in_map(obj, ccast);
+          }
+       }
+    }
+  }
+
   int val_in_map = map()->find_edge(val);
   if (val_in_map < 0)  return;          // replace_in_map would be useless
@@ -1265,6 +1330,7 @@ void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
     // Exclude tests vs float/double 0 as these could be
     // either +0 or -0.  Just because you are equal to +0
     // doesn't mean you ARE +0!
+    // Note, following code also replaces Long and Oop values.
     if ((!tf || tf->_f != 0.0) &&
         (!td || td->_d != 0.0))
       cast = con;                   // Replace non-constant val by con.
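The guard in sharpen_type_after_if is the interesting part: the join of the object's static type with the exact instance type derived from the constant klass must both differ from the old type (an actual improvement) and still conform to it via higher_equal; only then is the CheckCastPP worth planting. A toy standalone C++ model of that guard, with a hypothetical two-field type (klass name plus exactness flag) standing in for HotSpot's Type lattice:

    #include <cassert>
    #include <cstring>

    struct ToyType {
      const char* klass;
      bool        exact;
      // an exact type refines ("is higher than or equal to") an inexact one
      bool higher_equal(const ToyType& o) const {
        return std::strcmp(klass, o.klass) == 0 && (exact || !o.exact);
      }
      bool same_as(const ToyType& o) const {
        return std::strcmp(klass, o.klass) == 0 && exact == o.exact;
      }
    };

    // join(): keep the klass, become exact if either side is; a toy stand-in
    // for obj_type->join(con_type) after the klass compare succeeded
    static ToyType join(const ToyType& a, const ToyType& b) {
      return ToyType{a.klass, a.exact || b.exact};
    }

    int main() {
      ToyType obj_type = {"Foo", false};  // Foo or any subclass
      ToyType con_type = {"Foo", true};   // from ConP(Foo.klass)
      ToyType tboth    = join(obj_type, con_type);
      // the sharpening guard: the type improved and still conforms
      assert(!tboth.same_as(obj_type) && tboth.higher_equal(obj_type));
      return 0;
    }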
@@ -460,6 +460,25 @@ public:
     subsume_node(old, nn);
   }

+  // Delayed node rehash: remove a node from the hash table and rehash it during
+  // next optimizing pass
+  void rehash_node_delayed(Node* n) {
+    hash_delete(n);
+    _worklist.push(n);
+  }
+
+  // Replace ith edge of "n" with "in"
+  void replace_input_of(Node* n, int i, Node* in) {
+    rehash_node_delayed(n);
+    n->set_req(i, in);
+  }
+
+  // Delete ith edge of "n"
+  void delete_input_of(Node* n, int i) {
+    rehash_node_delayed(n);
+    n->del_req(i);
+  }
+
   bool delay_transform() const { return _delay_transform; }

   void set_delay_transform(bool delay) {
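These three helpers bundle the triple that the rest of this changeset keeps deleting by hand: a node's inputs feed its GVN hash, so the node must leave the hash table before an edge changes, and it must be pushed onto the IGVN worklist so it is rehashed and re-optimized on the next pass. replace_input_of and delete_input_of cover the one-edge case; the "Multiple edge updates" comments in the partial_peel hunk above show the convention when several edges of one node change, namely a single rehash_node_delayed followed by raw set_req calls. A standalone C++ mock of the mechanics, with hypothetical minimal Node and IGVN types rather than HotSpot's:

    #include <cstdio>
    #include <unordered_set>
    #include <vector>

    struct Node {
      std::vector<Node*> in;                                    // input edges
      void set_req(int i, Node* n) { in[i] = n; }
      void del_req(int i)          { in.erase(in.begin() + i); }
    };

    struct MockIGVN {
      std::unordered_set<Node*> table;                          // GVN hash table
      std::vector<Node*>        worklist;                       // _worklist

      void hash_delete(Node* n) { table.erase(n); }
      // the three helpers, modeled on the patch:
      void rehash_node_delayed(Node* n) { hash_delete(n); worklist.push_back(n); }
      void replace_input_of(Node* n, int i, Node* in) { rehash_node_delayed(n); n->set_req(i, in); }
      void delete_input_of(Node* n, int i) { rehash_node_delayed(n); n->del_req(i); }
    };

    int main() {
      Node a, b, c;
      c.in = { &a, &b };
      MockIGVN igvn;
      igvn.table.insert(&c);
      igvn.replace_input_of(&c, 1, &a);  // one call instead of the old triple
      std::printf("worklist=%zu c.in[1]==&a: %d\n",
                  igvn.worklist.size(), (int)(c.in[1] == &a));
      return 0;
    }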
@@ -137,9 +137,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
         Node *iff_ctrl = iff->is_If() ? iff->in(0) : get_ctrl(iff);
         Node *x = bol->clone();
         register_new_node(x, iff_ctrl);
-        _igvn.hash_delete(iff);
-        iff->set_req(1, x);
-        _igvn._worklist.push(iff);
+        _igvn.replace_input_of(iff, 1, x);
       }
       _igvn.remove_dead_node( bol );
       --i;
@@ -151,9 +149,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
       assert( bol->in(1) == n, "" );
       Node *x = n->clone();
       register_new_node(x, get_ctrl(bol));
-      _igvn.hash_delete(bol);
-      bol->set_req(1, x);
-      _igvn._worklist.push(bol);
+      _igvn.replace_input_of(bol, 1, x);
     }
     _igvn.remove_dead_node( n );
@@ -387,9 +383,7 @@ void PhaseIdealLoop::handle_use( Node *use, Node *def, small_cache *cache, Node
     if( use->in(i) == def )
       break;
   assert( i < use->req(), "def should be among use's inputs" );
-  _igvn.hash_delete(use);
-  use->set_req(i, new_def);
-  _igvn._worklist.push(use);
+  _igvn.replace_input_of(use, i, new_def);
 }

 //------------------------------do_split_if------------------------------------
@@ -702,12 +702,84 @@ const Type *CmpPNode::sub( const Type *t1, const Type *t2 ) const {
   return TypeInt::CC;
 }

+static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) {
+  // Return the klass node for
+  //   LoadP(AddP(foo:Klass, #java_mirror))
+  // or NULL if not matching.
+  if (n->Opcode() != Op_LoadP) return NULL;
+  const TypeInstPtr* tp = phase->type(n)->isa_instptr();
+  if (!tp || tp->klass() != phase->C->env()->Class_klass()) return NULL;
+
+  Node* adr = n->in(MemNode::Address);
+  intptr_t off = 0;
+  Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
+  if (k == NULL) return NULL;
+  const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr();
+  if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return NULL;
+
+  // We've found the klass node of a Java mirror load.
+  return k;
+}
+
+static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n) {
+  // for ConP(Foo.class) return ConP(Foo.klass)
+  // otherwise return NULL
+  if (!n->is_Con()) return NULL;
+
+  const TypeInstPtr* tp = phase->type(n)->isa_instptr();
+  if (!tp) return NULL;
+
+  ciType* mirror_type = tp->java_mirror_type();
+  // TypeInstPtr::java_mirror_type() returns non-NULL for compile-
+  // time Class constants only.
+  if (!mirror_type) return NULL;
+
+  // x.getClass() == int.class can never be true (for all primitive types)
+  // Return a ConP(NULL) node for this case.
+  if (mirror_type->is_classless()) {
+    return phase->makecon(TypePtr::NULL_PTR);
+  }
+
+  // return the ConP(Foo.klass)
+  assert(mirror_type->is_klass(), "mirror_type should represent a klassOop");
+  return phase->makecon(TypeKlassPtr::make(mirror_type->as_klass()));
+}
+
 //------------------------------Ideal------------------------------------------
-// Check for the case of comparing an unknown klass loaded from the primary
+// Normalize comparisons between Java mirror loads to compare the klass instead.
+//
+// Also check for the case of comparing an unknown klass loaded from the primary
 // super-type array vs a known klass with no subtypes.  This amounts to
 // checking to see an unknown klass subtypes a known klass with no subtypes;
 // this only happens on an exact match.  We can shorten this test by 1 load.
 Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
+  // Normalize comparisons between Java mirrors into comparisons of the low-
+  // level klass, where a dependent load could be shortened.
+  //
+  // The new pattern has a nice effect of matching the same pattern used in the
+  // fast path of instanceof/checkcast/Class.isInstance(), which allows
+  // redundant exact type check be optimized away by GVN.
+  // For example, in
+  //   if (x.getClass() == Foo.class) {
+  //     Foo foo = (Foo) x;
+  //     // ... use a ...
+  //   }
+  // a CmpPNode could be shared between if_acmpne and checkcast
+  {
+    Node* k1 = isa_java_mirror_load(phase, in(1));
+    Node* k2 = isa_java_mirror_load(phase, in(2));
+    Node* conk2 = isa_const_java_mirror(phase, in(2));
+
+    if (k1 && (k2 || conk2)) {
+      Node* lhs = k1;
+      Node* rhs = (k2 != NULL) ? k2 : conk2;
+      this->set_req(1, lhs);
+      this->set_req(2, rhs);
+      return this;
+    }
+  }
+
   // Constant pointer on right?
   const TypeKlassPtr* t2 = phase->type(in(2))->isa_klassptr();
   if (t2 == NULL || !t2->klass_is_exact())
@@ -944,7 +944,7 @@ void SuperWord::schedule() {
 void SuperWord::remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip,
                                   Node *uip, Unique_Node_List &sched_before) {
   Node* my_mem = current->in(MemNode::Memory);
-  _igvn.hash_delete(current);
+  _igvn.rehash_node_delayed(current);
   _igvn.hash_delete(my_mem);

   //remove current_store from its current position in the memmory graph
@@ -952,7 +952,7 @@ void SuperWord::remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip,
     Node* use = current->out(i);
     if (use->is_Mem()) {
       assert(use->in(MemNode::Memory) == current, "must be");
-      _igvn.hash_delete(use);
+      _igvn.rehash_node_delayed(use);
       if (use == prev) { // connect prev to my_mem
         use->set_req(MemNode::Memory, my_mem);
       } else if (sched_before.member(use)) {
@@ -962,7 +962,6 @@ void SuperWord::remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip,
         _igvn.hash_delete(lip);
         use->set_req(MemNode::Memory, lip);
       }
-      _igvn._worklist.push(use);
       --i; //deleted this edge; rescan position
     }
   }
@@ -976,25 +975,20 @@ void SuperWord::remove_and_insert(MemNode *current, MemNode *prev, MemNode *lip,
     Node* use = insert_pt->out(i);
     if (use->is_Mem()) {
       assert(use->in(MemNode::Memory) == insert_pt, "must be");
-      _igvn.hash_delete(use);
-      use->set_req(MemNode::Memory, current);
-      _igvn._worklist.push(use);
+      _igvn.replace_input_of(use, MemNode::Memory, current);
       --i; //deleted this edge; rescan position
     } else if (!sched_up && use->is_Phi() && use->bottom_type() == Type::MEMORY) {
       uint pos; //lip (lower insert point) must be the last one in the memory slice
-      _igvn.hash_delete(use);
       for (pos=1; pos < use->req(); pos++) {
         if (use->in(pos) == insert_pt) break;
       }
-      use->set_req(pos, current);
-      _igvn._worklist.push(use);
+      _igvn.replace_input_of(use, pos, current);
       --i;
     }
   }

   //connect current to insert_pt
   current->set_req(MemNode::Memory, insert_pt);
-  _igvn._worklist.push(current);
 }

 //------------------------------co_locate_pack----------------------------------
@@ -1077,15 +1071,13 @@ void SuperWord::co_locate_pack(Node_List* pk) {
         Node* use = current->out(i);
         if (use->is_Mem() && use != previous) {
           assert(use->in(MemNode::Memory) == current, "must be");
-          _igvn.hash_delete(use);
           if (schedule_before_pack.member(use)) {
             _igvn.hash_delete(upper_insert_pt);
-            use->set_req(MemNode::Memory, upper_insert_pt);
+            _igvn.replace_input_of(use, MemNode::Memory, upper_insert_pt);
           } else {
             _igvn.hash_delete(lower_insert_pt);
-            use->set_req(MemNode::Memory, lower_insert_pt);
+            _igvn.replace_input_of(use, MemNode::Memory, lower_insert_pt);
           }
-          _igvn._worklist.push(use);
           --i; // deleted this edge; rescan position
         }
       }
@@ -1122,9 +1114,7 @@ void SuperWord::co_locate_pack(Node_List* pk) {
     // Give each load the same memory state
     for (uint i = 0; i < pk->size(); i++) {
       LoadNode* ld = pk->at(i)->as_Load();
-      _igvn.hash_delete(ld);
-      ld->set_req(MemNode::Memory, mem_input);
-      _igvn._worklist.push(ld);
+      _igvn.replace_input_of(ld, MemNode::Memory, mem_input);
     }
   }
 }
@@ -1282,16 +1272,14 @@ void SuperWord::insert_extracts(Node_List* p) {

     // Insert extract operation
     _igvn.hash_delete(def);
-    _igvn.hash_delete(use);
     int def_pos = alignment(def) / data_size(def);
     const Type* def_t = velt_type(def);

     Node* ex = ExtractNode::make(_phase->C, def, def_pos, def_t);
     _phase->_igvn.register_new_node_with_optimizer(ex);
     _phase->set_ctrl(ex, _phase->get_ctrl(def));
-    use->set_req(idx, ex);
+    _igvn.replace_input_of(use, idx, ex);
     _igvn._worklist.push(def);
-    _igvn._worklist.push(use);

     bb_insert_after(ex, bb_idx(def));
     set_velt_type(ex, def_t);
@@ -634,7 +634,7 @@ JNIEXPORT jobject JNICALL
 JVM_AssertionStatusDirectives(JNIEnv *env, jclass unused);

 /*
- * sun.misc.AtomicLong
+ * java.util.concurrent.atomic.AtomicLong
  */
 JNIEXPORT jboolean JNICALL
 JVM_SupportsCX8(void);
@@ -631,9 +631,6 @@ class CommandLineFlags {
   develop(bool, InlineClassNatives, true,                                   \
           "inline Class.isInstance, etc")                                   \
                                                                             \
-  develop(bool, InlineAtomicLong, true,                                     \
-          "inline sun.misc.AtomicLong")                                     \
-                                                                            \
   develop(bool, InlineThreadNatives, true,                                  \
           "inline Thread.currentThread, etc")                               \
                                                                             \
@@ -1875,7 +1875,6 @@ static inline uint64_t cast_uint64_t(size_t x)
   declare_c2_type(StoreNNode, StoreNode)                                  \
   declare_c2_type(StoreCMNode, StoreNode)                                 \
   declare_c2_type(LoadPLockedNode, LoadPNode)                             \
-  declare_c2_type(LoadLLockedNode, LoadLNode)                             \
   declare_c2_type(SCMemProjNode, ProjNode)                                \
   declare_c2_type(LoadStoreNode, Node)                                    \
   declare_c2_type(StorePConditionalNode, LoadStoreNode)                   \
@@ -0,0 +1,111 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 6732154
* @summary REG: Printing an Image using image/gif doc flavor crashes the VM, Solsparc
*
* @run main/othervm -Xcomp -XX:CompileOnly="Test6732154::ascii85Encode" Test6732154
*/
public class Test6732154 {
// Exact copy of sun.print.PSPrinterJob.ascii85Encode([b)[b
private byte[] ascii85Encode(byte[] inArr) {
byte[] outArr = new byte[((inArr.length+4) * 5 / 4) + 2];
long p1 = 85;
long p2 = p1*p1;
long p3 = p1*p2;
long p4 = p1*p3;
byte pling = '!';
int i = 0;
int olen = 0;
long val, rem;
while (i+3 < inArr.length) {
val = ((long)((inArr[i++]&0xff))<<24) +
((long)((inArr[i++]&0xff))<<16) +
((long)((inArr[i++]&0xff))<< 8) +
((long)(inArr[i++]&0xff));
if (val == 0) {
outArr[olen++] = 'z';
} else {
rem = val;
outArr[olen++] = (byte)(rem / p4 + pling); rem = rem % p4;
outArr[olen++] = (byte)(rem / p3 + pling); rem = rem % p3;
outArr[olen++] = (byte)(rem / p2 + pling); rem = rem % p2;
outArr[olen++] = (byte)(rem / p1 + pling); rem = rem % p1;
outArr[olen++] = (byte)(rem + pling);
}
}
// input not a multiple of 4 bytes, write partial output.
if (i < inArr.length) {
int n = inArr.length - i; // n bytes remain to be written
val = 0;
while (i < inArr.length) {
val = (val << 8) + (inArr[i++]&0xff);
}
int append = 4 - n;
while (append-- > 0) {
val = val << 8;
}
byte []c = new byte[5];
rem = val;
c[0] = (byte)(rem / p4 + pling); rem = rem % p4;
c[1] = (byte)(rem / p3 + pling); rem = rem % p3;
c[2] = (byte)(rem / p2 + pling); rem = rem % p2;
c[3] = (byte)(rem / p1 + pling); rem = rem % p1;
c[4] = (byte)(rem + pling);
for (int b = 0; b < n+1 ; b++) {
outArr[olen++] = c[b];
}
}
// write EOD marker.
outArr[olen++]='~'; outArr[olen++]='>';
/* The original intention was to insert a newline after every 78 bytes.
* This was mainly intended for legibility but I decided against this
* partially because of the (small) amount of extra space, and
* partially because for line breaks either would have to hardwire
* ascii 10 (newline) or calculate space in bytes to allocate for
* the platform's newline byte sequence. Also need to be careful
* about where its inserted:
* Ascii 85 decoder ignores white space except for one special case:
* you must ensure you do not split the EOD marker across lines.
*/
byte[] retArr = new byte[olen];
System.arraycopy(outArr, 0, retArr, 0, olen);
return retArr;
}
public static void main(String[] args) {
new Test6732154().ascii85Encode(new byte[0]);
System.out.println("Test passed.");
}
}
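For reference, the encoder packs each 4-byte group into a 32-bit value and emits five base-85 digits offset by '!' (ASCII 33), with 'z' as shorthand for an all-zero group. A standalone C++ check of one group, using the same arithmetic as the Java method above (illustration only, not part of the test):

    #include <cstdio>

    int main() {
      const unsigned long p1 = 85, p2 = p1*p1, p3 = p1*p2, p4 = p1*p3;
      unsigned long val = 1;               // the 4-byte group 00 00 00 01
      char out[6] = {0};
      unsigned long rem = val;
      out[0] = (char)(rem / p4 + '!'); rem %= p4;
      out[1] = (char)(rem / p3 + '!'); rem %= p3;
      out[2] = (char)(rem / p2 + '!'); rem %= p2;
      out[3] = (char)(rem / p1 + '!'); rem %= p1;
      out[4] = (char)(rem + '!');
      std::printf("%s\n", out);            // prints !!!!" (four zero digits, then 1)
      return 0;
    }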
@@ -0,0 +1,43 @@
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @bug 7169782
* @summary C2: SIGSEGV in LShiftLNode::Ideal(PhaseGVN*, bool)
*
* @run main/othervm -Xcomp -XX:CompileOnly="Test7169782::<clinit>" Test7169782
*/
public class Test7169782 {
static long var_8;
static {
var_8 /= (long)(1E100 + ("".startsWith("a", 0) ? 1 : 2));
}
public static void main(String[] args) {
System.out.println("Test passed.");
}
}