Mirror of https://github.com/openjdk/jdk.git (synced 2025-08-25 13:54:38 +02:00)

Merge
commit 84f5de835b
43 changed files with 693 additions and 247 deletions
@@ -150,6 +150,7 @@ jprt.build.targets= \

jprt.my.solaris.sparc.test.targets= \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jvm98, \
${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-scimark, \
${jprt.my.solaris.sparc}-product-{c1|c2}-runThese, \
${jprt.my.solaris.sparc}-fastdebug-c1-runThese_Xshare, \
@@ -168,6 +169,7 @@ jprt.my.solaris.sparc.test.targets= \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_G1, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParOldGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_default, \
${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_SerialGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParallelGC, \
${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_CMS, \
@@ -176,6 +178,7 @@ jprt.my.solaris.sparc.test.targets= \

jprt.my.solaris.sparcv9.test.targets= \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark, \
${jprt.my.solaris.sparcv9}-product-c2-runThese, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_default, \
@@ -193,6 +196,7 @@ jprt.my.solaris.sparcv9.test.targets= \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParOldGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_SerialGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParallelGC, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_CMS, \
@@ -201,6 +205,7 @@ jprt.my.solaris.sparcv9.test.targets= \

jprt.my.solaris.x64.test.targets= \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-scimark, \
${jprt.my.solaris.x64}-product-c2-runThese, \
${jprt.my.solaris.x64}-product-c2-runThese_Xcomp, \
@@ -219,6 +224,7 @@ jprt.my.solaris.x64.test.targets= \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_SerialGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
@@ -227,6 +233,7 @@ jprt.my.solaris.x64.test.targets= \

jprt.my.solaris.i586.test.targets= \
${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
${jprt.my.solaris.i586}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.solaris.i586}-{product|fastdebug}-{c1|c2}-scimark, \
${jprt.my.solaris.i586}-product-{c1|c2}-runThese_Xcomp, \
${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xcomp, \
@@ -253,6 +260,7 @@ jprt.my.solaris.i586.test.targets= \
${jprt.my.solaris.i586}-product-c1-GCOld_G1, \
${jprt.my.solaris.i586}-product-c1-GCOld_ParOldGC, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_default, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_default_tiered, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParallelGC, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_CMS, \
${jprt.my.solaris.i586}-fastdebug-c2-jbb_G1, \
@@ -260,6 +268,7 @@ jprt.my.solaris.i586.test.targets= \

jprt.my.linux.i586.test.targets = \
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
${jprt.my.linux.i586}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-scimark, \
${jprt.my.linux.i586}-product-c1-runThese_Xcomp, \
${jprt.my.linux.i586}-fastdebug-c1-runThese_Xshare, \
@@ -279,6 +288,7 @@ jprt.my.linux.i586.test.targets = \
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_G1, \
${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParOldGC, \
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_default, \
${jprt.my.linux.i586}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParallelGC, \
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_CMS, \
${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_G1, \
@@ -286,6 +296,7 @@ jprt.my.linux.i586.test.targets = \

jprt.my.linux.x64.test.targets = \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-scimark, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_default, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
@@ -302,12 +313,14 @@ jprt.my.linux.x64.test.targets = \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \
${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParOldGC

jprt.my.windows.i586.test.targets = \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jvm98, \
${jprt.my.windows.i586}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-scimark, \
${jprt.my.windows.i586}-product-{c1|c2}-runThese, \
${jprt.my.windows.i586}-product-{c1|c2}-runThese_Xcomp, \
@@ -327,6 +340,7 @@ jprt.my.windows.i586.test.targets = \
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_G1, \
${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParOldGC, \
${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-jbb_default, \
${jprt.my.windows.i586}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.windows.i586}-product-{c1|c2}-jbb_ParallelGC, \
${jprt.my.windows.i586}-product-{c1|c2}-jbb_CMS, \
${jprt.my.windows.i586}-product-{c1|c2}-jbb_G1, \
@@ -334,6 +348,7 @@ jprt.my.windows.i586.test.targets = \

jprt.my.windows.x64.test.targets = \
${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-jvm98_tiered, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-scimark, \
${jprt.my.windows.x64}-product-c2-runThese, \
${jprt.my.windows.x64}-product-c2-runThese_Xcomp, \
@@ -351,6 +366,7 @@ jprt.my.windows.x64.test.targets = \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_G1, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default, \
${jprt.my.windows.x64}-{product|fastdebug}-c2-jbb_default_tiered, \
${jprt.my.windows.x64}-product-c2-jbb_CMS, \
${jprt.my.windows.x64}-product-c2-jbb_ParallelGC, \
${jprt.my.windows.x64}-product-c2-jbb_G1, \

@@ -61,7 +61,7 @@ include $(GAMMADIR)/make/scm.make
QUIETLY$(MAKE_VERBOSE) = @

# For now, until the compiler is less wobbly:
TESTFLAGS = -Xbatch -showversion
TESTFLAGS = -Xbatch -Xmx32m -showversion

### maye ARCH_XXX instead?
ifdef USE_GCC

@@ -119,6 +119,10 @@ else
LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc -ldemangle
endif # sparcWorks

ifeq ("${Platform_arch}", "sparc")
LIBS += -lkstat
endif

# By default, link the *.o into the library, not the executable.
LINK_INTO$(LINK_INTO) = LIBJVM

@@ -23,6 +23,7 @@
*/

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
@@ -1327,37 +1328,38 @@ void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d)
}


int MacroAssembler::size_of_sethi(address a, bool worst_case) {
int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
#ifdef _LP64
if (worst_case) return 7;
intptr_t iaddr = (intptr_t)a;
int hi32 = (int)(iaddr >> 32);
int lo32 = (int)(iaddr);
int inst_count;
if (hi32 == 0 && lo32 >= 0)
inst_count = 1;
else if (hi32 == -1)
inst_count = 2;
if (worst_case) return 7;
intptr_t iaddr = (intptr_t) a;
int msb32 = (int) (iaddr >> 32);
int lsb32 = (int) (iaddr);
int count;
if (msb32 == 0 && lsb32 >= 0)
count = 1;
else if (msb32 == -1)
count = 2;
else {
inst_count = 2;
if ( hi32 & 0x3ff )
inst_count++;
if ( lo32 & 0xFFFFFC00 ) {
if( (lo32 >> 20) & 0xfff ) inst_count += 2;
if( (lo32 >> 10) & 0x3ff ) inst_count += 2;
count = 2;
if (msb32 & 0x3ff)
count++;
if (lsb32 & 0xFFFFFC00 ) {
if ((lsb32 >> 20) & 0xfff) count += 2;
if ((lsb32 >> 10) & 0x3ff) count += 2;
}
}
return BytesPerInstWord * inst_count;
return count;
#else
return BytesPerInstWord;
return 1;
#endif
}

int MacroAssembler::worst_case_size_of_set() {
return size_of_sethi(NULL, true) + 1;
int MacroAssembler::worst_case_insts_for_set() {
return insts_for_sethi(NULL, true) + 1;
}


// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
intptr_t value = addrlit.value();

@@ -1379,6 +1381,23 @@ void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, boo
}
}

// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
// can optimize
if (-4096 <= value && value <= 4095) {
return 1;
}
if (inv_hi22(hi22(value)) == value) {
return insts_for_sethi((address) value);
}
int count = insts_for_sethi((address) value);
AddressLiteral al(value);
if (al.low10() != 0) {
count++;
}
return count;
}

void MacroAssembler::set(const AddressLiteral& al, Register d) {
internal_set(al, d, false);
}
@@ -1443,11 +1462,11 @@ void MacroAssembler::set64(jlong value, Register d, Register tmp) {
}
}

int MacroAssembler::size_of_set64(jlong value) {
int MacroAssembler::insts_for_set64(jlong value) {
v9_dep();

int hi = (int)(value >> 32);
int lo = (int)(value & ~0);
int hi = (int) (value >> 32);
int lo = (int) (value & ~0);
int count = 0;

// (Matcher::isSimpleConstant64 knows about the following optimizations.)

@@ -1884,23 +1884,24 @@ public:
void sethi(const AddressLiteral& addrlit, Register d);
void patchable_sethi(const AddressLiteral& addrlit, Register d);

// compute the size of a sethi/set
static int size_of_sethi( address a, bool worst_case = false );
static int worst_case_size_of_set();
// compute the number of instructions for a sethi/set
static int insts_for_sethi( address a, bool worst_case = false );
static int worst_case_insts_for_set();

// set may be either setsw or setuw (high 32 bits may be zero or sign)
private:
void internal_set(const AddressLiteral& al, Register d, bool ForceRelocatable);
static int insts_for_internal_set(intptr_t value);
public:
void set(const AddressLiteral& addrlit, Register d);
void set(intptr_t value, Register d);
void set(address addr, Register d, RelocationHolder const& rspec);
static int insts_for_set(intptr_t value) { return insts_for_internal_set(value); }

void patchable_set(const AddressLiteral& addrlit, Register d);
void patchable_set(intptr_t value, Register d);
void set64(jlong value, Register d, Register tmp);

// Compute size of set64.
static int size_of_set64(jlong value);
static int insts_for_set64(jlong value);

// sign-extend 32 to 64
inline void signx( Register s, Register d ) { sra( s, G0, d); }

@@ -1705,8 +1705,7 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {

void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
Assembler::Condition acond;
switch (condition) {
case lir_cond_equal: acond = Assembler::equal; break;
@@ -1737,7 +1736,12 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
ShouldNotReachHere();
}
Label skip;
__ br(acond, false, Assembler::pt, skip);
#ifdef _LP64
if (type == T_INT) {
__ br(acond, false, Assembler::pt, skip);
} else
#endif
__ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
if (opr1->is_constant() && opr1->type() == T_INT) {
Register dest = result->as_register();
if (Assembler::is_simm13(opr1->as_jint())) {
@@ -2688,6 +2692,11 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
#ifdef _LP64
__ mov(cmp_value_lo, t1);
__ mov(new_value_lo, t2);
// perform the compare and swap operation
__ casx(addr, t1, t2);
// generate condition code - if the swap succeeded, t2 ("new value" reg) was
// overwritten with the original value in "addr" and will be equal to t1.
__ cmp(t1, t2);
#else
// move high and low halves of long values into single registers
__ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg
@@ -2696,13 +2705,15 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
__ sllx(new_value_hi, 32, t2);
__ srl(new_value_lo, 0, new_value_lo);
__ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap
#endif
// perform the compare and swap operation
__ casx(addr, t1, t2);
// generate condition code - if the swap succeeded, t2 ("new value" reg) was
// overwritten with the original value in "addr" and will be equal to t1.
__ cmp(t1, t2);

// Produce icc flag for 32bit.
__ sub(t1, t2, t2);
__ srlx(t2, 32, t1);
__ orcc(t2, t1, G0);
#endif
} else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
Register addr = op->addr()->as_pointer_register();
Register cmp_value = op->cmp_value()->as_register();

@@ -662,7 +662,7 @@ void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {

// generate conditional move of boolean result
LIR_Opr result = rlock_result(x);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);
}


@@ -699,10 +699,10 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
else {
ShouldNotReachHere();
}

// generate conditional move of boolean result
LIR_Opr result = rlock_result(x);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
result, as_BasicType(type));
if (type == objectType) { // Write-barrier needed for Object fields.
// Precise card mark since could either be object or array
post_barrier(addr, val.result());

@@ -395,18 +395,23 @@ int MethodHandles::adapter_conversion_ops_supported_mask() {
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek, TRAPS) {
// Here is the register state during an interpreted call,
// as set up by generate_method_handle_interpreter_entry():
// - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
// - G3: receiver method handle
// - O5_savedSP: sender SP (must preserve)

Register O0_argslot = O0;
Register O1_scratch = O1;
Register O2_scratch = O2;
Register O3_scratch = O3;
Register G5_index = G5;
const Register O0_argslot = O0;
const Register O1_scratch = O1;
const Register O2_scratch = O2;
const Register O3_scratch = O3;
const Register G5_index = G5;

// Argument registers for _raise_exception.
const Register O0_code = O0;
const Register O1_actual = O1;
const Register O2_required = O2;

guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

@@ -439,48 +444,36 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case _raise_exception:
{
// Not a real MH entry, but rather shared code for raising an
// exception. Extra local arguments are passed in scratch
// registers, as required type in O3, failing object (or NULL)
// in O2, failing bytecode type in O1.
// exception. Since we use a C2I adapter to set up the
// interpreter state, arguments are expected in compiler
// argument registers.
methodHandle mh(raise_exception_method());
address c2i_entry = methodOopDesc::make_adapters(mh, CATCH);

__ mov(O5_savedSP, SP); // Cut the stack back to where the caller started.

// Push arguments as if coming from the interpreter.
Register O0_scratch = O0_argslot;
int stackElementSize = Interpreter::stackElementSize;

// Make space on the stack for the arguments and set Gargs
// correctly.
__ sub(SP, 4*stackElementSize, SP); // Keep stack aligned.
__ add(SP, (frame::varargs_offset)*wordSize - 1*Interpreter::stackElementSize + STACK_BIAS + BytesPerWord, Gargs);

// void raiseException(int code, Object actual, Object required)
__ st( O1_scratch, Address(Gargs, 2*stackElementSize)); // code
__ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize)); // actual
__ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize)); // required

Label no_method;
Label L_no_method;
// FIXME: fill in _raise_exception_method with a suitable sun.dyn method
__ set(AddressLiteral((address) &_raise_exception_method), G5_method);
__ ld_ptr(Address(G5_method, 0), G5_method);
__ tst(G5_method);
__ brx(Assembler::zero, false, Assembler::pn, no_method);
__ brx(Assembler::zero, false, Assembler::pn, L_no_method);
__ delayed()->nop();

int jobject_oop_offset = 0;
const int jobject_oop_offset = 0;
__ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
__ tst(G5_method);
__ brx(Assembler::zero, false, Assembler::pn, no_method);
__ brx(Assembler::zero, false, Assembler::pn, L_no_method);
__ delayed()->nop();

__ verify_oop(G5_method);
__ jump_indirect_to(G5_method_fie, O1_scratch);
__ jump_to(AddressLiteral(c2i_entry), O3_scratch);
__ delayed()->nop();

// If we get here, the Java runtime did not do its job of creating the exception.
// Do something that is at least causes a valid throw from the interpreter.
__ bind(no_method);
__ unimplemented("_raise_exception no method");
__ bind(L_no_method);
__ unimplemented("call throw_WrongMethodType_entry");
}
break;

@@ -570,10 +563,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// Throw an exception.
// For historical reasons, it will be IncompatibleClassChangeError.
__ unimplemented("not tested yet");
__ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch); // required interface
__ mov(O0_klass, O2_scratch); // bad receiver
__ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
__ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch); // who is complaining?
__ ld_ptr(Address(O1_intf, java_mirror_offset), O2_required); // required interface
__ mov( O0_klass, O1_actual); // bad receiver
__ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
__ delayed()->mov(Bytecodes::_invokeinterface, O0_code); // who is complaining?
}
break;

@@ -663,11 +656,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);

// If we get here, the type check failed!
__ ldsw(G3_amh_vmargslot, O0_argslot); // reload argslot field
__ load_heap_oop(G3_amh_argument, O3_scratch); // required class
__ ld_ptr(vmarg, O2_scratch); // bad object
__ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
__ delayed()->mov(Bytecodes::_checkcast, O1_scratch); // who is complaining?
__ load_heap_oop(G3_amh_argument, O2_required); // required class
__ ld_ptr( vmarg, O1_actual); // bad object
__ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O3_scratch);
__ delayed()->mov(Bytecodes::_checkcast, O0_code); // who is complaining?

__ bind(done);
// Get the new MH:

@@ -1086,9 +1086,9 @@ void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
if (UseRDPCForConstantTableBase) {
// This is really the worst case but generally it's only 1 instruction.
return 4 /*rdpc*/ + 4 /*sub*/ + MacroAssembler::worst_case_size_of_set();
return (1 /*rdpc*/ + 1 /*sub*/ + MacroAssembler::worst_case_insts_for_set()) * BytesPerInstWord;
} else {
return MacroAssembler::worst_case_size_of_set();
return MacroAssembler::worst_case_insts_for_set() * BytesPerInstWord;
}
}

@@ -1240,7 +1240,7 @@ const Pipeline * MachEpilogNode::pipeline() const {

int MachEpilogNode::safepoint_offset() const {
assert( do_polling(), "no return for this epilog node");
return MacroAssembler::size_of_sethi(os::get_polling_page());
return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord;
}

//=============================================================================
@@ -3553,9 +3553,10 @@ operand immP() %{
interface(CONST_INTER);
%}

// Pointer Immediate: 32 or 64-bit
#ifdef _LP64
// Pointer Immediate: 64-bit
operand immP_set() %{
predicate(!VM_Version::is_niagara1_plus());
predicate(!VM_Version::is_niagara_plus());
match(ConP);

op_cost(5);
@@ -3564,10 +3565,10 @@ operand immP_set() %{
interface(CONST_INTER);
%}

// Pointer Immediate: 32 or 64-bit
// Pointer Immediate: 64-bit
// From Niagara2 processors on a load should be better than materializing.
operand immP_load() %{
predicate(VM_Version::is_niagara1_plus());
predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3)));
match(ConP);

op_cost(5);
@@ -3576,6 +3577,18 @@ operand immP_load() %{
interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
operand immP_no_oop_cheap() %{
predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3));
match(ConP);

op_cost(5);
// formats are generated automatically for constants and base registers
format %{ %}
interface(CONST_INTER);
%}
#endif

operand immP13() %{
predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
match(ConP);
@@ -3673,7 +3686,7 @@ operand immL_32bits() %{

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
predicate(!VM_Version::is_niagara1_plus() || MacroAssembler::size_of_set64(n->get_long()) <= 3);
predicate(!VM_Version::is_niagara_plus() || MacroAssembler::insts_for_set64(n->get_long()) <= 3);
match(ConL);
op_cost(0);

@@ -3683,7 +3696,7 @@ operand immL_cheap() %{

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
predicate(VM_Version::is_niagara1_plus() && MacroAssembler::size_of_set64(n->get_long()) > 3);
predicate(VM_Version::is_niagara_plus() && MacroAssembler::insts_for_set64(n->get_long()) > 3);
match(ConL);
op_cost(0);

@@ -6094,8 +6107,18 @@ instruct loadConP_load(iRegP dst, immP_load con) %{
ins_cost(MEMORY_REF_COST);
format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %}
ins_encode %{
RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
__ ld_ptr($constanttablebase, con_offset, $dst$$Register);
RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
__ ld_ptr($constanttablebase, con_offset, $dst$$Register);
%}
ins_pipe(loadConP);
%}

instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
match(Set dst con);
ins_cost(DEFAULT_COST * 3/2);
format %{ "SET $con,$dst\t! non-oop ptr" %}
ins_encode %{
__ set($con$$constant, $dst$$Register);
%}
ins_pipe(loadConP);
%}

@@ -38,12 +38,6 @@
int VM_Version::_features = VM_Version::unknown_m;
const char* VM_Version::_features_str = "";

bool VM_Version::is_niagara1_plus() {
// This is a placeholder until the real test is determined.
return is_niagara1() &&
(os::processor_count() > maximum_niagara1_processor_count());
}

void VM_Version::initialize() {
_features = determine_features();
PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
@@ -69,11 +63,21 @@ void VM_Version::initialize() {

_supports_cx8 = has_v9();

if (is_niagara1()) {
if (is_niagara()) {
// Indirect branch is the same cost as direct
if (FLAG_IS_DEFAULT(UseInlineCaches)) {
FLAG_SET_DEFAULT(UseInlineCaches, false);
}
// Align loops on a single instruction boundary.
if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
}
// When using CMS, we cannot use memset() in BOT updates because
// the sun4v/CMT version in libc_psr uses BIS which exposes
// "phantom zeros" to concurrent readers. See 6948537.
if (FLAG_IS_DEFAULT(UseMemSetInBOT) && UseConcMarkSweepGC) {
FLAG_SET_DEFAULT(UseMemSetInBOT, false);
}
#ifdef _LP64
// 32-bit oops don't make sense for the 64-bit VM on sparc
// since the 32-bit VM has the same registers and smaller objects.
@@ -89,7 +93,7 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
}
if (is_niagara1_plus()) {
if (is_niagara_plus()) {
if (has_blk_init() && AllocatePrefetchStyle > 0 &&
FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
// Use BIS instruction for allocation prefetch.
@@ -105,15 +109,6 @@ void VM_Version::initialize() {
}
}
#endif
if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
}
// When using CMS, we cannot use memset() in BOT updates because
// the sun4v/CMT version in libc_psr uses BIS which exposes
// "phantom zeros" to concurrent readers. See 6948537.
if (FLAG_IS_DEFAULT(UseMemSetInBOT) && UseConcMarkSweepGC) {
FLAG_SET_DEFAULT(UseMemSetInBOT, false);
}
}

// Use hardware population count instruction if available.
@@ -129,17 +124,18 @@ void VM_Version::initialize() {
#endif

char buf[512];
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
(has_v8() ? ", has_v8" : ""),
(has_v9() ? ", has_v9" : ""),
(has_hardware_popc() ? ", popc" : ""),
(has_vis1() ? ", has_vis1" : ""),
(has_vis2() ? ", has_vis2" : ""),
(has_vis3() ? ", has_vis3" : ""),
(has_blk_init() ? ", has_blk_init" : ""),
(is_ultra3() ? ", is_ultra3" : ""),
(is_sun4v() ? ", is_sun4v" : ""),
(is_niagara1() ? ", is_niagara1" : ""),
(is_niagara1_plus() ? ", is_niagara1_plus" : ""),
(is_niagara() ? ", is_niagara" : ""),
(is_niagara_plus() ? ", is_niagara_plus" : ""),
(is_sparc64() ? ", is_sparc64" : ""),
(!has_hardware_mul32() ? ", no-mul32" : ""),
(!has_hardware_div32() ? ", no-div32" : ""),
@@ -190,17 +186,18 @@ int VM_Version::determine_features() {
warning("Cannot recognize SPARC version. Default to V9");
}

if (UseNiagaraInstrs) {
if (is_niagara1(features)) {
assert(is_T_family(features) == is_niagara(features), "Niagara should be T series");
if (UseNiagaraInstrs) { // Force code generation for Niagara
if (is_T_family(features)) {
// Happy to accomodate...
} else {
NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Version is Forced-Niagara");)
features = niagara1_m;
features |= T_family_m;
}
} else {
if (is_niagara1(features) && !FLAG_IS_DEFAULT(UseNiagaraInstrs)) {
if (is_T_family(features) && !FLAG_IS_DEFAULT(UseNiagaraInstrs)) {
NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Version is Forced-Not-Niagara");)
features &= ~niagara1_unique_m;
features &= ~(T_family_m | T1_model_m);
} else {
// Happy to accomodate...
}
@@ -222,7 +219,7 @@ void VM_Version::revert() {

unsigned int VM_Version::calc_parallel_worker_threads() {
unsigned int result;
if (is_niagara1_plus()) {
if (is_niagara_plus()) {
result = nof_parallel_worker_threads(5, 16, 8);
} else {
result = nof_parallel_worker_threads(5, 8, 8);

@@ -41,7 +41,12 @@ protected:
vis2_instructions = 7,
sun4v_instructions = 8,
blk_init_instructions = 9,
fmaf_instructions = 10
fmaf_instructions = 10,
fmau_instructions = 11,
vis3_instructions = 12,
sparc64_family = 13,
T_family = 14,
T1_model = 15
};

enum Feature_Flag_Set {
@@ -59,6 +64,11 @@ protected:
sun4v_m = 1 << sun4v_instructions,
blk_init_instructions_m = 1 << blk_init_instructions,
fmaf_instructions_m = 1 << fmaf_instructions,
fmau_instructions_m = 1 << fmau_instructions,
vis3_instructions_m = 1 << vis3_instructions,
sparc64_family_m = 1 << sparc64_family,
T_family_m = 1 << T_family,
T1_model_m = 1 << T1_model,

generic_v8_m = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
generic_v9_m = generic_v8_m | v9_instructions_m,
@@ -76,8 +86,13 @@ protected:
static int determine_features();
static int platform_features(int features);

static bool is_niagara1(int features) { return (features & sun4v_m) != 0; }
static bool is_sparc64(int features) { return (features & fmaf_instructions_m) != 0; }
// Returns true if the platform is in the niagara line (T series)
static bool is_T_family(int features) { return (features & T_family_m) != 0; }
static bool is_niagara() { return is_T_family(_features); }
DEBUG_ONLY( static bool is_niagara(int features) { return (features & sun4v_m) != 0; } )

// Returns true if it is niagara1 (T1).
static bool is_T1_model(int features) { return is_T_family(features) && ((features & T1_model_m) != 0); }

static int maximum_niagara1_processor_count() { return 32; }

@@ -94,6 +109,7 @@ public:
static bool has_hardware_popc() { return (_features & hardware_popc_m) != 0; }
static bool has_vis1() { return (_features & vis1_instructions_m) != 0; }
static bool has_vis2() { return (_features & vis2_instructions_m) != 0; }
static bool has_vis3() { return (_features & vis3_instructions_m) != 0; }
static bool has_blk_init() { return (_features & blk_init_instructions_m) != 0; }

static bool supports_compare_and_exchange()
@@ -101,14 +117,14 @@ public:

static bool is_ultra3() { return (_features & ultra3_m) == ultra3_m; }
static bool is_sun4v() { return (_features & sun4v_m) != 0; }
static bool is_niagara1() { return is_niagara1(_features); }
// Returns true if the platform is in the niagara line and
// newer than the niagara1.
static bool is_niagara1_plus();
static bool is_sparc64() { return is_sparc64(_features); }
// Returns true if the platform is in the niagara line (T series)
// and newer than the niagara1.
static bool is_niagara_plus() { return is_T_family(_features) && !is_T1_model(_features); }
// Fujitsu SPARC64
static bool is_sparc64() { return (_features & sparc64_family_m) != 0; }

static bool has_fast_fxtof() { return has_v9() && !is_ultra3(); }
static bool has_fast_idiv() { return is_niagara1_plus() || is_sparc64(); }
static bool has_fast_fxtof() { return is_niagara() || is_sparc64() || has_v9() && !is_ultra3(); }
static bool has_fast_idiv() { return is_niagara_plus() || is_sparc64(); }

static const char* cpu_features() { return _features_str; }

@@ -2036,7 +2036,7 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
}
}

void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
Assembler::Condition acond, ncond;
switch (condition) {
case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break;

@@ -741,7 +741,7 @@ void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {

// generate conditional move of boolean result
LIR_Opr result = rlock_result(x);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);
}


@@ -810,7 +810,8 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {

// generate conditional move of boolean result
LIR_Opr result = rlock_result(x);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
result, as_BasicType(type));
if (type == objectType) { // Write-barrier needed for Object fields.
// Seems to be precise
post_barrier(addr, val.result());

@@ -385,9 +385,12 @@ int MethodHandles::adapter_conversion_ops_supported_mask() {
// FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}

//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek, TRAPS) {
// Here is the register state during an interpreted call,
// as set up by generate_method_handle_interpreter_entry():
// - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
@@ -396,14 +399,21 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
// - rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
// - rdx: garbage temp, can blow away

Register rcx_recv = rcx;
Register rax_argslot = rax;
Register rbx_temp = rbx;
Register rdx_temp = rdx;
const Register rcx_recv = rcx;
const Register rax_argslot = rax;
const Register rbx_temp = rbx;
const Register rdx_temp = rdx;

// This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls)
// and gen_c2i_adapter (from compiled calls):
Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi);
const Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi);

// Argument registers for _raise_exception.
// 32-bit: Pass first two oop/int args in registers ECX and EDX.
const Register rarg0_code = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
const Register rarg1_actual = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
const Register rarg2_required = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
assert_different_registers(rarg0_code, rarg1_actual, rarg2_required, saved_last_sp);

guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

@@ -437,47 +447,41 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
switch ((int) ek) {
case _raise_exception:
{
// Not a real MH entry, but rather shared code for raising an exception.
// Extra local arguments are pushed on stack, as required type at TOS+8,
// failing object (or NULL) at TOS+4, failing bytecode type at TOS.
// Beyond those local arguments are the PC, of course.
Register rdx_code = rdx_temp;
Register rcx_fail = rcx_recv;
Register rax_want = rax_argslot;
Register rdi_pc = rdi;
__ pop(rdx_code); // TOS+0
__ pop(rcx_fail); // TOS+4
__ pop(rax_want); // TOS+8
__ pop(rdi_pc); // caller PC
// Not a real MH entry, but rather shared code for raising an
// exception. Since we use a C2I adapter to set up the
// interpreter state, arguments are expected in compiler
// argument registers.
methodHandle mh(raise_exception_method());
address c2i_entry = methodOopDesc::make_adapters(mh, CHECK);

__ mov(rsp, rsi); // cut the stack back to where the caller started

// Repush the arguments as if coming from the interpreter.
__ push(rdx_code);
__ push(rcx_fail);
__ push(rax_want);
const Register rdi_pc = rax;
__ pop(rdi_pc); // caller PC
__ mov(rsp, saved_last_sp); // cut the stack back to where the caller started

Register rbx_method = rbx_temp;
Label no_method;
Label L_no_method;
// FIXME: fill in _raise_exception_method with a suitable sun.dyn method
__ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
__ testptr(rbx_method, rbx_method);
__ jccb(Assembler::zero, no_method);
int jobject_oop_offset = 0;
__ jccb(Assembler::zero, L_no_method);

const int jobject_oop_offset = 0;
__ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject
__ testptr(rbx_method, rbx_method);
__ jccb(Assembler::zero, no_method);
__ jccb(Assembler::zero, L_no_method);
__ verify_oop(rbx_method);
__ push(rdi_pc); // and restore caller PC
__ jmp(rbx_method_fie);

// 32-bit: push remaining arguments as if coming from the compiler.
NOT_LP64(__ push(rarg2_required));

__ push(rdi_pc); // restore caller PC
__ jump(ExternalAddress(c2i_entry)); // do C2I transition

// If we get here, the Java runtime did not do its job of creating the exception.
// Do something that is at least causes a valid throw from the interpreter.
__ bind(no_method);
__ pop(rax_want);
__ pop(rcx_fail);
__ push(rax_want);
__ push(rcx_fail);
__ bind(L_no_method);
__ push(rarg2_required);
__ push(rarg1_actual);
__ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
}
break;
@@ -572,9 +576,11 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ bind(no_such_interface);
// Throw an exception.
// For historical reasons, it will be IncompatibleClassChangeError.
__ pushptr(Address(rdx_intf, java_mirror_offset)); // required interface
__ push(rcx_recv); // bad receiver
__ push((int)Bytecodes::_invokeinterface); // who is complaining?
__ mov(rbx_temp, rcx_recv); // rarg2_required might be RCX
assert_different_registers(rarg2_required, rbx_temp);
__ movptr(rarg2_required, Address(rdx_intf, java_mirror_offset)); // required interface
__ mov( rarg1_actual, rbx_temp); // bad receiver
__ movl( rarg0_code, (int) Bytecodes::_invokeinterface); // who is complaining?
__ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
}
break;
@@ -669,10 +675,10 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
__ movl(rax_argslot, rcx_amh_vmargslot); // reload argslot field
__ movptr(rdx_temp, vmarg);

__ load_heap_oop(rbx_klass, rcx_amh_argument); // required class
__ push(rbx_klass);
__ push(rdx_temp); // bad object
__ push((int)Bytecodes::_checkcast); // who is complaining?
assert_different_registers(rarg2_required, rdx_temp);
__ load_heap_oop(rarg2_required, rcx_amh_argument); // required class
__ mov( rarg1_actual, rdx_temp); // bad object
__ movl( rarg0_code, (int) Bytecodes::_checkcast); // who is complaining?
__ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

__ bind(done);
@@ -1189,16 +1195,18 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan

__ bind(bad_array_klass);
UNPUSH_RSI_RDI;
__ pushptr(Address(rdx_array_klass, java_mirror_offset)); // required type
__ pushptr(vmarg); // bad array
__ push((int)Bytecodes::_aaload); // who is complaining?
assert(!vmarg.uses(rarg2_required), "must be different registers");
__ movptr(rarg2_required, Address(rdx_array_klass, java_mirror_offset)); // required type
__ movptr(rarg1_actual, vmarg); // bad array
__ movl( rarg0_code, (int) Bytecodes::_aaload); // who is complaining?
__ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

__ bind(bad_array_length);
UNPUSH_RSI_RDI;
__ push(rcx_recv); // AMH requiring a certain length
__ pushptr(vmarg); // bad array
__ push((int)Bytecodes::_arraylength); // who is complaining?
assert(!vmarg.uses(rarg2_required), "must be different registers");
__ mov (rarg2_required, rcx_recv); // AMH requiring a certain length
__ movptr(rarg1_actual, vmarg); // bad array
__ movl( rarg0_code, (int) Bytecodes::_arraylength); // who is complaining?
__ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

#undef UNPUSH_RSI_RDI

@@ -3090,7 +3090,7 @@ char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
if (addr == NULL) {
jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
}
warning("attempt_reserve_memory_at: couldn't reserve %d bytes at "
warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
"%s", bytes, requested_addr, addr, buf);
}

@@ -29,6 +29,7 @@
# include <sys/auxv.h>
# include <sys/auxv_SPARC.h>
# include <sys/systeminfo.h>
# include <kstat.h>

// We need to keep these here as long as we have to build on Solaris
// versions before 10.
@@ -95,12 +96,24 @@ int VM_Version::platform_features(int features) {
// but Solaris 8 is used for jdk6 update builds.
#ifndef AV_SPARC_ASI_BLK_INIT
#define AV_SPARC_ASI_BLK_INIT 0x0080 /* ASI_BLK_INIT_xxx ASI */
#endif
#ifndef AV_SPARC_FMAF
#define AV_SPARC_FMAF 0x0100 /* Sparc64 Fused Multiply-Add */
#endif
if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;

#ifndef AV_SPARC_FMAF
#define AV_SPARC_FMAF 0x0100 /* Fused Multiply-Add */
#endif
if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m;

#ifndef AV_SPARC_FMAU
#define AV_SPARC_FMAU 0x0200 /* Unfused Multiply-Add */
#endif
if (av & AV_SPARC_FMAU) features |= fmau_instructions_m;

#ifndef AV_SPARC_VIS3
#define AV_SPARC_VIS3 0x0400 /* VIS3 instruction set extensions */
#endif
if (av & AV_SPARC_VIS3) features |= vis3_instructions_m;

} else {
// getisax(2) failed, use the old legacy code.
#ifndef PRODUCT
@@ -140,5 +153,59 @@ int VM_Version::platform_features(int features) {
// Determine the machine type.
do_sysinfo(SI_MACHINE, "sun4v", &features, sun4v_m);

{
// Using kstat to determine the machine type.
kstat_ctl_t* kc = kstat_open();
kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, NULL);
const char* implementation = "UNKNOWN";
if (ksp != NULL) {
if (kstat_read(kc, ksp, NULL) != -1 && ksp->ks_data != NULL) {
kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
for (int i = 0; i < ksp->ks_ndata; i++) {
if (strcmp((const char*)&(knm[i].name),"implementation") == 0) {
#ifndef KSTAT_DATA_STRING
#define KSTAT_DATA_STRING 9
#endif
if (knm[i].data_type == KSTAT_DATA_CHAR) {
// VM is running on Solaris 8 which does not have value.str.
implementation = &(knm[i].value.c[0]);
} else if (knm[i].data_type == KSTAT_DATA_STRING) {
// VM is running on Solaris 10.
#ifndef KSTAT_NAMED_STR_PTR
// Solaris 8 was used to build VM, define the structure it misses.
struct str_t {
union {
char *ptr; /* NULL-term string */
char __pad[8]; /* 64-bit padding */
} addr;
uint32_t len; /* # bytes for strlen + '\0' */
};
#define KSTAT_NAMED_STR_PTR(knptr) (( (str_t*)&((knptr)->value) )->addr.ptr)
#endif
implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
}
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("cpu_info.implementation: %s", implementation);
}
#endif
if (strncmp(implementation, "SPARC64", 7) == 0) {
features |= sparc64_family_m;
} else if (strncmp(implementation, "UltraSPARC-T", 12) == 0) {
features |= T_family_m;
if (strncmp(implementation, "UltraSPARC-T1", 13) == 0) {
features |= T1_model_m;
}
}
break;
}
} // for(
}
}
assert(strcmp(implementation, "UNKNOWN") != 0,
"unknown cpu info (changed kstat interface?)");
kstat_close(kc);
}

return features;
}

@@ -34,7 +34,7 @@
#define MAXID 20
static char initflag = 0; // True after 1st initialization
static char shft[MAXID] = {1,2,3,4,5,6,7,1,2,3,4,5,6,7,1,2,3,4,5,6};
static short xsum[MAXID + 1];
static short xsum[MAXID];

//------------------------------bucket---------------------------------------
class bucket {
@@ -66,7 +66,7 @@ void Dict::init() {
// Precompute table of null character hashes
if( !initflag ) { // Not initializated yet?
xsum[0] = (1<<shft[0])+1; // Initialize
for( i = 1; i < MAXID + 1; i++) {
for( i = 1; i < MAXID; i++) {
xsum[i] = (1<<shft[i])+1+xsum[i-1];
}
initflag = 1; // Never again
@@ -291,7 +291,7 @@ int hashstr(const void *t) {
c = (c<<1)+1; // Characters are always odd!
sum += c + (c<<shft[k++]); // Universal hash function
}
assert( k < (MAXID + 1), "Exceeded maximum name length");
assert( k < (MAXID), "Exceeded maximum name length");
return (int)((sum+xsum[k]) >> 1); // Hash key, un-modulo'd table size
}

@@ -92,7 +92,7 @@ CallingConvention* FrameMap::java_calling_convention(const BasicTypeArray* signa
if (opr->is_address()) {
LIR_Address* addr = opr->as_address_ptr();
assert(addr->disp() == (int)addr->disp(), "out of range value");
out_preserve = MAX2(out_preserve, (intptr_t)addr->disp() / 4);
out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
}
i += type2size[t];
}
@@ -143,7 +143,7 @@ CallingConvention* FrameMap::c_calling_convention(const BasicTypeArray* signatur
args->append(opr);
if (opr->is_address()) {
LIR_Address* addr = opr->as_address_ptr();
out_preserve = MAX2(out_preserve, (intptr_t)addr->disp() / 4);
out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
}
i += type2size[t];
}

@@ -1568,15 +1568,16 @@ class LIR_Op2: public LIR_Op {
assert(code == lir_cmp, "code check");
}

LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result)
LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
: LIR_Op(code, result, NULL)
, _opr1(opr1)
, _opr2(opr2)
, _type(T_ILLEGAL)
, _type(type)
, _condition(condition)
, _fpu_stack_size(0)
, _tmp(LIR_OprFact::illegalOpr) {
assert(code == lir_cmove, "code check");
assert(type != T_ILLEGAL, "cmove should have type");
}

LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
@@ -1993,8 +1994,8 @@ class LIR_List: public CompilationResourceObj {
void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);

void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst) {
append(new LIR_Op2(lir_cmove, condition, src1, src2, dst));
void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) {
append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type));
}

void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,

@@ -685,7 +685,7 @@ void LIR_Assembler::emit_op2(LIR_Op2* op) {
break;

case lir_cmove:
cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr());
cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
break;

case lir_shl:

@@ -217,7 +217,7 @@ class LIR_Assembler: public CompilationResourceObj {
void volatile_move_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info);
void comp_mem_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info); // info set for null exceptions
void comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr result, LIR_Op2* op);
void cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr result);
void cmove(LIR_Condition code, LIR_Opr left, LIR_Opr right, LIR_Opr result, BasicType type);

void call( LIR_OpJavaCall* op, relocInfo::relocType rtype);
void ic_call( LIR_OpJavaCall* op);

@@ -856,7 +856,7 @@ void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
__ cmove(lir_cond(cond),
LIR_OprFact::intptrConst(taken_count_offset),
LIR_OprFact::intptrConst(not_taken_count_offset),
data_offset_reg);
data_offset_reg, as_BasicType(if_instr->x()->type()));

// MDO cells are intptr_t, so the data_reg width is arch-dependent.
LIR_Opr data_reg = new_pointer_register();
@@ -2591,7 +2591,7 @@ void LIRGenerator::do_IfOp(IfOp* x) {
LIR_Opr reg = rlock_result(x);

__ cmp(lir_cond(x->cond()), left.result(), right.result());
__ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg);
__ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
}

@ -90,6 +90,7 @@ LinearScan::LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map)
|
|||
, _intervals(0) // initialized later with correct length
|
||||
, _new_intervals_from_allocation(new IntervalList())
|
||||
, _sorted_intervals(NULL)
|
||||
, _needs_full_resort(false)
|
||||
, _lir_ops(0) // initialized later with correct length
|
||||
, _block_of_op(0) // initialized later with correct length
|
||||
, _has_info(0)
|
||||
|
@ -1520,6 +1521,14 @@ void LinearScan::create_unhandled_lists(Interval** list1, Interval** list2, bool
|
|||
void LinearScan::sort_intervals_before_allocation() {
|
||||
TIME_LINEAR_SCAN(timer_sort_intervals_before);
|
||||
|
||||
if (_needs_full_resort) {
|
||||
// There is no known reason why this should occur but just in case...
|
||||
assert(false, "should never occur");
|
||||
// Re-sort existing interval list because an Interval::from() has changed
|
||||
_sorted_intervals->sort(interval_cmp);
|
||||
_needs_full_resort = false;
|
||||
}
|
||||
|
||||
IntervalList* unsorted_list = &_intervals;
|
||||
int unsorted_len = unsorted_list->length();
|
||||
int sorted_len = 0;
|
||||
|
@ -1559,11 +1568,18 @@ void LinearScan::sort_intervals_before_allocation() {
    }
  }
  _sorted_intervals = sorted_list;
  assert(is_sorted(_sorted_intervals), "intervals unsorted");
}

void LinearScan::sort_intervals_after_allocation() {
  TIME_LINEAR_SCAN(timer_sort_intervals_after);

  if (_needs_full_resort) {
    // Re-sort existing interval list because an Interval::from() has changed
    _sorted_intervals->sort(interval_cmp);
    _needs_full_resort = false;
  }

  IntervalArray* old_list = _sorted_intervals;
  IntervalList* new_list = _new_intervals_from_allocation;
  int old_len = old_list->length();
@ -1571,6 +1587,7 @@ void LinearScan::sort_intervals_after_allocation() {

  if (new_len == 0) {
    // no intervals have been added during allocation, so sorted list is already up to date
    assert(is_sorted(_sorted_intervals), "intervals unsorted");
    return;
  }

@ -1593,6 +1610,7 @@ void LinearScan::sort_intervals_after_allocation() {
  }

  _sorted_intervals = combined_list;
  assert(is_sorted(_sorted_intervals), "intervals unsorted");
}
@ -1825,6 +1843,8 @@ void LinearScan::resolve_exception_entry(BlockBegin* block, int reg_num, MoveRes
    interval = interval->split(from_op_id);
    interval->assign_reg(reg, regHi);
    append_interval(interval);
  } else {
    _needs_full_resort = true;
  }
  assert(interval->from() == from_op_id, "must be true now");

@ -4492,7 +4512,8 @@ void Interval::print(outputStream* out) const {
    }
  } else {
    type_name = type2name(type());
    if (assigned_reg() != -1) {
    if (assigned_reg() != -1 &&
        (LinearScan::num_physical_regs(type()) == 1 || assigned_regHi() != -1)) {
      opr = LinearScan::calc_operand_for_interval(this);
    }
  }
@ -148,6 +148,7 @@ class LinearScan : public CompilationResourceObj {
  IntervalList _intervals;          // mapping from register number to interval
  IntervalList* _new_intervals_from_allocation; // list with all intervals created during allocation when an existing interval is split
  IntervalArray* _sorted_intervals; // intervals sorted by Interval::from()
  bool _needs_full_resort;          // set to true if an Interval::from() is changed and _sorted_intervals must be resorted

  LIR_OpArray _lir_ops;             // mapping from LIR_Op id to LIR_Op node
  BlockBeginArray _block_of_op;     // mapping from LIR_Op id to the BlockBegin containing this instruction
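Side note on the new _needs_full_resort field: it implements lazy re-sorting. The sorted interval array is left untouched when an Interval::from() changes; the change only sets the flag, and the next sort_intervals_* call pays for a single full re-sort. Below is a minimal standalone C++ sketch of that dirty-flag pattern; Interval, IntervalTable and the comparator are invented for illustration and are not the HotSpot types.

// Illustrative sketch only -- not part of this changeset.
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <vector>

// Illustrative stand-in for a register-allocator interval.
struct Interval {
  int from;   // first operation id covered by the interval
};

class IntervalTable {
 public:
  void add(Interval iv) {
    _sorted.push_back(iv);
    std::sort(_sorted.begin(), _sorted.end(), cmp);
  }

  // Moving an interval's start position invalidates the sort order; instead
  // of re-sorting immediately, just mark the table as needing a full resort.
  void move_start(size_t idx, int new_from) {
    _sorted[idx].from = new_from;
    _needs_full_resort = true;
  }

  // Consumers call this before relying on sorted order.
  void ensure_sorted() {
    if (_needs_full_resort) {
      std::sort(_sorted.begin(), _sorted.end(), cmp);
      _needs_full_resort = false;
    }
    assert(std::is_sorted(_sorted.begin(), _sorted.end(), cmp));
  }

  int first_from() const { return _sorted.front().from; }

 private:
  static bool cmp(const Interval& a, const Interval& b) { return a.from < b.from; }
  std::vector<Interval> _sorted;
  bool _needs_full_resort = false;
};

int main() {
  IntervalTable table;
  table.add({10});
  table.add({20});
  table.move_start(1, 5);   // order is now stale, flag is set
  table.ensure_sorted();    // lazy resort happens here
  std::printf("smallest from() = %d\n", table.first_from());   // prints 5
}
// End of illustrative sketch.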
@ -2386,19 +2386,21 @@ void ClassFileParser::parse_classfile_bootstrap_methods_attribute(constantPoolHa
      valid_cp_range(bootstrap_method_index, cp_size) &&
      cp->tag_at(bootstrap_method_index).is_method_handle(),
      "bootstrap_method_index %u has bad constant type in class file %s",
      bootstrap_method_index,
      CHECK);
    operands->short_at_put(operand_fill_index++, bootstrap_method_index);
    operands->short_at_put(operand_fill_index++, argument_count);

    cfs->guarantee_more(sizeof(u2) * argument_count, CHECK); // argv[argc]
    for (int j = 0; j < argument_count; j++) {
      u2 arg_index = cfs->get_u2_fast();
      u2 argument_index = cfs->get_u2_fast();
      check_property(
        valid_cp_range(arg_index, cp_size) &&
        cp->tag_at(arg_index).is_loadable_constant(),
        valid_cp_range(argument_index, cp_size) &&
        cp->tag_at(argument_index).is_loadable_constant(),
        "argument_index %u has bad constant type in class file %s",
        argument_index,
        CHECK);
      operands->short_at_put(operand_fill_index++, arg_index);
      operands->short_at_put(operand_fill_index++, argument_index);
    }
  }
@ -939,7 +939,9 @@ void CodeCache::print_bounds(outputStream* st) {
               _heap->high(),
               _heap->high_boundary());
  st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
               " adapters=" UINT32_FORMAT " free_code_cache=" SIZE_FORMAT,
               " adapters=" UINT32_FORMAT " free_code_cache=" SIZE_FORMAT
               " largest_free_block=" SIZE_FORMAT,
               CodeCache::nof_blobs(), CodeCache::nof_nmethods(),
               CodeCache::nof_adapters(), CodeCache::unallocated_capacity());
               CodeCache::nof_adapters(), CodeCache::unallocated_capacity(),
               CodeCache::largest_free_block());
}

@ -158,6 +158,7 @@ class CodeCache : AllStatic {
  static size_t capacity() { return _heap->capacity(); }
  static size_t max_capacity() { return _heap->max_capacity(); }
  static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
  static size_t largest_free_block() { return _heap->largest_free_block(); }
  static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }

  static bool needs_cache_clean() { return _needs_cache_clean; }
@ -811,9 +811,11 @@ nmethod::nmethod(
    _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());

    // Exception handler and deopt handler are in the stub section
    assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
    assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set");
    _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
    _deoptimize_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
    if (has_method_handle_invokes()) {
      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
        _deoptimize_mh_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
      } else {
        _deoptimize_mh_offset = -1;

@ -1909,6 +1911,7 @@ void nmethod::copy_scopes_pcs(PcDesc* pcs, int count) {
      break;
    }
  }
  assert(has_method_handle_invokes() == (_deoptimize_mh_offset != -1), "must have deopt mh handler");

  int size = count * sizeof(PcDesc);
  assert(scopes_pcs_size() >= size, "oob");
@ -33,6 +33,7 @@ void AbstractCompiler::initialize_runtimes(initializer f, volatile int* state) {
    bool do_initialization = false;
    {
      ThreadInVMfromNative tv(thread);
      ResetNoHandleMark rnhm;
      MutexLocker only_one(CompileThread_lock, thread);
      if ( *state == uninitialized) {
        do_initialization = true;

@ -53,6 +54,7 @@ void AbstractCompiler::initialize_runtimes(initializer f, volatile int* state) {
      // To in_vm so we can use the lock

      ThreadInVMfromNative tv(thread);
      ResetNoHandleMark rnhm;
      MutexLocker only_one(CompileThread_lock, thread);
      assert(*state == initializing, "wrong state");
      *state = initialized;
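The two initialize_runtimes hunks above add ResetNoHandleMark to an existing one-time initialization protocol: threads race for CompileThread_lock, exactly one claims the expensive initialization, and latecomers wait until the shared state reaches initialized. The sketch below shows that state-machine shape with standard C++ primitives only; the names and the condition-variable wait are my own simplifications, not the VM's ThreadInVMfromNative/MutexLocker machinery.

// Illustrative sketch only -- not part of this changeset.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

enum State { uninitialized, initializing, initialized };

std::mutex init_lock;                 // stands in for CompileThread_lock
std::condition_variable init_done;
State state = uninitialized;

void initialize_runtime(void (*initializer)()) {
  bool do_initialization = false;
  {
    std::unique_lock<std::mutex> only_one(init_lock);
    if (state == uninitialized) {
      do_initialization = true;       // this thread owns the expensive work
      state = initializing;
    } else {
      // Someone else is (or was) initializing; wait for them to finish.
      init_done.wait(only_one, [] { return state == initialized; });
      return;
    }
  }
  if (do_initialization) {
    initializer();                    // run the heavy work outside the lock
    {
      std::lock_guard<std::mutex> only_one(init_lock);
      state = initialized;
    }
    init_done.notify_all();
  }
}

int main() {
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; i++) {
    threads.emplace_back([] {
      initialize_runtime([] { std::cout << "runtime initialized once\n"; });
    });
  }
  for (auto& t : threads) t.join();
}
// End of illustrative sketch.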
@ -332,7 +332,7 @@ static OracleCommand parse_command_name(const char * line, int* bytes_read) {
         "command_names size mismatch");

  *bytes_read = 0;
  char command[32];
  char command[33];
  int result = sscanf(line, "%32[a-z]%n", command, bytes_read);
  for (uint i = 0; i < ARRAY_SIZE(command_names); i++) {
    if (strcmp(command, command_names[i]) == 0) {
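The command[32] to command[33] bump above is the usual scanf width rule: %32[a-z] may store up to 32 matched characters plus a terminating NUL, so the destination needs 33 bytes. A small self-contained C++ demonstration of the sizing rule follows; the command table and input line are made up, not the real CompilerOracle data.

// Illustrative sketch only -- not part of this changeset.
#include <cstdio>
#include <cstring>

int main() {
  const char* command_names[] = { "exclude", "inline", "quiet", "option" };
  const char* line = "exclude,java/lang/String.indexOf";

  // %32[a-z] matches at most 32 lowercase letters and then writes a '\0',
  // so the buffer needs 32 + 1 bytes. A char[32] here would overflow by one
  // byte whenever the command is exactly 32 characters long.
  char command[33];
  int bytes_read = 0;
  if (sscanf(line, "%32[a-z]%n", command, &bytes_read) == 1) {
    for (size_t i = 0; i < sizeof(command_names) / sizeof(command_names[0]); i++) {
      if (strcmp(command, command_names[i]) == 0) {
        printf("matched command '%s' (%d bytes consumed)\n", command, bytes_read);
      }
    }
  }
  return 0;
}
// End of illustrative sketch.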
@ -470,6 +470,12 @@ void CompilerOracle::parse_from_line(char* line) {
  OracleCommand command = parse_command_name(line, &bytes_read);
  line += bytes_read;

  if (command == UnknownCommand) {
    tty->print_cr("CompilerOracle: unrecognized line");
    tty->print_cr(" \"%s\"", original_line);
    return;
  }

  if (command == QuietCommand) {
    _quiet = true;
    return;

@ -498,7 +504,7 @@ void CompilerOracle::parse_from_line(char* line) {
    line += bytes_read;
    // there might be a signature following the method.
    // signatures always begin with ( so match that by hand
    if (1 == sscanf(line, "%*[ \t](%254[);/" RANGEBASE "]%n", sig + 1, &bytes_read)) {
    if (1 == sscanf(line, "%*[ \t](%254[[);/" RANGEBASE "]%n", sig + 1, &bytes_read)) {
      sig[0] = '(';
      line += bytes_read;
      signature = oopFactory::new_symbol_handle(sig, CHECK);
@ -315,6 +315,15 @@ size_t CodeHeap::allocated_capacity() const {
  return l;
}

size_t CodeHeap::largest_free_block() const {
  size_t len = 0;
  for (FreeBlock* b = _freelist; b != NULL; b = b->link()) {
    if (b->length() > len)
      len = b->length();
  }
  return size(len);
}

// Free list management

FreeBlock *CodeHeap::following_block(FreeBlock *b) {
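For context, largest_free_block() is a plain maximum scan over the code heap's free list; print_bounds() then reports it next to free_code_cache, since total free space can be far larger than the biggest single allocatable chunk once the heap fragments. A standalone sketch of the same scan over a toy free list follows; FreeBlock here is an invented struct, not the real CodeHeap type.

// Illustrative sketch only -- not part of this changeset.
#include <cstddef>
#include <cstdio>

// Toy free-list node; the real CodeHeap measures blocks in heap segments.
struct FreeBlock {
  size_t     length;   // block size in bytes
  FreeBlock* link;     // next free block, or nullptr
};

// Walk the free list and return the size of the largest single block.
size_t largest_free_block(const FreeBlock* freelist) {
  size_t len = 0;
  for (const FreeBlock* b = freelist; b != nullptr; b = b->link) {
    if (b->length > len) {
      len = b->length;
    }
  }
  return len;
}

int main() {
  FreeBlock c = { 96, nullptr };
  FreeBlock b = { 512, &c };
  FreeBlock a = { 128, &b };
  // Total free space is 736 bytes, but the largest request that can be
  // satisfied without coalescing is only 512 bytes.
  printf("largest free block: %zu bytes\n", largest_free_block(&a));
}
// End of illustrative sketch.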
@ -161,6 +161,7 @@ class CodeHeap : public CHeapObj {
  size_t max_capacity() const;
  size_t allocated_capacity() const;
  size_t unallocated_capacity() const { return max_capacity() - allocated_capacity(); }
  size_t largest_free_block() const;

  // Debugging
  void verify();
@ -934,7 +934,8 @@ jint Universe::initialize_heap() {
    // See needs_explicit_null_check.
    // Only set the heap base for compressed oops because it indicates
    // compressed oops for pstack code.
    if (PrintCompressedOopsMode) {
    bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
    if (verbose) {
      tty->cr();
      tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
                 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);

@ -943,12 +944,12 @@ jint Universe::initialize_heap() {
      // Can't reserve heap below 32Gb.
      Universe::set_narrow_oop_base(Universe::heap()->base() - os::vm_page_size());
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      if (PrintCompressedOopsMode) {
      if (verbose) {
        tty->print(", Compressed Oops with base: "PTR_FORMAT, Universe::narrow_oop_base());
      }
    } else {
      Universe::set_narrow_oop_base(0);
      if (PrintCompressedOopsMode) {
      if (verbose) {
        tty->print(", zero based Compressed Oops");
      }
#ifdef _WIN64

@ -963,12 +964,12 @@ jint Universe::initialize_heap() {
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      } else {
        Universe::set_narrow_oop_shift(0);
        if (PrintCompressedOopsMode) {
        if (verbose) {
          tty->print(", 32-bits Oops");
        }
      }
    }
    if (PrintCompressedOopsMode) {
    if (verbose) {
      tty->cr();
      tty->cr();
    }
@ -444,22 +444,32 @@ void Compile::print_compile_messages() {
}


//-----------------------init_scratch_buffer_blob------------------------------
// Construct a temporary BufferBlob and cache it for this compile.
void Compile::init_scratch_buffer_blob(int const_size) {
  if (scratch_buffer_blob() != NULL) return;
  // If there is already a scratch buffer blob allocated and the
  // constant section is big enough, use it. Otherwise free the
  // current and allocate a new one.
  BufferBlob* blob = scratch_buffer_blob();
  if ((blob != NULL) && (const_size <= _scratch_const_size)) {
    // Use the current blob.
  } else {
    if (blob != NULL) {
      BufferBlob::free(blob);
    }

  // Construct a temporary CodeBuffer to have it construct a BufferBlob
  // Cache this BufferBlob for this compile.
  ResourceMark rm;
  _scratch_const_size = const_size;
  int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size);
  BufferBlob* blob = BufferBlob::create("Compile::scratch_buffer", size);
  // Record the buffer blob for next time.
  set_scratch_buffer_blob(blob);
  // Have we run out of code space?
  if (scratch_buffer_blob() == NULL) {
    // Let CompilerBroker disable further compilations.
    record_failure("Not enough space for scratch buffer in CodeCache");
    return;
    ResourceMark rm;
    _scratch_const_size = const_size;
    int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size);
    blob = BufferBlob::create("Compile::scratch_buffer", size);
    // Record the buffer blob for next time.
    set_scratch_buffer_blob(blob);
    // Have we run out of code space?
    if (scratch_buffer_blob() == NULL) {
      // Let CompilerBroker disable further compilations.
      record_failure("Not enough space for scratch buffer in CodeCache");
      return;
    }
  }

  // Initialize the relocation buffers

@ -468,13 +478,6 @@ void Compile::init_scratch_buffer_blob(int const_size) {
}


void Compile::clear_scratch_buffer_blob() {
  assert(scratch_buffer_blob(), "no BufferBlob set");
  set_scratch_buffer_blob(NULL);
  set_scratch_locs_memory(NULL);
}


//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
uint Compile::scratch_emit_size(const Node* n) {
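The reworked init_scratch_buffer_blob() above is a cache-with-capacity-check: keep the previously created scratch blob when its constant section is still large enough, otherwise free it and allocate a replacement sized for the new request, and fail the compile if even that allocation does not fit. Below is a rough standalone sketch of that shape; ScratchBuffer and the sizes are invented for illustration and are not the HotSpot BufferBlob API.

// Illustrative sketch only -- not part of this changeset.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Illustrative stand-in for the cached scratch blob.
struct ScratchBuffer {
  size_t const_size;   // capacity reserved for the constant section
  char*  bytes;
};

static ScratchBuffer* g_scratch = nullptr;   // cached across "compiles"

// Reuse the cached buffer when its constant section is big enough;
// otherwise free it and allocate a larger one.
ScratchBuffer* init_scratch_buffer(size_t const_size) {
  if (g_scratch != nullptr && const_size <= g_scratch->const_size) {
    return g_scratch;                        // current buffer is good enough
  }
  if (g_scratch != nullptr) {
    free(g_scratch->bytes);                  // release the undersized buffer
    delete g_scratch;
    g_scratch = nullptr;
  }
  const size_t kFixedOverhead = 4096;        // stands in for inst + stub space
  ScratchBuffer* buf = new ScratchBuffer;
  buf->const_size = const_size;
  buf->bytes = static_cast<char*>(malloc(kFixedOverhead + const_size));
  if (buf->bytes == nullptr) {
    delete buf;
    return nullptr;                          // caller records the failure
  }
  g_scratch = buf;
  return buf;
}

int main() {
  init_scratch_buffer(128);    // allocates
  init_scratch_buffer(64);     // reuses: 64 <= 128
  init_scratch_buffer(256);    // frees and reallocates
  printf("cached const_size = %zu\n", g_scratch->const_size);
}
// End of illustrative sketch.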
@ -1609,10 +1609,11 @@ bool ConnectionGraph::compute_escape() {
  //
  // Normally only 1-3 passes needed to build
  // Connection Graph depending on graph complexity.
  // Set limit to 10 to catch situation when something
  // Observed 8 passes in jvm2008 compiler.compiler.
  // Set limit to 20 to catch situation when something
  // did go wrong and recompile the method without EA.

#define CG_BUILD_ITER_LIMIT 10
#define CG_BUILD_ITER_LIMIT 20

  uint length = worklist.length();
  int iterations = 0;
@ -1746,9 +1746,6 @@ void Compile::ScheduleAndBundle() {
  // Walk backwards over each basic block, computing the needed alignment
  // Walk over all the basic blocks
  scheduling.DoScheduling();

  // Clear the BufferBlob used for scheduling.
  clear_scratch_buffer_blob();
}

//------------------------------ComputeLocalLatenciesForward-------------------
@ -59,7 +59,8 @@ class StringConcat : public ResourceObj {
  enum {
    StringMode,
    IntMode,
    CharMode
    CharMode,
    StringNullCheckMode
  };

  StringConcat(PhaseStringOpts* stringopts, CallStaticJavaNode* end):

@ -114,6 +115,9 @@ class StringConcat : public ResourceObj {
  void push_string(Node* value) {
    push(value, StringMode);
  }
  void push_string_null_check(Node* value) {
    push(value, StringNullCheckMode);
  }
  void push_int(Node* value) {
    push(value, IntMode);
  }
@ -416,7 +420,19 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
        if (sig == ciSymbol::string_void_signature()) {
          // StringBuilder(String) so pick this up as the first argument
          assert(use->in(TypeFunc::Parms + 1) != NULL, "what?");
          sc->push_string(use->in(TypeFunc::Parms + 1));
          const Type* type = _gvn->type(use->in(TypeFunc::Parms + 1));
          if (type == TypePtr::NULL_PTR) {
            // StringBuilder(null) throws exception.
#ifndef PRODUCT
            if (PrintOptimizeStringConcat) {
              tty->print("giving up because StringBuilder(null) throws exception");
              alloc->jvms()->dump_spec(tty); tty->cr();
            }
#endif
            return NULL;
          }
          // StringBuilder(str) argument needs null check.
          sc->push_string_null_check(use->in(TypeFunc::Parms + 1));
        }
        // The int variant takes an initial size for the backing
        // array so just treat it like the void version.

@ -436,7 +452,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
#ifndef PRODUCT
        if (PrintOptimizeStringConcat) {
          tty->print("giving up because couldn't find constructor ");
          alloc->jvms()->dump_spec(tty);
          alloc->jvms()->dump_spec(tty); tty->cr();
        }
#endif
        break;
@ -1269,6 +1285,25 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
        string_sizes->init_req(argi, string_size);
        break;
      }
      case StringConcat::StringNullCheckMode: {
        const Type* type = kit.gvn().type(arg);
        assert(type != TypePtr::NULL_PTR, "missing check");
        if (!type->higher_equal(TypeInstPtr::NOTNULL)) {
          // Null check with uncommont trap since
          // StringBuilder(null) throws exception.
          // Use special uncommon trap instead of
          // calling normal do_null_check().
          Node* p = __ Bool(__ CmpP(arg, kit.null()), BoolTest::ne);
          IfNode* iff = kit.create_and_map_if(kit.control(), p, PROB_MIN, COUNT_UNKNOWN);
          overflow->add_req(__ IfFalse(iff));
          Node* notnull = __ IfTrue(iff);
          kit.set_control(notnull); // set control for the cast_not_null
          arg = kit.cast_not_null(arg, false);
          sc->set_argument(argi, arg);
        }
        assert(kit.gvn().type(arg)->higher_equal(TypeInstPtr::NOTNULL), "sanity");
        // Fallthrough to add string length.
      }
      case StringConcat::StringMode: {
        const Type* type = kit.gvn().type(arg);
        if (type == TypePtr::NULL_PTR) {

@ -1328,6 +1363,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
    // Hook
    PreserveJVMState pjvms(&kit);
    kit.set_control(overflow);
    C->record_for_igvn(overflow);
    kit.uncommon_trap(Deoptimization::Reason_intrinsic,
                      Deoptimization::Action_make_not_entrant);
  }

@ -1363,6 +1399,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
        start = end;
        break;
      }
      case StringConcat::StringNullCheckMode:
      case StringConcat::StringMode: {
        start = copy_string(kit, arg, char_array, start);
        break;
@ -111,7 +111,7 @@ bool MethodHandles::spot_check_entry_names() {
//------------------------------------------------------------------------------
// MethodHandles::generate_adapters
//
void MethodHandles::generate_adapters() {
void MethodHandles::generate_adapters(TRAPS) {
  if (!EnableMethodHandles || SystemDictionary::MethodHandle_klass() == NULL) return;

  assert(_adapter_code == NULL, "generate only once");

@ -123,20 +123,20 @@ void MethodHandles::generate_adapters() {
    vm_exit_out_of_memory(_adapter_code_size, "CodeCache: no room for MethodHandles adapters");
  CodeBuffer code(_adapter_code);
  MethodHandlesAdapterGenerator g(&code);
  g.generate();
  g.generate(CHECK);
}


//------------------------------------------------------------------------------
// MethodHandlesAdapterGenerator::generate
//
void MethodHandlesAdapterGenerator::generate() {
void MethodHandlesAdapterGenerator::generate(TRAPS) {
  // Generate generic method handle adapters.
  for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
       ek < MethodHandles::_EK_LIMIT;
       ek = MethodHandles::EntryKind(1 + (int)ek)) {
    StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
    MethodHandles::generate_method_handle_stub(_masm, ek);
    MethodHandles::generate_method_handle_stub(_masm, ek, CHECK);
  }
}
@ -2645,5 +2645,10 @@ JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class))
      MethodHandles::set_enabled(true);
    }
  }

  // Generate method handles adapters if enabled.
  if (MethodHandles::enabled()) {
    MethodHandles::generate_adapters(CHECK);
  }
}
JVM_END
@ -294,11 +294,11 @@ class MethodHandles: AllStatic {
  enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 };

  // Generate MethodHandles adapters.
  static void generate_adapters();
  static void generate_adapters(TRAPS);

  // Called from InterpreterGenerator and MethodHandlesAdapterGenerator.
  static address generate_method_handle_interpreter_entry(MacroAssembler* _masm);
  static void generate_method_handle_stub(MacroAssembler* _masm, EntryKind ek);
  static void generate_method_handle_stub(MacroAssembler* _masm, EntryKind ek, TRAPS);

  // argument list parsing
  static int argument_slot(oop method_type, int arg);

@ -530,7 +530,7 @@ class MethodHandlesAdapterGenerator : public StubCodeGenerator {
public:
  MethodHandlesAdapterGenerator(CodeBuffer* code) : StubCodeGenerator(code) {}

  void generate();
  void generate(TRAPS);
};

#endif // SHARE_VM_PRIMS_METHODHANDLES_HPP
@ -125,9 +125,6 @@ jint init_globals() {
  javaClasses_init();   // must happen after vtable initialization
  stubRoutines_init2(); // note: StubRoutines need 2-phase init

  // Generate MethodHandles adapters.
  MethodHandles::generate_adapters();

  // Although we'd like to, we can't easily do a heap verify
  // here because the main thread isn't yet a JavaThread, so
  // its TLAB may not be made parseable from the usual interfaces.
hotspot/test/compiler/6579789/Test6579789.java (new file, 49 lines)
@ -0,0 +1,49 @@
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/**
 * @test
 * @bug 6579789
 * @summary Internal error "c1_LinearScan.cpp:1429 Error: assert(false,"")" in debuggee with fastdebug VM
 * @run main/othervm -Xcomp -XX:UseSSE=0 -XX:CompileOnly=Test6579789.bug Test6579789
 */

public class Test6579789 {
    public static void main(String[] args) {
        bug(4);
    }
    public static void bug(int n) {
        float f = 1;
        int i = 1;
        try {
            int x = 1 / n; // instruction that can trap
            f = 2;
            i = 2;
            int y = 2 / n; // instruction that can trap
        } catch (Exception ex) {
            f++;
            i++;
        }
    }
}
hotspot/test/compiler/7009231/Test7009231.java (new file, 100 lines)
@ -0,0 +1,100 @@
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/**
 * @test
 * @bug 7009231
 * @summary C1: Incorrect CAS code for longs on SPARC 32bit
 *
 * @run main/othervm -Xbatch Test7009231
 *
 */

import java.util.Random;
import java.util.concurrent.atomic.AtomicLong;


public class Test7009231 {
    public static void main(String[] args) throws InterruptedException {
        doTest(8);
    }

    private static void doTest(int nThreads) throws InterruptedException {
        Thread[] aThreads = new Thread[nThreads];
        final AtomicLong atl = new AtomicLong();

        for (int i = 0; i < nThreads; i++) {
            aThreads[i] = new RunnerThread(atl, 1L << (8 * i));
        }

        for (int i = 0; i < nThreads; i++) {
            aThreads[i].start();
        }

        for (int i = 0; i < nThreads; i++) {
            aThreads[i].join();
        }
    }

    public static class RunnerThread extends Thread {
        public RunnerThread(AtomicLong atomic, long lMask) {
            m_lMask = lMask;
            m_atomic = atomic;
        }

        public void run() {
            AtomicLong atomic = m_atomic;
            long lMask = m_lMask;
            for (int i = 0; i < 100000; i++) {
                setBit(atomic, lMask);
                clearBit(atomic, lMask);
            }
        }

        protected void setBit(AtomicLong atomic, long lMask) {
            long lWord;
            do {
                lWord = atomic.get();
            } while (!atomic.compareAndSet(lWord, lWord | lMask));

            if ((atomic.get() & lMask) == 0L) {
                throw new InternalError();
            }
        }

        protected void clearBit(AtomicLong atomic, long lMask) {
            long lWord;
            do {
                lWord = atomic.get();
            } while (!atomic.compareAndSet(lWord, lWord & ~lMask));

            if ((atomic.get() & lMask) != 0L) {
                throw new InternalError();
            }
        }

        private long m_lMask;
        private AtomicLong m_atomic;
    }
}
hotspot/test/compiler/7009359/Test7009359.java (new file, 52 lines)
@ -0,0 +1,52 @@
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/**
 * @test
 * @bug 7009359
 * @summary HS with -XX:+AggressiveOpts optimize new StringBuffer(null) so it does not throw NPE as expected
 *
 * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+OptimizeStringConcat -XX:CompileCommand=exclude,Test7009359,main Test7009359
 *
 */

public class Test7009359 {
    public static void main (String[] args) {
        for(int i = 0; i < 1000000; i++) {
            if(!stringmakerBUG(null).equals("NPE")) {
                System.out.println("StringBuffer(null) does not throw NPE");
                System.exit(97);
            }
        }
    }

    public static String stringmakerBUG(String str) {
        try {
            return new StringBuffer(str).toString();
        } catch (NullPointerException e) {
            return "NPE";
        }
    }
}