8132318: -XX:TraceJumps is broken on Sparc

Reviewed-by: kvn, thartmann
This commit is contained in:
Dmitrij Pochepko 2016-06-27 17:23:15 +03:00
parent 4ab4c66c41
commit 490625cd8b
13 changed files with 12 additions and 139 deletions

View file

@@ -243,7 +243,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
 }

 int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) {
+  if (DebugVtables || CountCompiledCalls || VerifyOops) {
     return 1000;
   } else {
     int decode_klass_size = MacroAssembler::instr_size_for_decode_klass_not_null();

View file

@@ -77,8 +77,7 @@ int CompiledStaticCall::to_interp_stub_size() {
   // This doesn't need to be accurate but it must be larger or equal to
   // the real size of the stub.
   return (NativeMovConstReg::instruction_size +  // sethi/setlo;
-          NativeJump::instruction_size + // sethi; jmp; nop
-          (TraceJumps ? 20 * BytesPerInstWord : 0) );
+          NativeJump::instruction_size); // sethi; jmp; nop
 }

 // Relocation entries for call stub, compiled java to interpreter.

View file

@@ -33,12 +33,10 @@
 int InlineCacheBuffer::ic_stub_code_size() {
 #ifdef _LP64
-  if (TraceJumps) return 600 * wordSize;
   return (NativeMovConstReg::instruction_size +  // sethi;add
           NativeJump::instruction_size +         // sethi; jmp; delay slot
           (1*BytesPerInstWord) + 1);             // flush + 1 extra byte
 #else
-  if (TraceJumps) return 300 * wordSize;
   return (2+2+ 1) * wordSize + 1; // set/jump_to/nop + 1 byte so that code_end can be set in CodeBuffer
 #endif
 }

View file

@@ -184,72 +184,10 @@ void MacroAssembler::null_check(Register reg, int offset) {

 void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
   assert_not_delayed();
-  // This can only be traceable if r1 & r2 are visible after a window save
-  if (TraceJumps) {
-#ifndef PRODUCT
-    save_frame(0);
-    verify_thread();
-    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
-    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
-    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
-    add(O2, O1, O1);
-
-    add(r1->after_save(), r2->after_save(), O2);
-    set((intptr_t)file, O3);
-    set(line, O4);
-    Label L;
-    // get nearby pc, store jmp target
-    call(L, relocInfo::none);  // No relocation for call to pc+0x8
-    delayed()->st(O2, O1, 0);
-    bind(L);
-
-    // store nearby pc
-    st(O7, O1, sizeof(intptr_t));
-    // store file
-    st(O3, O1, 2*sizeof(intptr_t));
-    // store line
-    st(O4, O1, 3*sizeof(intptr_t));
-    add(O0, 1, O0);
-    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
-    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
-    restore();
-#endif /* PRODUCT */
-  }
   jmpl(r1, r2, G0);
 }

 void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
   assert_not_delayed();
-  // This can only be traceable if r1 is visible after a window save
-  if (TraceJumps) {
-#ifndef PRODUCT
-    save_frame(0);
-    verify_thread();
-    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
-    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
-    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
-    add(O2, O1, O1);
-
-    add(r1->after_save(), offset, O2);
-    set((intptr_t)file, O3);
-    set(line, O4);
-    Label L;
-    // get nearby pc, store jmp target
-    call(L, relocInfo::none);  // No relocation for call to pc+0x8
-    delayed()->st(O2, O1, 0);
-    bind(L);
-
-    // store nearby pc
-    st(O7, O1, sizeof(intptr_t));
-    // store file
-    st(O3, O1, 2*sizeof(intptr_t));
-    // store line
-    st(O4, O1, 3*sizeof(intptr_t));
-    add(O0, 1, O0);
-    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
-    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
-    restore();
-#endif /* PRODUCT */
-  }
   jmp(r1, offset);
 }
@@ -260,44 +198,7 @@ void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Registe
   // variable length instruction streams.
   patchable_sethi(addrlit, temp);
   Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
-  if (TraceJumps) {
-#ifndef PRODUCT
-    // Must do the add here so relocation can find the remainder of the
-    // value to be relocated.
-    add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
-    save_frame(0);
-    verify_thread();
-    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
-    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
-    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
-    add(O2, O1, O1);
-
-    set((intptr_t)file, O3);
-    set(line, O4);
-    Label L;
-    // get nearby pc, store jmp target
-    call(L, relocInfo::none);  // No relocation for call to pc+0x8
-    delayed()->st(a.base()->after_save(), O1, 0);
-    bind(L);
-
-    // store nearby pc
-    st(O7, O1, sizeof(intptr_t));
-    // store file
-    st(O3, O1, 2*sizeof(intptr_t));
-    // store line
-    st(O4, O1, 3*sizeof(intptr_t));
-    add(O0, 1, O0);
-    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
-    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
-    restore();
-    jmpl(a.base(), G0, d);
-#else
-    jmpl(a.base(), a.disp(), d);
-#endif /* PRODUCT */
-  } else {
-    jmpl(a.base(), a.disp(), d);
-  }
+  jmpl(a.base(), a.disp(), d);
 }

 void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) { void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {

View file

@@ -703,8 +703,8 @@ class MacroAssembler : public Assembler {

   inline void tst( Register s );

-  inline void ret(  bool trace = TraceJumps );
-  inline void retl( bool trace = TraceJumps );
+  inline void ret(  bool trace = false );
+  inline void retl( bool trace = false );

   // Required platform-specific helpers for Label::patch_instructions.
   // They _shadow_ the declarations in AbstractAssembler, which are undefined.

View file

@@ -760,8 +760,7 @@ void NativeJump::verify() {
   Register rd = inv_rd(i0);
 #ifndef _LP64
   if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
-        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
-        (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
+        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op)) &&
         inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
         rd == inv_rs1(i1))) {
     fatal("not a jump_to instruction");

View file

@@ -3368,9 +3368,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
   // setup code generation tools
   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
-  // even larger with TraceJumps
-  int pad = TraceJumps ? 512 : 0;
-  CodeBuffer buffer("handler_blob", 1600 + pad, 512);
+  CodeBuffer buffer("handler_blob", 1600, 512);
   MacroAssembler* masm = new MacroAssembler(&buffer);
   int frame_size_words;
   OopMapSet *oop_maps = new OopMapSet();
@@ -3462,9 +3460,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
   // setup code generation tools
   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
-  // even larger with TraceJumps
-  int pad = TraceJumps ? 512 : 0;
-  CodeBuffer buffer(name, 1600 + pad, 512);
+  CodeBuffer buffer(name, 1600, 512);
   MacroAssembler* masm = new MacroAssembler(&buffer);
   int frame_size_words;
   OopMapSet *oop_maps = new OopMapSet();

View file

@@ -501,16 +501,10 @@ class HandlerImpl {
   static int emit_deopt_handler(CodeBuffer& cbuf);

   static uint size_exception_handler() {
-    if (TraceJumps) {
-      return (400); // just a guess
-    }
     return ( NativeJump::instruction_size ); // sethi;jmp;nop
   }

   static uint size_deopt_handler() {
-    if (TraceJumps) {
-      return (400); // just a guess
-    }
     return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
   }
 };
@@ -2661,8 +2655,7 @@ encode %{

     // Emit stub for static call.
     address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
-    // Stub does not fit into scratch buffer if TraceJumps is enabled
-    if (stub == NULL && !(TraceJumps && Compile::current()->in_scratch_emit_size())) {
+    if (stub == NULL) {
       ciEnv::current()->record_failure("CodeCache is full");
       return;
     }

View file

@@ -1560,13 +1560,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
     __ bind(ok);
   }
 #endif
-  if (TraceJumps) {
-    // Move target to register that is recordable
-    __ mov(Lscratch, G3_scratch);
-    __ JMP(G3_scratch, 0);
-  } else {
-    __ jmp(Lscratch, 0);
-  }
+  __ jmp(Lscratch, 0);
   __ delayed()->nop();

View file

@@ -221,7 +221,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {

 int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
+  if (DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
   else {
     const int slop = 2*BytesPerInstWord; // sethi;add (needed for long offsets)
     if (is_vtable_stub) {

View file

@@ -545,10 +545,6 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
       // factor me: setPC
       os::Solaris::ucontext_set_pc(uc, stub);
-
-#ifndef PRODUCT
-      if (TraceJumps) thread->record_jump(stub, NULL, __FILE__, __LINE__);
-#endif /* PRODUCT */
       return true;
     }

View file

@@ -952,7 +952,7 @@ CodeBuffer* Compile::init_buffer(uint* blk_starts) {
   // Set the initially allocated size
   int  code_req   = initial_code_capacity;
   int  locs_req   = initial_locs_capacity;
-  int  stub_req   = TraceJumps ? initial_stub_capacity * 10 : initial_stub_capacity;
+  int  stub_req   = initial_stub_capacity;
   int  const_req  = initial_const_capacity;

   int  pad_req    = NativeCall::instruction_size;

View file

@@ -802,9 +802,6 @@ public:
   develop(bool, TracePcPatching, false,                                     \
           "Trace usage of frame::patch_pc")                                 \
                                                                             \
-  develop(bool, TraceJumps, false,                                          \
-          "Trace assembly jumps in thread ring buffer")                     \
-                                                                            \
   develop(bool, TraceRelocator, false,                                      \
           "Trace the bytecode relocator")                                   \
                                                                             \