8259937: guarantee(loc != NULL) failed: missing saved register with native invoker

Reviewed-by: kvn, jvernee, vlivanov
This commit is contained in:
Roland Westrelin 2021-03-01 15:11:25 +00:00
parent c569f1d64b
commit 6baecf39d5
28 changed files with 384 additions and 182 deletions

View file

@ -355,10 +355,6 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
vmassert(jfa->last_Java_pc() != NULL, "not walkable");
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
if (jfa->saved_fp_address()) {
update_map_with_saved_link(map, jfa->saved_fp_address());
}
return fr;
}

View file

@ -31,9 +31,6 @@ private:
// FP value associated with _last_Java_sp:
intptr_t* volatile _last_Java_fp; // pointer is volatile not what it points to
// (Optional) location of saved FP register, which GCs want to inspect
intptr_t** volatile _saved_fp_address;
public:
// Each arch must define reset, save, restore
// These are used by objects that only care about:
@ -47,7 +44,6 @@ public:
OrderAccess::release();
_last_Java_fp = NULL;
_last_Java_pc = NULL;
_saved_fp_address = NULL;
}
void copy(JavaFrameAnchor* src) {
@ -66,8 +62,6 @@ public:
_last_Java_pc = src->_last_Java_pc;
// Must be last so profiler will always see valid frame if has_last_frame() is true
_last_Java_sp = src->_last_Java_sp;
_saved_fp_address = src->_saved_fp_address;
}
bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; }
@ -78,12 +72,9 @@ public:
address last_Java_pc(void) { return _last_Java_pc; }
intptr_t** saved_fp_address(void) const { return _saved_fp_address; }
private:
static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); }
static ByteSize saved_fp_address_offset() { return byte_offset_of(JavaFrameAnchor, _saved_fp_address); }
public:

View file

@ -320,8 +320,6 @@ void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
// Always clear the pc because it could have been set by make_walkable()
str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
str(zr, Address(rthread, JavaThread::saved_fp_address_offset()));
}
// Calls to C land

View file

@ -3072,7 +3072,6 @@ void OptoRuntime::generate_exception_blob() {
// Set exception blob
_exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2
// ---------------------------------------------------------------
@ -3082,6 +3081,10 @@ class NativeInvokerGenerator : public StubCodeGenerator {
const GrowableArray<VMReg>& _input_registers;
const GrowableArray<VMReg>& _output_registers;
int _frame_complete;
int _framesize;
OopMapSet* _oop_maps;
public:
NativeInvokerGenerator(CodeBuffer* buffer,
address call_target,
@ -3092,9 +3095,90 @@ public:
_call_target(call_target),
_shadow_space_bytes(shadow_space_bytes),
_input_registers(input_registers),
_output_registers(output_registers) {}
_output_registers(output_registers),
_frame_complete(0),
_framesize(0),
_oop_maps(NULL) {
assert(_output_registers.length() <= 1
|| (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns");
}
void generate();
int spill_size_in_bytes() const {
if (_output_registers.length() == 0) {
return 0;
}
VMReg reg = _output_registers.at(0);
assert(reg->is_reg(), "must be a register");
if (reg->is_Register()) {
return 8;
} else if (reg->is_FloatRegister()) {
bool use_sve = Matcher::supports_scalable_vector();
if (use_sve) {
return Matcher::scalable_vector_reg_size(T_BYTE);
}
return 16;
} else {
ShouldNotReachHere();
}
return 0;
}
void spill_output_registers() {
if (_output_registers.length() == 0) {
return;
}
VMReg reg = _output_registers.at(0);
assert(reg->is_reg(), "must be a register");
MacroAssembler* masm = _masm;
if (reg->is_Register()) {
__ spill(reg->as_Register(), true, 0);
} else if (reg->is_FloatRegister()) {
bool use_sve = Matcher::supports_scalable_vector();
if (use_sve) {
__ spill_sve_vector(reg->as_FloatRegister(), 0, Matcher::scalable_vector_reg_size(T_BYTE));
} else {
__ spill(reg->as_FloatRegister(), __ Q, 0);
}
} else {
ShouldNotReachHere();
}
}
void fill_output_registers() {
if (_output_registers.length() == 0) {
return;
}
VMReg reg = _output_registers.at(0);
assert(reg->is_reg(), "must be a register");
MacroAssembler* masm = _masm;
if (reg->is_Register()) {
__ unspill(reg->as_Register(), true, 0);
} else if (reg->is_FloatRegister()) {
bool use_sve = Matcher::supports_scalable_vector();
if (use_sve) {
__ unspill_sve_vector(reg->as_FloatRegister(), 0, Matcher::scalable_vector_reg_size(T_BYTE));
} else {
__ unspill(reg->as_FloatRegister(), __ Q, 0);
}
} else {
ShouldNotReachHere();
}
}
int frame_complete() const {
return _frame_complete;
}
int framesize() const {
return (_framesize >> (LogBytesPerWord - LogBytesPerInt));
}
OopMapSet* oop_maps() const {
return _oop_maps;
}
private:
#ifdef ASSERT
bool target_uses_register(VMReg reg) {
@ -3105,21 +3189,23 @@ private:
static const int native_invoker_code_size = 1024;
BufferBlob* SharedRuntime::make_native_invoker(address call_target,
RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
int shadow_space_bytes,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers) {
BufferBlob* _invoke_native_blob =
BufferBlob::create("nep_invoker_blob", native_invoker_code_size);
if (_invoke_native_blob == NULL)
return NULL; // allocation failure
CodeBuffer code(_invoke_native_blob);
int locs_size = 64;
CodeBuffer code("nep_invoker_blob", native_invoker_code_size, locs_size);
NativeInvokerGenerator g(&code, call_target, shadow_space_bytes, input_registers, output_registers);
g.generate();
code.log_section_sizes("nep_invoker_blob");
return _invoke_native_blob;
RuntimeStub* stub =
RuntimeStub::new_runtime_stub("nep_invoker_blob",
&code,
g.frame_complete(),
g.framesize(),
g.oop_maps(), false);
return stub;
}
void NativeInvokerGenerator::generate() {
@ -3128,26 +3214,40 @@ void NativeInvokerGenerator::generate() {
|| target_uses_register(rthread->as_VMReg())),
"Register conflict");
enum layout {
rbp_off,
rbp_off2,
return_off,
return_off2,
framesize // inclusive of return address
};
assert(_shadow_space_bytes == 0, "not expecting shadow space on AArch64");
_framesize = align_up(framesize + (spill_size_in_bytes() >> LogBytesPerInt), 4);
assert(is_even(_framesize/2), "sp not 16-byte aligned");
_oop_maps = new OopMapSet();
MacroAssembler* masm = _masm;
__ set_last_Java_frame(sp, noreg, lr, rscratch1);
address start = __ pc();
__ enter();
// Store a pointer to the previous R29 (RFP) saved on the stack as it
// may contain an oop if PreserveFramePointer is off. This value is
// retrieved later by frame::sender_for_entry_frame() when the stack
// is walked.
__ mov(rscratch1, sp);
__ str(rscratch1, Address(rthread, JavaThread::saved_fp_address_offset()));
// lr and fp are already in place
__ sub(sp, rfp, ((unsigned)_framesize-4) << LogBytesPerInt); // prolog
_frame_complete = __ pc() - start;
address the_pc = __ pc();
__ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
OopMap* map = new OopMap(_framesize, 0);
_oop_maps->add_gc_map(the_pc - start, map);
// State transition
__ mov(rscratch1, _thread_in_native);
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
__ stlrw(rscratch1, rscratch2);
assert(_shadow_space_bytes == 0, "not expecting shadow space on AArch64");
rt_call(masm, _call_target);
__ mov(rscratch1, _thread_in_native_trans);
@ -3193,27 +3293,14 @@ void NativeInvokerGenerator::generate() {
__ bind(L_safepoint_poll_slow_path);
// Need to save the native result registers around any runtime calls.
RegSet spills;
FloatRegSet fp_spills;
for (int i = 0; i < _output_registers.length(); i++) {
VMReg output = _output_registers.at(i);
if (output->is_Register()) {
spills += RegSet::of(output->as_Register());
} else if (output->is_FloatRegister()) {
fp_spills += FloatRegSet::of(output->as_FloatRegister());
}
}
__ push(spills, sp);
__ push_fp(fp_spills, sp);
spill_output_registers();
__ mov(c_rarg0, rthread);
assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
__ lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
__ blr(rscratch1);
__ pop_fp(fp_spills, sp);
__ pop(spills, sp);
fill_output_registers();
__ b(L_after_safepoint_poll);
__ block_comment("} L_safepoint_poll_slow_path");
@ -3223,13 +3310,11 @@ void NativeInvokerGenerator::generate() {
__ block_comment("{ L_reguard");
__ bind(L_reguard);
__ push(spills, sp);
__ push_fp(fp_spills, sp);
spill_output_registers();
rt_call(masm, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
__ pop_fp(fp_spills, sp);
__ pop(spills, sp);
fill_output_registers();
__ b(L_after_reguard);
@ -3239,3 +3324,4 @@ void NativeInvokerGenerator::generate() {
__ flush();
}
#endif // COMPILER2

View file

@ -1898,10 +1898,12 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
BufferBlob* SharedRuntime::make_native_invoker(address call_target,
#ifdef COMPILER2
RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
int shadow_space_bytes,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers) {
Unimplemented();
return nullptr;
}
#endif

View file

@ -3442,10 +3442,12 @@ void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
reverse_words(m, (unsigned long *)m_ints, longwords);
}
BufferBlob* SharedRuntime::make_native_invoker(address call_target,
#ifdef COMPILER2
RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
int shadow_space_bytes,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers) {
Unimplemented();
return nullptr;
}
#endif

View file

@ -3468,10 +3468,12 @@ int SpinPause() {
return 0;
}
BufferBlob* SharedRuntime::make_native_invoker(address call_target,
#ifdef COMPILER2
RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
int shadow_space_bytes,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers) {
Unimplemented();
return nullptr;
}
#endif

View file

@ -346,10 +346,6 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
vmassert(jfa->last_Java_pc() != NULL, "not walkable");
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
if (jfa->saved_rbp_address()) {
update_map_with_saved_link(map, jfa->saved_rbp_address());
}
return fr;
}

View file

@ -30,9 +30,6 @@ private:
// FP value associated with _last_Java_sp:
intptr_t* volatile _last_Java_fp; // pointer is volatile not what it points to
// (Optional) location of saved RBP register, which GCs want to inspect
intptr_t** volatile _saved_rbp_address;
public:
// Each arch must define reset, save, restore
// These are used by objects that only care about:
@ -46,7 +43,6 @@ public:
// fence?
_last_Java_fp = NULL;
_last_Java_pc = NULL;
_saved_rbp_address = NULL;
}
void copy(JavaFrameAnchor* src) {
@ -64,8 +60,6 @@ public:
_last_Java_pc = src->_last_Java_pc;
// Must be last so profiler will always see valid frame if has_last_frame() is true
_last_Java_sp = src->_last_Java_sp;
_saved_rbp_address = src->_saved_rbp_address;
}
bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; }
@ -76,12 +70,9 @@ public:
address last_Java_pc(void) { return _last_Java_pc; }
intptr_t** saved_rbp_address(void) const { return _saved_rbp_address; }
private:
static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); }
static ByteSize saved_rbp_address_offset() { return byte_offset_of(JavaFrameAnchor, _saved_rbp_address); }
public:

View file

@ -2732,7 +2732,6 @@ void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp)
}
// Always clear the pc because it could have been set by make_walkable()
movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
movptr(Address(java_thread, JavaThread::saved_rbp_address_offset()), NULL_WORD);
vzeroupper();
}

View file

@ -2980,10 +2980,12 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
BufferBlob* SharedRuntime::make_native_invoker(address call_target,
#ifdef COMPILER2
RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
int shadow_space_bytes,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers) {
ShouldNotCallThis();
return nullptr;
}
#endif

View file

@ -3161,7 +3161,6 @@ void SharedRuntime::generate_uncommon_trap_blob() {
}
#endif // COMPILER2
//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers,
@ -3410,6 +3409,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}
#ifdef COMPILER2
static const int native_invoker_code_size = MethodHandles::adapter_code_size;
class NativeInvokerGenerator : public StubCodeGenerator {
@ -3418,6 +3418,10 @@ class NativeInvokerGenerator : public StubCodeGenerator {
const GrowableArray<VMReg>& _input_registers;
const GrowableArray<VMReg>& _output_registers;
int _frame_complete;
int _framesize;
OopMapSet* _oop_maps;
public:
NativeInvokerGenerator(CodeBuffer* buffer,
address call_target,
@ -3428,23 +3432,54 @@ public:
_call_target(call_target),
_shadow_space_bytes(shadow_space_bytes),
_input_registers(input_registers),
_output_registers(output_registers) {}
_output_registers(output_registers),
_frame_complete(0),
_framesize(0),
_oop_maps(NULL) {
assert(_output_registers.length() <= 1
|| (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns");
}
void generate();
void spill_register(VMReg reg) {
int spill_size_in_bytes() const {
if (_output_registers.length() == 0) {
return 0;
}
VMReg reg = _output_registers.at(0);
assert(reg->is_reg(), "must be a register");
if (reg->is_Register()) {
return 8;
} else if (reg->is_XMMRegister()) {
if (UseAVX >= 3) {
return 64;
} else if (UseAVX >= 1) {
return 32;
} else {
return 16;
}
} else {
ShouldNotReachHere();
}
return 0;
}
void spill_out_registers() {
if (_output_registers.length() == 0) {
return;
}
VMReg reg = _output_registers.at(0);
assert(reg->is_reg(), "must be a register");
MacroAssembler* masm = _masm;
if (reg->is_Register()) {
__ push(reg->as_Register());
__ movptr(Address(rsp, 0), reg->as_Register());
} else if (reg->is_XMMRegister()) {
if (UseAVX >= 3) {
__ subptr(rsp, 64); // bytes
__ evmovdqul(Address(rsp, 0), reg->as_XMMRegister(), Assembler::AVX_512bit);
} else if (UseAVX >= 1) {
__ subptr(rsp, 32);
__ vmovdqu(Address(rsp, 0), reg->as_XMMRegister());
} else {
__ subptr(rsp, 16);
__ movdqu(Address(rsp, 0), reg->as_XMMRegister());
}
} else {
@ -3452,27 +3487,40 @@ public:
}
}
void fill_register(VMReg reg) {
void fill_out_registers() {
if (_output_registers.length() == 0) {
return;
}
VMReg reg = _output_registers.at(0);
assert(reg->is_reg(), "must be a register");
MacroAssembler* masm = _masm;
if (reg->is_Register()) {
__ pop(reg->as_Register());
__ movptr(reg->as_Register(), Address(rsp, 0));
} else if (reg->is_XMMRegister()) {
if (UseAVX >= 3) {
__ evmovdqul(reg->as_XMMRegister(), Address(rsp, 0), Assembler::AVX_512bit);
__ addptr(rsp, 64); // bytes
} else if (UseAVX >= 1) {
__ vmovdqu(reg->as_XMMRegister(), Address(rsp, 0));
__ addptr(rsp, 32);
} else {
__ movdqu(reg->as_XMMRegister(), Address(rsp, 0));
__ addptr(rsp, 16);
}
} else {
ShouldNotReachHere();
}
}
int frame_complete() const {
return _frame_complete;
}
int framesize() const {
return (_framesize >> (LogBytesPerWord - LogBytesPerInt));
}
OopMapSet* oop_maps() const {
return _oop_maps;
}
private:
#ifdef ASSERT
bool target_uses_register(VMReg reg) {
@ -3481,58 +3529,62 @@ bool target_uses_register(VMReg reg) {
#endif
};
BufferBlob* SharedRuntime::make_native_invoker(address call_target,
RuntimeStub* SharedRuntime::make_native_invoker(address call_target,
int shadow_space_bytes,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers) {
BufferBlob* _invoke_native_blob = BufferBlob::create("nep_invoker_blob", native_invoker_code_size);
if (_invoke_native_blob == NULL)
return NULL; // allocation failure
CodeBuffer code(_invoke_native_blob);
int locs_size = 64;
CodeBuffer code("nep_invoker_blob", native_invoker_code_size, locs_size);
NativeInvokerGenerator g(&code, call_target, shadow_space_bytes, input_registers, output_registers);
g.generate();
code.log_section_sizes("nep_invoker_blob");
return _invoke_native_blob;
RuntimeStub* stub =
RuntimeStub::new_runtime_stub("nep_invoker_blob",
&code,
g.frame_complete(),
g.framesize(),
g.oop_maps(), false);
return stub;
}
void NativeInvokerGenerator::generate() {
assert(!(target_uses_register(r15_thread->as_VMReg()) || target_uses_register(rscratch1->as_VMReg())), "Register conflict");
enum layout {
rbp_off,
rbp_off2,
return_off,
return_off2,
framesize // inclusive of return address
};
_framesize = align_up(framesize + ((_shadow_space_bytes + spill_size_in_bytes()) >> LogBytesPerInt), 4);
assert(is_even(_framesize/2), "sp not 16-byte aligned");
_oop_maps = new OopMapSet();
MacroAssembler* masm = _masm;
address start = __ pc();
__ enter();
Address java_pc(r15_thread, JavaThread::last_Java_pc_offset());
__ movptr(rscratch1, Address(rsp, 8)); // read return address from stack
__ movptr(java_pc, rscratch1);
// return address and rbp are already in place
__ subptr(rsp, (_framesize-4) << LogBytesPerInt); // prolog
__ movptr(rscratch1, rsp);
__ addptr(rscratch1, 16); // skip return and frame
__ movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), rscratch1);
_frame_complete = __ pc() - start;
__ movptr(Address(r15_thread, JavaThread::saved_rbp_address_offset()), rsp); // rsp points at saved RBP
address the_pc = __ pc();
__ set_last_Java_frame(rsp, rbp, (address)the_pc);
OopMap* map = new OopMap(_framesize, 0);
_oop_maps->add_gc_map(the_pc - start, map);
// State transition
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
if (_shadow_space_bytes != 0) {
// needed here for correct stack args offset on Windows
__ subptr(rsp, _shadow_space_bytes);
}
__ call(RuntimeAddress(_call_target));
if (_shadow_space_bytes != 0) {
// needed here for correct stack args offset on Windows
__ addptr(rsp, _shadow_space_bytes);
}
assert(_output_registers.length() <= 1
|| (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns");
bool need_spills = _output_registers.length() != 0;
VMReg ret_reg = need_spills ? _output_registers.at(0) : VMRegImpl::Bad();
__ restore_cpu_control_state_after_jni();
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
@ -3572,9 +3624,7 @@ void NativeInvokerGenerator::generate() {
__ bind(L_safepoint_poll_slow_path);
__ vzeroupper();
if (need_spills) {
spill_register(ret_reg);
}
spill_out_registers();
__ mov(c_rarg0, r15_thread);
__ mov(r12, rsp); // remember sp
@ -3584,9 +3634,7 @@ void NativeInvokerGenerator::generate() {
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
if (need_spills) {
fill_register(ret_reg);
}
fill_out_registers();
__ jmp(L_after_safepoint_poll);
__ block_comment("} L_safepoint_poll_slow_path");
@ -3597,9 +3645,7 @@ void NativeInvokerGenerator::generate() {
__ bind(L_reguard);
__ vzeroupper();
if (need_spills) {
spill_register(ret_reg);
}
spill_out_registers();
__ mov(r12, rsp); // remember sp
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
@ -3608,9 +3654,7 @@ void NativeInvokerGenerator::generate() {
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
if (need_spills) {
fill_register(ret_reg);
}
fill_out_registers();
__ jmp(L_after_reguard);
@ -3620,6 +3664,7 @@ void NativeInvokerGenerator::generate() {
__ flush();
}
#endif // COMPILER2
//------------------------------Montgomery multiplication------------------------
//

View file

@ -37,10 +37,6 @@
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
static ByteSize saved_rbp_address_offset() {
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_rbp_address_offset();
}
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);

View file

@ -39,10 +39,6 @@
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
static ByteSize saved_fp_address_offset() {
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_fp_address_offset();
}
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);

View file

@ -37,10 +37,6 @@
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
static ByteSize saved_rbp_address_offset() {
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_rbp_address_offset();
}
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);

View file

@ -38,10 +38,6 @@
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
static ByteSize saved_fp_address_offset() {
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_fp_address_offset();
}
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);

View file

@ -44,10 +44,6 @@
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
static ByteSize saved_rbp_address_offset() {
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_rbp_address_offset();
}
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);

View file

@ -955,7 +955,7 @@ void ciEnv::register_method(ciMethod* target,
bool has_unsafe_access,
bool has_wide_vectors,
RTMState rtm_state,
const GrowableArrayView<BufferBlob*>& native_invokers) {
const GrowableArrayView<RuntimeStub*>& native_invokers) {
VM_ENTRY_MARK;
nmethod* nm = NULL;
{

View file

@ -380,7 +380,7 @@ public:
bool has_unsafe_access,
bool has_wide_vectors,
RTMState rtm_state = NoRTM,
const GrowableArrayView<BufferBlob*>& native_invokers = GrowableArrayView<BufferBlob*>::EMPTY);
const GrowableArrayView<RuntimeStub*>& native_invokers = GrowableArrayView<RuntimeStub*>::EMPTY);
// Access to certain well known ciObjects.

View file

@ -502,7 +502,7 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
int comp_level,
const GrowableArrayView<BufferBlob*>& native_invokers
const GrowableArrayView<RuntimeStub*>& native_invokers
#if INCLUDE_JVMCI
, char* speculations,
int speculations_len,
@ -727,7 +727,7 @@ nmethod::nmethod(
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
int comp_level,
const GrowableArrayView<BufferBlob*>& native_invokers
const GrowableArrayView<RuntimeStub*>& native_invokers
#if INCLUDE_JVMCI
, char* speculations,
int speculations_len,
@ -1058,7 +1058,7 @@ void nmethod::copy_values(GrowableArray<Metadata*>* array) {
}
void nmethod::free_native_invokers() {
for (BufferBlob** it = native_invokers_begin(); it < native_invokers_end(); it++) {
for (RuntimeStub** it = native_invokers_begin(); it < native_invokers_end(); it++) {
CodeCache::free(*it);
}
}
@ -2697,7 +2697,7 @@ void nmethod::print_pcs_on(outputStream* st) {
void nmethod::print_native_invokers() {
ResourceMark m; // in case methods get printed via debugger
tty->print_cr("Native invokers:");
for (BufferBlob** itt = native_invokers_begin(); itt < native_invokers_end(); itt++) {
for (RuntimeStub** itt = native_invokers_begin(); itt < native_invokers_end(); itt++) {
(*itt)->print_on(tty);
}
}

View file

@ -314,7 +314,7 @@ class nmethod : public CompiledMethod {
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
int comp_level,
const GrowableArrayView<BufferBlob*>& native_invokers
const GrowableArrayView<RuntimeStub*>& native_invokers
#if INCLUDE_JVMCI
, char* speculations,
int speculations_len,
@ -363,7 +363,7 @@ class nmethod : public CompiledMethod {
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
int comp_level,
const GrowableArrayView<BufferBlob*>& native_invokers = GrowableArrayView<BufferBlob*>::EMPTY
const GrowableArrayView<RuntimeStub*>& native_invokers = GrowableArrayView<RuntimeStub*>::EMPTY
#if INCLUDE_JVMCI
, char* speculations = NULL,
int speculations_len = 0,
@ -413,8 +413,8 @@ class nmethod : public CompiledMethod {
PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
address dependencies_end () const { return header_begin() + _native_invokers_offset ; }
BufferBlob** native_invokers_begin() const { return (BufferBlob**)(header_begin() + _native_invokers_offset) ; }
BufferBlob** native_invokers_end () const { return (BufferBlob**)(header_begin() + _handler_table_offset); }
RuntimeStub** native_invokers_begin() const { return (RuntimeStub**)(header_begin() + _native_invokers_offset) ; }
RuntimeStub** native_invokers_end () const { return (RuntimeStub**)(header_begin() + _handler_table_offset); }
address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }

View file

@ -1627,7 +1627,7 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV,
debug_info, dependencies, code_buffer,
frame_words, oop_map_set,
handler_table, implicit_exception_table,
compiler, comp_level, GrowableArrayView<BufferBlob*>::EMPTY,
compiler, comp_level, GrowableArrayView<RuntimeStub*>::EMPTY,
speculations, speculations_len,
nmethod_mirror_index, nmethod_mirror_name, failed_speculations);

View file

@ -4790,6 +4790,6 @@ void Compile::igv_print_method_to_network(const char* phase_name) {
}
#endif
void Compile::add_native_invoker(BufferBlob* stub) {
void Compile::add_native_invoker(RuntimeStub* stub) {
_native_invokers.append(stub);
}

View file

@ -388,7 +388,7 @@ class Compile : public Phase {
int _late_inlines_pos; // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
uint _number_of_mh_late_inlines; // number of method handle late inlining still pending
GrowableArray<BufferBlob*> _native_invokers;
GrowableArray<RuntimeStub*> _native_invokers;
// Inlining may not happen in parse order which would make
// PrintInlining output confusing. Keep track of PrintInlining
@ -951,9 +951,9 @@ class Compile : public Phase {
_vector_reboxing_late_inlines.push(cg);
}
void add_native_invoker(BufferBlob* stub);
void add_native_invoker(RuntimeStub* stub);
const GrowableArray<BufferBlob*>& native_invokers() const { return _native_invokers; }
const GrowableArray<RuntimeStub*> native_invokers() const { return _native_invokers; }
void remove_useless_nodes (GrowableArray<Node*>& node_list, Unique_Node_List &useful);

View file

@ -2632,7 +2632,7 @@ Node* GraphKit::make_native_call(const TypeFunc* call_type, uint nargs, ciNative
address call_addr = nep->entry_point();
if (nep->need_transition()) {
BufferBlob* invoker = SharedRuntime::make_native_invoker(call_addr,
RuntimeStub* invoker = SharedRuntime::make_native_invoker(call_addr,
nep->shadow_space(),
arg_regs, ret_regs);
if (invoker == NULL) {

View file

@ -517,10 +517,12 @@ class SharedRuntime: AllStatic {
static address handle_unsafe_access(JavaThread* thread, address next_pc);
static BufferBlob* make_native_invoker(address call_target,
#ifdef COMPILER2
static RuntimeStub* make_native_invoker(address call_target,
int shadow_space_bytes,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers);
#endif
#ifndef PRODUCT

View file

@ -0,0 +1,78 @@
/*
* Copyright (c) 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test
* @bug 8259937
 * @summary guarantee(loc != NULL) failed: missing saved register with native invoker
*
* @requires vm.flavor == "server"
* @requires ((os.arch == "amd64" | os.arch == "x86_64") & sun.arch.data.model == "64") | os.arch == "aarch64"
* @requires vm.gc.Shenandoah
*
* @modules jdk.incubator.foreign
*
* @run main/othervm -Dforeign.restricted=permit -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive TestLinkToNativeRBP
*
*/
import jdk.incubator.foreign.CLinker;
import jdk.incubator.foreign.FunctionDescriptor;
import jdk.incubator.foreign.LibraryLookup;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodType;
import static jdk.incubator.foreign.CLinker.C_INT;
// Regression test for JDK-8259937: repeatedly invokes a native downcall
// ("linkToNative") inside a hot loop that also keeps an oop (the boxed
// Integer read from 'field') live across the call, so that a GC walking
// the stack during the native invoker stub must find the saved FP/RBP
// register via the stub's oop map.
public class TestLinkToNativeRBP {
    final static CLinker abi = CLinker.getInstance();
    static final LibraryLookup lookup = LibraryLookup.ofLibrary("LinkToNativeRBP");
    // Downcall handle for the native int foo() defined in libLinkToNativeRBP.
    final static MethodHandle foo = abi.downcallHandle(lookup.lookup("foo").get(),
            MethodType.methodType(int.class),
            FunctionDescriptor.of(C_INT));

    // Wrapper so the downcall appears as an ordinary static call in test().
    static int foo() throws Throwable {
        return (int)foo.invokeExact();
    }

    public static void main(String[] args) throws Throwable {
        // Warm up until test() is C2-compiled (the bug is C2-specific).
        for (int i = 0; i < 20_000; i++) {
            test(5);
        }
        // Long-running iterations so aggressive Shenandoah GC cycles
        // overlap the compiled native-invoker call.
        for (int i = 0; i < 100; i++) {
            test(1_000_000);
        }
    }

    // volatile so the load of 'field' is not hoisted out of the loop.
    static volatile Integer field = 0;

    private static int test(int stop) throws Throwable {
        int res = 0;
        for (int i = 0; i < stop; i++) {
            Integer v = field;  // oop live across the native call below
            res = foo() + v.intValue();
        }
        return res;
    }
}

View file

@ -0,0 +1,32 @@
/*
* Copyright (c) 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifdef _WIN64
#define EXPORT __declspec(dllexport)
#else
#define EXPORT
#endif
/* Minimal native callee for TestLinkToNativeRBP: the test only needs a
 * real downcall target; the returned value (0) is irrelevant. */
EXPORT int foo() {
return 0;
}