8201786: Modularize interpreter GC barriers: leftovers for ARM32

Reviewed-by: enevill, eosterlund
This commit is contained in:
Aleksey Shipilev 2018-05-02 19:26:42 +02:00
parent 62d87665eb
commit 078b80e63c
15 changed files with 580 additions and 475 deletions

View file

@@ -852,80 +852,53 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
//
// Interpreter entry for the java.lang.ref.Reference.get() intrinsic.
// Performs a GC-barriered (ON_WEAK_OOP_REF) load of the referent field,
// falling back to the regular zerolocals entry when the receiver is null.
// Returns the generated entry address.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load
  //   Thus we can use the regular method entry code to generate the NPE.
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
  // Rparams: parameters

  address entry = __ pc();
  Label slow_path;
  const Register Rthis = R0;
  const Register Rret_addr = Rtmp_save1;
  assert_different_registers(Rthis, Rret_addr, Rsender_sp);

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  __ ldr(Rthis, Address(Rparams));
  __ cbz(Rthis, slow_path);

  // Preserve LR before the barriered load: the pre-patch code saved LR
  // ahead of g1_write_barrier_pre, so the barrier slow path is assumed to
  // clobber LR via a runtime call — keep the save before load_heap_oop.
  __ mov(Rret_addr, LR);

  // Load the value of the referent field through the GC-selected barrier.
  const Address field_address(Rthis, referent_offset);
  __ load_heap_oop(R0, field_address, Rtemp, R1_tmp, R2_tmp, ON_WEAK_OOP_REF);

  // _areturn
  __ mov(SP, Rsender_sp);
  __ ret(Rret_addr);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}
// Not supported