8364406: Remove LockingMode related code from aarch64

Reviewed-by: aph, dholmes
Fredrik Bredberg 2025-08-12 08:45:36 +00:00
parent f155f7d6e5
commit 3c0eed8e47
9 changed files with 52 additions and 563 deletions

src/hotspot/cpu/aarch64/aarch64.ad

@@ -16281,41 +16281,8 @@ instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
 // ============================================================================
 // inlined locking and unlocking
-instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
-%{
-  predicate(LockingMode != LM_LIGHTWEIGHT);
-  match(Set cr (FastLock object box));
-  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
-
-  ins_cost(5 * INSN_COST);
-  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}
-
-  ins_encode %{
-    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
-  %}
-
-  ins_pipe(pipe_serial);
-%}
-
-instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
-%{
-  predicate(LockingMode != LM_LIGHTWEIGHT);
-  match(Set cr (FastUnlock object box));
-  effect(TEMP tmp, TEMP tmp2);
-
-  ins_cost(5 * INSN_COST);
-  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}
-
-  ins_encode %{
-    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
-  %}
-
-  ins_pipe(pipe_serial);
-%}
-
 instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
 %{
-  predicate(LockingMode == LM_LIGHTWEIGHT);
   match(Set cr (FastLock object box));
   effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
@@ -16331,7 +16298,6 @@ instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp
 instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
 %{
-  predicate(LockingMode == LM_LIGHTWEIGHT);
   match(Set cr (FastUnlock object box));
   effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp

@@ -410,11 +410,7 @@ int LIR_Assembler::emit_unwind_handler() {
   if (method()->is_synchronized()) {
     monitor_address(0, FrameMap::r0_opr);
     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
-    if (LockingMode == LM_MONITOR) {
-      __ b(*stub->entry());
-    } else {
-      __ unlock_object(r5, r4, r0, r6, *stub->entry());
-    }
+    __ unlock_object(r5, r4, r0, r6, *stub->entry());
     __ bind(*stub->continuation());
   }
@@ -2484,13 +2480,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
   Register hdr = op->hdr_opr()->as_register();
   Register lock = op->lock_opr()->as_register();
   Register temp = op->scratch_opr()->as_register();
-  if (LockingMode == LM_MONITOR) {
-    if (op->info() != nullptr) {
-      add_debug_info_for_null_check_here(op->info());
-      __ null_check(obj, -1);
-    }
-    __ b(*op->stub()->entry());
-  } else if (op->code() == lir_lock) {
+  if (op->code() == lir_lock) {
     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
     // add debug info for NullPointerException only if one is possible
     int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry());

src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp

@@ -60,8 +60,6 @@ void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result,
 }

 int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
-  const int aligned_mask = BytesPerWord -1;
-  const int hdr_offset = oopDesc::mark_offset_in_bytes();
   assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2);
   int null_check_offset = -1;
@@ -72,95 +70,20 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
   null_check_offset = offset();

-  if (LockingMode == LM_LIGHTWEIGHT) {
-    lightweight_lock(disp_hdr, obj, hdr, temp, rscratch2, slow_case);
-  } else if (LockingMode == LM_LEGACY) {
-
-    if (DiagnoseSyncOnValueBasedClasses != 0) {
-      load_klass(hdr, obj);
-      ldrb(hdr, Address(hdr, Klass::misc_flags_offset()));
-      tst(hdr, KlassFlags::_misc_is_value_based_class);
-      br(Assembler::NE, slow_case);
-    }
-
-    Label done;
-    // Load object header
-    ldr(hdr, Address(obj, hdr_offset));
-    // and mark it as unlocked
-    orr(hdr, hdr, markWord::unlocked_value);
-    // save unlocked object header into the displaced header location on the stack
-    str(hdr, Address(disp_hdr, 0));
-    // test if object header is still the same (i.e. unlocked), and if so, store the
-    // displaced header address in the object header - if it is not the same, get the
-    // object header instead
-    lea(rscratch2, Address(obj, hdr_offset));
-    cmpxchgptr(hdr, disp_hdr, rscratch2, rscratch1, done, /*fallthrough*/nullptr);
-    // if the object header was the same, we're done
-    // if the object header was not the same, it is now in the hdr register
-    // => test if it is a stack pointer into the same stack (recursive locking), i.e.:
-    //
-    // 1) (hdr & aligned_mask) == 0
-    // 2) sp <= hdr
-    // 3) hdr <= sp + page_size
-    //
-    // these 3 tests can be done by evaluating the following expression:
-    //
-    // (hdr - sp) & (aligned_mask - page_size)
-    //
-    // assuming both the stack pointer and page_size have their least
-    // significant 2 bits cleared and page_size is a power of 2
-    mov(rscratch1, sp);
-    sub(hdr, hdr, rscratch1);
-    ands(hdr, hdr, aligned_mask - (int)os::vm_page_size());
-    // for recursive locking, the result is zero => save it in the displaced header
-    // location (null in the displaced hdr location indicates recursive locking)
-    str(hdr, Address(disp_hdr, 0));
-    // otherwise we don't care about the result and handle locking via runtime call
-    cbnz(hdr, slow_case);
-    // done
-    bind(done);
-    inc_held_monitor_count(rscratch1);
-  }
+  lightweight_lock(disp_hdr, obj, hdr, temp, rscratch2, slow_case);

   return null_check_offset;
 }
 void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
-  const int aligned_mask = BytesPerWord -1;
-  const int hdr_offset = oopDesc::mark_offset_in_bytes();
   assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2);

-  Label done;
-  if (LockingMode != LM_LIGHTWEIGHT) {
-    // load displaced header
-    ldr(hdr, Address(disp_hdr, 0));
-    // if the loaded hdr is null we had recursive locking
-    // if we had recursive locking, we are done
-    cbz(hdr, done);
-  }
-
   // load object
   ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
   verify_oop(obj);

-  if (LockingMode == LM_LIGHTWEIGHT) {
-    lightweight_unlock(obj, hdr, temp, rscratch2, slow_case);
-  } else if (LockingMode == LM_LEGACY) {
-    // test if object header is pointing to the displaced header, and if so, restore
-    // the displaced header in the object - if the object header is not pointing to
-    // the displaced header, get the object header instead
-    // if the object header was not pointing to the displaced header,
-    // we do unlocking via runtime call
-    if (hdr_offset) {
-      lea(rscratch1, Address(obj, hdr_offset));
-      cmpxchgptr(disp_hdr, hdr, rscratch1, rscratch2, done, &slow_case);
-    } else {
-      cmpxchgptr(disp_hdr, hdr, obj, rscratch2, done, &slow_case);
-    }
-    // done
-    bind(done);
-    dec_held_monitor_count(rscratch1);
-  }
+  lightweight_unlock(obj, hdr, temp, rscratch2, slow_case);
 }
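The legacy recursion test deleted above packs three checks (word alignment, sp <= hdr, hdr < sp + page_size) into a single AND. A minimal standalone sketch of that arithmetic, assuming an 8-byte word and a 4 KiB page (hypothetical helper, not HotSpot source):

    #include <cstdint>

    // Zero iff hdr is word-aligned and sp <= hdr < sp + page_size, i.e. the
    // mark word points into the current thread's stack: a recursive lock.
    bool is_recursive_stack_lock(uintptr_t hdr, uintptr_t sp) {
      const uintptr_t aligned_mask = 8 - 1;  // BytesPerWord - 1
      const uintptr_t page_size    = 4096;   // stand-in for os::vm_page_size()
      return ((hdr - sp) & (aligned_mask - page_size)) == 0;
    }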

src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp

@@ -147,215 +147,8 @@ address C2_MacroAssembler::arrays_hashcode(Register ary, Register cnt, Register
   return pc();
 }

-void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register tmpReg,
-                                  Register tmp2Reg, Register tmp3Reg) {
-  Register oop = objectReg;
-  Register box = boxReg;
-  Register disp_hdr = tmpReg;
-  Register tmp = tmp2Reg;
-  Label cont;
-  Label object_has_monitor;
-  Label count, no_count;
-
-  assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
-  assert_different_registers(oop, box, tmp, disp_hdr, rscratch2);
-
-  // Load markWord from object into displaced_header.
-  ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
-
-  if (DiagnoseSyncOnValueBasedClasses != 0) {
-    load_klass(tmp, oop);
-    ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
-    tst(tmp, KlassFlags::_misc_is_value_based_class);
-    br(Assembler::NE, cont);
-  }
-
-  // Check for existing monitor
-  tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
-
-  if (LockingMode == LM_MONITOR) {
-    tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
-    b(cont);
-  } else {
-    assert(LockingMode == LM_LEGACY, "must be");
-    // Set tmp to be (markWord of object | UNLOCK_VALUE).
-    orr(tmp, disp_hdr, markWord::unlocked_value);
-    // Initialize the box. (Must happen before we update the object mark!)
-    str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
-    // Compare object markWord with an unlocked value (tmp) and if
-    // equal exchange the stack address of our box with object markWord.
-    // On failure disp_hdr contains the possibly locked markWord.
-    cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
-            /*release*/ true, /*weak*/ false, disp_hdr);
-    br(Assembler::EQ, cont);
-
-    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
-
-    // If the compare-and-exchange succeeded, then we found an unlocked
-    // object, have now locked it, and will continue at label cont.
-
-    // Check if the owner is self by comparing the value in the
-    // markWord of object (disp_hdr) with the stack pointer.
-    mov(rscratch1, sp);
-    sub(disp_hdr, disp_hdr, rscratch1);
-    mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
-    // If condition is true we are cont and hence we can store 0 as the
-    // displaced header in the box, which indicates that it is a recursive lock.
-    ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
-    str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
-    b(cont);
-  }
-
-  // Handle existing monitor.
-  bind(object_has_monitor);
-
-  // Try to CAS owner (no owner => current thread's _monitor_owner_id).
-  ldr(rscratch2, Address(rthread, JavaThread::monitor_owner_id_offset()));
-  add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset())-markWord::monitor_value));
-  cmpxchg(tmp, zr, rscratch2, Assembler::xword, /*acquire*/ true,
-          /*release*/ true, /*weak*/ false, tmp3Reg); // Sets flags for result
-
-  // Store a non-null value into the box to avoid looking like a re-entrant
-  // lock. The fast-path monitor unlock code checks for
-  // markWord::monitor_value so use markWord::unused_mark which has the
-  // relevant bit set, and also matches ObjectSynchronizer::enter.
-  mov(tmp, (address)markWord::unused_mark().value());
-  str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
-  br(Assembler::EQ, cont); // CAS success means locking succeeded
-
-  cmp(tmp3Reg, rscratch2);
-  br(Assembler::NE, cont); // Check for recursive locking
-
-  // Recursive lock case
-  increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1);
-  // flag == EQ still from the cmp above, checking if this is a reentrant lock
-
-  bind(cont);
-  // flag == EQ indicates success
-  // flag == NE indicates failure
-  br(Assembler::NE, no_count);
-
-  bind(count);
-  if (LockingMode == LM_LEGACY) {
-    inc_held_monitor_count(rscratch1);
-  }
-
-  bind(no_count);
-}
-void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Register tmpReg,
-                                    Register tmp2Reg) {
-  Register oop = objectReg;
-  Register box = boxReg;
-  Register disp_hdr = tmpReg;
-  Register owner_addr = tmpReg;
-  Register tmp = tmp2Reg;
-  Label cont;
-  Label object_has_monitor;
-  Label count, no_count;
-  Label unlocked;
-
-  assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
-  assert_different_registers(oop, box, tmp, disp_hdr);
-
-  if (LockingMode == LM_LEGACY) {
-    // Find the lock address and load the displaced header from the stack.
-    ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
-
-    // If the displaced header is 0, we have a recursive unlock.
-    cmp(disp_hdr, zr);
-    br(Assembler::EQ, cont);
-  }
-
-  // Handle existing monitor.
-  ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
-  tbnz(tmp, exact_log2(markWord::monitor_value), object_has_monitor);
-
-  if (LockingMode == LM_MONITOR) {
-    tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
-    b(cont);
-  } else {
-    assert(LockingMode == LM_LEGACY, "must be");
-    // Check if it is still a lightweight lock, this is true if we
-    // see the stack address of the basicLock in the markWord of the
-    // object.
-    cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
-            /*release*/ true, /*weak*/ false, tmp);
-    b(cont);
-  }
-
-  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
-
-  // Handle existing monitor.
-  bind(object_has_monitor);
-  STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
-  add(tmp, tmp, -(int)markWord::monitor_value); // monitor
-
-  ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
-
-  Label notRecursive;
-  cbz(disp_hdr, notRecursive);
-
-  // Recursive lock
-  sub(disp_hdr, disp_hdr, 1u);
-  str(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
-  cmp(disp_hdr, disp_hdr); // Sets flags for result
-  b(cont);
-
-  bind(notRecursive);
-
-  // Compute owner address.
-  lea(owner_addr, Address(tmp, ObjectMonitor::owner_offset()));
-
-  // Set owner to null.
-  // Release to satisfy the JMM
-  stlr(zr, owner_addr);
-  // We need a full fence after clearing owner to avoid stranding.
-  // StoreLoad achieves this.
-  membar(StoreLoad);
-
-  // Check if the entry_list is empty.
-  ldr(rscratch1, Address(tmp, ObjectMonitor::entry_list_offset()));
-  cmp(rscratch1, zr);
-  br(Assembler::EQ, cont); // If so we are done.
-
-  // Check if there is a successor.
-  ldr(rscratch1, Address(tmp, ObjectMonitor::succ_offset()));
-  cmp(rscratch1, zr);
-  br(Assembler::NE, unlocked); // If so we are done.
-
-  // Save the monitor pointer in the current thread, so we can try to
-  // reacquire the lock in SharedRuntime::monitor_exit_helper().
-  str(tmp, Address(rthread, JavaThread::unlocked_inflated_monitor_offset()));
-
-  cmp(zr, rthread); // Set Flag to NE => slow path
-  b(cont);
-
-  bind(unlocked);
-  cmp(zr, zr); // Set Flag to EQ => fast path
-  // Intentional fall-through
-
-  bind(cont);
-  // flag == EQ indicates success
-  // flag == NE indicates failure
-  br(Assembler::NE, no_count);
-
-  bind(count);
-  if (LockingMode == LM_LEGACY) {
-    dec_held_monitor_count(rscratch1);
-  }
-
-  bind(no_count);
-}
 void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register t1,
                                               Register t2, Register t3) {
-  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
   assert_different_registers(obj, box, t1, t2, t3, rscratch2);

   // Handle inflated monitor.
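The deleted fast_lock acquired an inflated monitor with a single compare-and-swap on the owner field, falling back to a recursion counter when the owner is already the current thread. A rough C++ analogue of that protocol, using a hypothetical Monitor type rather than the real ObjectMonitor layout:

    #include <atomic>
    #include <cstdint>

    struct Monitor {                    // hypothetical stand-in for ObjectMonitor
      std::atomic<uint64_t> owner{0};   // 0 => unowned
      uint64_t recursions{0};
    };

    bool try_enter(Monitor& m, uint64_t self_id) {
      uint64_t expected = 0;
      if (m.owner.compare_exchange_strong(expected, self_id,
                                          std::memory_order_acquire)) {
        return true;                    // CAS success: we own the monitor
      }
      if (expected == self_id) {        // owner is already us: re-entrant lock
        m.recursions++;
        return true;
      }
      return false;                     // contended: take the slow path
    }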
@@ -512,7 +305,6 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist
 void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Register t1,
                                                 Register t2, Register t3) {
-  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
   assert_different_registers(obj, box, t1, t2, t3);

   // Handle inflated monitor.
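Likewise, the deleted fast_unlock cleared the owner with a releasing store and then issued a full StoreLoad fence before re-reading the entry list, so a thread that enqueues itself concurrently cannot be stranded with no one to wake it. Sketched with the same hypothetical Monitor shape as above (assumed fields, not HotSpot source):

    #include <atomic>
    #include <cstdint>

    struct Monitor {                          // hypothetical stand-in for ObjectMonitor
      std::atomic<uint64_t>  owner{0};
      std::atomic<uintptr_t> entry_list{0};   // blocked threads, 0 => empty
      std::atomic<uintptr_t> successor{0};    // a thread already chosen to wake
    };

    bool try_exit(Monitor& m) {
      m.owner.store(0, std::memory_order_release);          // stlr(zr, owner_addr)
      std::atomic_thread_fence(std::memory_order_seq_cst);  // membar(StoreLoad)
      if (m.entry_list.load() == 0) return true;  // nobody waiting: done
      if (m.successor.load()  != 0) return true;  // a waiter will wake itself
      return false;  // slow path: try to reacquire and hand off the lock
    }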

src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp

@@ -51,9 +51,6 @@
                         FloatRegister vmul3, FloatRegister vpow, FloatRegister vpowm,
                         BasicType eltype);

-  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
-  void fast_lock(Register object, Register box, Register tmp, Register tmp2, Register tmp3);
-  void fast_unlock(Register object, Register box, Register tmp, Register tmp2);
   // Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file.
   void fast_lock_lightweight(Register object, Register box, Register t1, Register t2, Register t3);
   void fast_unlock_lightweight(Register object, Register box, Register t1, Register t2, Register t3);

src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp

@@ -691,104 +691,27 @@ void InterpreterMacroAssembler::leave_jfr_critical_section() {
 void InterpreterMacroAssembler::lock_object(Register lock_reg)
 {
   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
-  if (LockingMode == LM_MONITOR) {
-    call_VM_preemptable(noreg,
-            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
-            lock_reg);
-  } else {
-    Label count, done;
-
-    const Register swap_reg = r0;
-    const Register tmp = c_rarg2;
-    const Register obj_reg = c_rarg3; // Will contain the oop
-    const Register tmp2 = c_rarg4;
-    const Register tmp3 = c_rarg5;
+  const Register tmp = c_rarg2;
+  const Register obj_reg = c_rarg3; // Will contain the oop
+  const Register tmp2 = c_rarg4;
+  const Register tmp3 = c_rarg5;

-    const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
-    const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
-    const int mark_offset = lock_offset +
-                            BasicLock::displaced_header_offset_in_bytes();
-
-    // Load object pointer into obj_reg %c_rarg3
-    ldr(obj_reg, Address(lock_reg, obj_offset));
+  // Load object pointer into obj_reg %c_rarg3
+  ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));

-    Label slow_case;
-
-    if (LockingMode == LM_LIGHTWEIGHT) {
-      lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
-      b(done);
-    } else if (LockingMode == LM_LEGACY) {
-
-      if (DiagnoseSyncOnValueBasedClasses != 0) {
-        load_klass(tmp, obj_reg);
-        ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
-        tst(tmp, KlassFlags::_misc_is_value_based_class);
-        br(Assembler::NE, slow_case);
-      }
-
-      // Load (object->mark() | 1) into swap_reg
-      ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-      orr(swap_reg, rscratch1, 1);
-
-      // Save (object->mark() | 1) into BasicLock's displaced header
-      str(swap_reg, Address(lock_reg, mark_offset));
-
-      assert(lock_offset == 0,
-             "displaced header must be first word in BasicObjectLock");
-
-      Label fail;
-      cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
-
-      // Fast check for recursive lock.
-      //
-      // Can apply the optimization only if this is a stack lock
-      // allocated in this thread. For efficiency, we can focus on
-      // recently allocated stack locks (instead of reading the stack
-      // base and checking whether 'mark' points inside the current
-      // thread stack):
-      // 1) (mark & 7) == 0, and
-      // 2) sp <= mark < mark + os::pagesize()
-      //
-      // Warning: sp + os::pagesize can overflow the stack base. We must
-      // neither apply the optimization for an inflated lock allocated
-      // just above the thread stack (this is why condition 1 matters)
-      // nor apply the optimization if the stack lock is inside the stack
-      // of another thread. The latter is avoided even in case of overflow
-      // because we have guard pages at the end of all stacks. Hence, if
-      // we go over the stack base and hit the stack of another thread,
-      // this should not be in a writeable area that could contain a
-      // stack lock allocated by that thread. As a consequence, a stack
-      // lock less than page size away from sp is guaranteed to be
-      // owned by the current thread.
-      //
-      // These 3 tests can be done by evaluating the following
-      // expression: ((mark - sp) & (7 - os::vm_page_size())),
-      // assuming both stack pointer and pagesize have their
-      // least significant 3 bits clear.
-      // NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
-      // NOTE2: aarch64 does not like to subtract sp from rn so take a
-      // copy
-      mov(rscratch1, sp);
-      sub(swap_reg, swap_reg, rscratch1);
-      ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));
-
-      // Save the test result, for recursive case, the result is zero
-      str(swap_reg, Address(lock_reg, mark_offset));
-      br(Assembler::NE, slow_case);
-
-      bind(count);
-      inc_held_monitor_count(rscratch1);
-      b(done);
-    }
-
-    bind(slow_case);
-
-    // Call the runtime routine for slow case
-    call_VM_preemptable(noreg,
-            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
-            lock_reg);
-    bind(done);
-  }
+  Label slow_case, done;
+  lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
+  b(done);
+
+  bind(slow_case);
+
+  // Call the runtime routine for slow case
+  call_VM_preemptable(noreg,
+          CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
+          lock_reg);
+
+  bind(done);
 }
@@ -807,57 +730,29 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
 {
   assert(lock_reg == c_rarg1, "The argument is only for looks. It must be rarg1");
-  if (LockingMode == LM_MONITOR) {
-    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
-  } else {
-    Label count, done;
-
-    const Register swap_reg = r0;
-    const Register header_reg = c_rarg2;  // Will contain the old oopMark
-    const Register obj_reg = c_rarg3;     // Will contain the oop
-    const Register tmp_reg = c_rarg4;     // Temporary used by lightweight_unlock
+  const Register swap_reg = r0;
+  const Register header_reg = c_rarg2;  // Will contain the old oopMark
+  const Register obj_reg = c_rarg3;     // Will contain the oop
+  const Register tmp_reg = c_rarg4;     // Temporary used by lightweight_unlock

-    save_bcp(); // Save in case of exception
+  save_bcp(); // Save in case of exception

-    // Load oop into obj_reg(%c_rarg3)
-    ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
-
-    if (LockingMode != LM_LIGHTWEIGHT) {
-      // Convert from BasicObjectLock structure to object and BasicLock
-      // structure. Store the BasicLock address into %r0
-      lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
-    }
-
-    // Free entry
-    str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
+  // Load oop into obj_reg(%c_rarg3)
+  ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));

-    Label slow_case;
-    if (LockingMode == LM_LIGHTWEIGHT) {
-      lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
-      b(done);
-    } else if (LockingMode == LM_LEGACY) {
-      // Load the old header from BasicLock structure
-      ldr(header_reg, Address(swap_reg,
-                              BasicLock::displaced_header_offset_in_bytes()));
-      // Test for recursion
-      cbz(header_reg, count);
-      // Atomic swap back the old header
-      cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, &slow_case);
-
-      bind(count);
-      dec_held_monitor_count(rscratch1);
-      b(done);
-    }
+  // Free entry
+  str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));

-    bind(slow_case);
-    // Call the runtime routine for slow case.
-    str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
-    call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
-    bind(done);
-    restore_bcp();
-  }
+  Label slow_case, done;
+  lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
+  b(done);
+
+  bind(slow_case);
+  // Call the runtime routine for slow case.
+  str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
+  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
+  bind(done);
+  restore_bcp();
 }
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
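With the LockingMode branches gone, both interpreter paths above reduce to the same shape: one unconditional inline fast-path attempt, with the runtime call demoted to an out-of-line slow case. A control-flow sketch in C++ (hypothetical helpers, not the emitted assembly):

    bool lightweight_lock_fast_path();  // hypothetical: the inline lock attempt
    void monitorenter_runtime_call();   // hypothetical: InterpreterRuntime::monitorenter

    void lock_object_shape() {
      if (lightweight_lock_fast_path()) {
        return;                         // b(done)
      }
      // bind(slow_case)
      monitorenter_runtime_call();      // call_VM_preemptable(...)
      // bind(done)
    }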

src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

@@ -7097,7 +7097,6 @@ void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
 // - t1, t2, t3: temporary registers, will be destroyed
 // - slow: branched to if locking fails, absolute offset may be larger than 32KB (imm14 encoding).
 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
-  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
   assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);

   Label push;
@@ -7157,7 +7156,6 @@ void MacroAssembler::lightweight_lock(Registe
 // - t1, t2, t3: temporary registers
 // - slow: branched to if unlocking fails, absolute offset may be larger than 32KB (imm14 encoding).
 void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
-  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
   // cmpxchg clobbers rscratch1.
   assert_different_registers(obj, t1, t2, t3, rscratch1);

src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

@@ -1721,7 +1721,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // We use the same pc/oopMap repeatedly when we call out.

   Label native_return;
-  if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
+  if (method->is_object_wait0()) {
     // For convenience we use the pc we want to resume to in case of preemption on Object.wait.
     __ set_last_Java_frame(sp, noreg, native_return, rscratch1);
   } else {
@@ -1776,44 +1776,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // Load the oop from the handle
     __ ldr(obj_reg, Address(oop_handle_reg, 0));

-    if (LockingMode == LM_MONITOR) {
-      __ b(slow_path_lock);
-    } else if (LockingMode == LM_LEGACY) {
-      // Load (object->mark() | 1) into swap_reg %r0
-      __ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
-      __ orr(swap_reg, rscratch1, 1);
-
-      // Save (object->mark() | 1) into BasicLock's displaced header
-      __ str(swap_reg, Address(lock_reg, mark_word_offset));
-
-      // src -> dest iff dest == r0 else r0 <- dest
-      __ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
-
-      // Hmm should this move to the slow path code area???
-
-      // Test if the oopMark is an obvious stack pointer, i.e.,
-      //  1) (mark & 3) == 0, and
-      //  2) sp <= mark < mark + os::pagesize()
-      // These 3 tests can be done by evaluating the following
-      // expression: ((mark - sp) & (3 - os::vm_page_size())),
-      // assuming both stack pointer and pagesize have their
-      // least significant 2 bits clear.
-      // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
-      __ sub(swap_reg, sp, swap_reg);
-      __ neg(swap_reg, swap_reg);
-      __ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
-
-      // Save the test result, for recursive case, the result is zero
-      __ str(swap_reg, Address(lock_reg, mark_word_offset));
-      __ br(Assembler::NE, slow_path_lock);
-
-      __ bind(count);
-      __ inc_held_monitor_count(rscratch1);
-    } else {
-      assert(LockingMode == LM_LIGHTWEIGHT, "must be");
-      __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
-    }
+    __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);

     // Slow path will re-enter here
     __ bind(lock_done);
@@ -1888,7 +1851,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
   __ stlrw(rscratch1, rscratch2);

-  if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
+  if (method->is_object_wait0()) {
     // Check preemption for Object.wait()
     __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
     __ cbz(rscratch1, native_return);
@@ -1917,48 +1880,18 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     // Get locked oop from the handle we passed to jni
     __ ldr(obj_reg, Address(oop_handle_reg, 0));

-    Label done, not_recursive;
+    Label done;

-    if (LockingMode == LM_LEGACY) {
-      // Simple recursive lock?
-      __ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
-      __ cbnz(rscratch1, not_recursive);
-      __ dec_held_monitor_count(rscratch1);
-      __ b(done);
-    }
-
-    __ bind(not_recursive);
-
     // Must save r0 if it is live now because cmpxchg must use it
     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
       save_native_result(masm, ret_type, stack_slots);
     }

-    if (LockingMode == LM_MONITOR) {
-      __ b(slow_path_unlock);
-    } else if (LockingMode == LM_LEGACY) {
-      // get address of the stack lock
-      __ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
-      // get old displaced header
-      __ ldr(old_hdr, Address(r0, 0));
-
-      // Atomic swap old header if oop still contains the stack lock
-      Label count;
-      __ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
-      __ bind(count);
-      __ dec_held_monitor_count(rscratch1);
-    } else {
-      assert(LockingMode == LM_LIGHTWEIGHT, "");
-      __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
-    }
+    __ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);

     // slow path re-enters here
     __ bind(unlock_done);
     if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
       restore_native_result(masm, ret_type, stack_slots);
     }

     __ bind(done);
}
Label dtrace_method_exit, dtrace_method_exit_done;

src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp

@@ -1478,22 +1478,17 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
   __ stlrw(rscratch1, rscratch2);

-  if (LockingMode != LM_LEGACY) {
-    // Check preemption for Object.wait()
-    Label not_preempted;
-    __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
-    __ cbz(rscratch1, not_preempted);
-    __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
-    __ br(rscratch1);
-    __ bind(native_return);
-    __ restore_after_resume(true /* is_native */);
-    // reload result_handler
-    __ ldr(result_handler, Address(rfp, frame::interpreter_frame_result_handler_offset*wordSize));
-    __ bind(not_preempted);
-  } else {
-    // any pc will do so just use this one for LM_LEGACY to keep code together.
-    __ bind(native_return);
-  }
+  // Check preemption for Object.wait()
+  Label not_preempted;
+  __ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
+  __ cbz(rscratch1, not_preempted);
+  __ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
+  __ br(rscratch1);
+  __ bind(native_return);
+  __ restore_after_resume(true /* is_native */);
+  // reload result_handler
+  __ ldr(result_handler, Address(rfp, frame::interpreter_frame_result_handler_offset*wordSize));
+  __ bind(not_preempted);

   // reset_last_Java_frame
   __ reset_last_Java_frame(true);