Mirror of https://github.com/openjdk/jdk.git (synced 2025-08-28 15:24:43 +02:00)

8152358: code and comment cleanups found during the hunt for 8077392
Reviewed-by: gthornbr, kvn, cvarming

parent 66570c722b
commit 2e87e3178c

8 changed files with 71 additions and 43 deletions
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -312,7 +312,7 @@ void LIR_Assembler::osr_entry() {
   Register OSR_buf = osrBufferPointer()->as_pointer_register();
   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
     int monitor_offset = BytesPerWord * method()->max_locals() +
-      (2 * BytesPerWord) * (number_of_locks - 1);
+      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
     // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
     // the OSR buffer using 2 word entries: first the lock and then
     // the oop.
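The changed line replaces the magic number 2 with BasicObjectLock::size(), which the assert just above ties to the interpreter frame's monitor size. A standalone sketch of the two-word-per-monitor buffer layout the comment describes (illustrative only; the names and example numbers are stand-ins, not HotSpot code):

```cpp
#include <cstddef>
#include <cstdio>

static const size_t kBytesPerWord = sizeof(void*);
static const size_t kBasicObjectLockWords = 2;  // displaced lock word, then the oop

// Byte offset of monitor entry `i` in an OSR buffer holding `max_locals`
// local words followed by `number_of_locks` two-word monitor entries.
size_t monitor_offset(size_t max_locals, size_t number_of_locks, size_t i) {
  return kBytesPerWord * max_locals +
         (kBasicObjectLockWords * kBytesPerWord) * (number_of_locks - 1 - i);
}

int main() {
  // With 5 locals and 2 monitors on a 64-bit machine:
  printf("monitor 0 at byte %zu\n", monitor_offset(5, 2, 0));  // 56
  printf("monitor 1 at byte %zu\n", monitor_offset(5, 2, 1));  // 40
  return 0;
}
```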
@@ -1099,7 +1099,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
     movptr(Address(lock_reg, mark_offset), swap_reg);

     assert(lock_offset == 0,
-           "displached header must be first word in BasicObjectLock");
+           "displaced header must be first word in BasicObjectLock");

     if (os::is_MP()) lock();
     cmpxchgptr(lock_reg, Address(obj_reg, 0));
@@ -1154,7 +1154,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
 // Kills:
 //      rax
 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
-//      rscratch1, rscratch2 (scratch regs)
+//      rscratch1 (scratch reg)
 // rax, rbx, rcx, rdx
 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
   assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
@@ -1201,7 +1201,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
     if (os::is_MP()) lock();
     cmpxchgptr(header_reg, Address(obj_reg, 0));

-    // zero for recursive case
+    // zero for simple unlock of a stack-lock case
     jcc(Assembler::zero, done);

     // Call the runtime routine for slow case.
@@ -1106,7 +1106,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
   assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
   assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
   Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
-  Address saved_mark_addr(lock_reg, 0);
+  NOT_LP64( Address saved_mark_addr(lock_reg, 0); )

   if (PrintBiasedLockingStatistics && counters == NULL) {
     counters = BiasedLocking::counters();
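The new line wraps the declaration in NOT_LP64(...) so the stack-slot Address is only declared on 32-bit builds. A minimal sketch of how such word-size selection macros work (HotSpot has its own definitions; this standalone version just shows the mechanism):

```cpp
// Illustrative definitions of LP64_ONLY / NOT_LP64 (sketch only).
#ifdef _LP64
#define LP64_ONLY(code) code
#define NOT_LP64(code)
#else
#define LP64_ONLY(code)
#define NOT_LP64(code) code
#endif

void example() {
  NOT_LP64( int only_on_32_bit = 0; (void)only_on_32_bit; )    // compiled out on LP64
  LP64_ONLY( long only_on_64_bit = 0; (void)only_on_64_bit; )  // compiled out on 32-bit
}
```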
@@ -1695,7 +1695,7 @@ void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg
                                RTMLockingCounters* stack_rtm_counters,
                                Metadata* method_data,
                                bool use_rtm, bool profile_rtm) {
-  // Ensure the register assignents are disjoint
+  // Ensure the register assignments are disjoint
   assert(tmpReg == rax, "");

   if (use_rtm) {
@@ -2194,8 +2194,8 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR
       cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
       jccb  (Assembler::zero, LGoSlowPath);

+      xorptr(boxReg, boxReg);
       if ((EmitSync & 16) && os::is_MP()) {
-        orptr(boxReg, boxReg);
         xchgptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
       } else {
         movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
@@ -2227,7 +2227,6 @@ void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpR

       // box is really RAX -- the following CMPXCHG depends on that binding
       // cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R)
-      movptr(boxReg, (int32_t)NULL_WORD);
       if (os::is_MP()) { lock(); }
       cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
       // There's no successor so we tried to regrab the lock.
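The deleted movptr is evidently redundant: boxReg has already been zeroed by the xorptr hoisted ahead of the EmitSync test in the previous hunk. For the comment's claim that cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R), here is the same operation expressed with std::atomic (a sketch, not HotSpot code):

```cpp
#include <atomic>

// cmpxchg R,[M] compares rax with [M]; on a match it stores R into [M],
// otherwise it loads [M] into rax. The zero flag the surrounding code
// branches on corresponds to the boolean result here.
bool cmpxchg_like(std::atomic<void*>& m,   // [M]: e.g. the owner field
                  void*& rax,              // expected value; updated on failure
                  void* r) {               // new value, e.g. this thread
  return m.compare_exchange_strong(rax, r);
}
```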
@@ -149,9 +149,13 @@ static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_
   if (!mark->has_bias_pattern()) {
     if (log_is_enabled(Info, biasedlocking)) {
       ResourceMark rm;
-      log_info(biasedlocking)("  (Skipping revocation of object of type %s "
-                              "because it's no longer biased)",
-                              obj->klass()->external_name());
+      log_info(biasedlocking)("  (Skipping revocation of object " INTPTR_FORMAT
+                              ", mark " INTPTR_FORMAT ", type %s"
+                              ", requesting thread " INTPTR_FORMAT
+                              " because it's no longer biased)",
+                              p2i((void *)obj), (intptr_t) mark,
+                              obj->klass()->external_name(),
+                              (intptr_t) requesting_thread);
     }
     return BiasedLocking::NOT_BIASED;
   }
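The hunk keeps the log_is_enabled(Info, biasedlocking) guard, so the ResourceMark and the external_name() call are only paid when that log level is active. The same pattern in a standalone sketch (the names here are stand-ins for the Unified Logging API, not the real one):

```cpp
#include <cstdio>

static bool biasedlocking_info_enabled = false;  // stand-in for log_is_enabled(Info, biasedlocking)

static const char* expensive_type_name() {       // stand-in for obj->klass()->external_name()
  return "java.lang.Object";
}

void log_skipped_revocation(const void* obj, unsigned long mark, const void* requester) {
  if (!biasedlocking_info_enabled) {
    return;  // skip the expensive argument computation entirely
  }
  printf("  (Skipping revocation of object %p, mark 0x%lx, type %s, "
         "requesting thread %p because it's no longer biased)\n",
         obj, mark, expensive_type_name(), requester);
}
```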
@@ -163,9 +167,9 @@ static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_
   // Log at "info" level if not bulk, else "trace" level
   if (!is_bulk) {
     ResourceMark rm;
-    log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark "
-                            INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
-                            " , allow rebias %d , requesting thread " INTPTR_FORMAT,
+    log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
+                            INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
+                            ", allow rebias %d, requesting thread " INTPTR_FORMAT,
                             p2i((void *)obj),
                             (intptr_t) mark,
                             obj->klass()->external_name(),
@@ -222,13 +226,24 @@ static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_
     }
     // Log at "info" level if not bulk, else "trace" level
     if (!is_bulk) {
-      log_info(biasedlocking)("  Revoked bias of object biased toward dead thread");
+      log_info(biasedlocking)("  Revoked bias of object biased toward dead thread ("
+                              PTR_FORMAT ")", p2i(biased_thread));
     } else {
-      log_trace(biasedlocking)("  Revoked bias of object biased toward dead thread");
+      log_trace(biasedlocking)("  Revoked bias of object biased toward dead thread ("
+                               PTR_FORMAT ")", p2i(biased_thread));
     }
     return BiasedLocking::BIAS_REVOKED;
   }

+  // Log at "info" level if not bulk, else "trace" level
+  if (!is_bulk) {
+    log_info(biasedlocking)("  Revoked bias of object biased toward live thread ("
+                            PTR_FORMAT ")", p2i(biased_thread));
+  } else {
+    log_trace(biasedlocking)("  Revoked bias of object biased toward live thread ("
+                             PTR_FORMAT ")", p2i(biased_thread));
+  }
+
   // Thread owning bias is alive.
   // Check to see whether it currently owns the lock and, if so,
   // write down the needed displaced headers to the thread's stack.
@@ -850,7 +850,7 @@ void ObjectMonitor::UnlinkAfterAcquire(Thread *Self, ObjectWaiter *SelfNode) {
 // ~~~~~~~~
 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
 // the fast-path operators have been optimized so the common ::exit()
-// operation is 1-0.  See i486.ad fast_unlock(), for instance.
+// operation is 1-0, e.g., see macroAssembler_x86.cpp: fast_unlock().
 // The code emitted by fast_unlock() elides the usual MEMBAR. This
 // greatly improves latency -- MEMBAR and CAS having considerable local
 // latency on modern processors -- but at the cost of "stranding".  Absent the
@@ -863,7 +863,7 @@ void ObjectMonitor::UnlinkAfterAcquire(Thread *Self, ObjectWaiter *SelfNode) {
 //
 // The CAS() in enter provides for safety and exclusion, while the CAS or
 // MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
-// eliminates the CAS/MEMBAR from the exist path, but it admits stranding.
+// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
 // We detect and recover from stranding with timers.
 //
 // If a thread transiently strands it'll park until (a) another
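To make the 1-1 versus 1-0 distinction concrete, here is a toy rendering in std::atomic terms (an illustration of the idea in the comments above, not the ObjectMonitor implementation): the 1-1 exit pays a full barrier so it can reliably re-check for a successor, while the 1-0 exit is a single release store and can therefore strand a waiter that enqueues at the wrong moment.

```cpp
#include <atomic>

struct ToyMonitor {
  std::atomic<void*> owner{nullptr};
  std::atomic<void*> successor{nullptr};

  void exit_1_1() {
    owner.store(nullptr, std::memory_order_release);
    std::atomic_thread_fence(std::memory_order_seq_cst);  // the MEMBAR
    if (successor.load() != nullptr) {
      // unpark the successor so it can retry the lock
    }
  }

  void exit_1_0() {
    // One release store, no fence or CAS: faster, but a waiter that parks
    // between this store and any later wakeup check may never be woken.
    // That is the "stranding" the timers must detect and recover from.
    owner.store(nullptr, std::memory_order_release);
  }
};
```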
@@ -936,7 +936,6 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
   for (;;) {
     assert(THREAD == _owner, "invariant");

-
     if (Knob_ExitPolicy == 0) {
       // release semantics: prior loads and stores from within the critical section
       // must not float (reorder) past the following store that drops the lock.
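In portable C++ terms, the "release semantics" the comment asks for correspond to a release store (a sketch of the concept only; HotSpot uses its own ordering primitives rather than std::atomic):

```cpp
#include <atomic>

std::atomic<void*> owner_field{nullptr};
int critical_section_data = 0;

void drop_lock() {
  critical_section_data = 42;  // a store from inside the critical section
  // The release ordering forbids the store above from floating (reordering)
  // past the store that drops the lock:
  owner_field.store(nullptr, std::memory_order_release);
}
```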
@@ -1284,8 +1284,8 @@ void os::set_memory_serialize_page(address page) {
   _mem_serialize_page = (volatile int32_t *)page;
   // We initialize the serialization page shift count here
   // We assume a cache line size of 64 bytes
-  assert(SerializePageShiftCount == count,
-         "thread size changed, fix SerializePageShiftCount constant");
+  assert(SerializePageShiftCount == count, "JavaThread size changed; "
+         "SerializePageShiftCount constant should be %d", count);
   set_serialize_page_mask((uintptr_t)(vm_page_size() - sizeof(int32_t)));
 }
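For context, the shift count and mask set up here hash a thread's address to one int32_t slot in the serialization page; the tightened assert now reports the expected constant when the JavaThread size changes. A simplified sketch of that slot computation (the constants and names are assumptions for illustration, not the real implementation):

```cpp
#include <cstdint>

static const uintptr_t kPageSize = 4096;
static const int kSerializePageShiftCount = 4;  // stand-in; tied to the JavaThread size
static const uintptr_t kSerializePageMask =
    kPageSize - sizeof(int32_t);                // 0xffc keeps the offset int32_t-aligned

static volatile int32_t* mem_serialize_page;    // the shared page

// Each thread writes to a slot derived from its own address; the shift
// spreads distinct thread objects across distinct slots in the page.
void touch_serialize_slot(const void* thread) {
  uintptr_t offset =
      ((uintptr_t)thread >> kSerializePageShiftCount) & kSerializePageMask;
  *(volatile int32_t*)((uintptr_t)mem_serialize_page + offset) = 1;
}
```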
@@ -2954,7 +2954,7 @@ JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
   Method* moop = fr.interpreter_frame_method();
   int max_locals = moop->max_locals();
   // Allocate temp buffer, 1 word per local & 2 per active monitor
-  int buf_size_words = max_locals + active_monitor_count*2;
+  int buf_size_words = max_locals + active_monitor_count * BasicObjectLock::size();
   intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words, mtCode);

   // Copy the locals.  Order is preserved so that loading of longs works.
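As with the osr_entry() change earlier, the arithmetic is unchanged (BasicObjectLock::size() is the two words per monitor named in the comment); it now just tracks the real struct instead of a literal 2. A worked check with assumed numbers:

```cpp
int osr_buffer_size_words() {
  const int max_locals = 5;              // hypothetical method
  const int active_monitor_count = 2;    // hypothetical frame
  const int basic_object_lock_size = 2;  // stand-in for BasicObjectLock::size()
  // Five local words followed by two 2-word (lock word, oop) entries:
  return max_locals + active_monitor_count * basic_object_lock_size;  // == 9
}
```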
@@ -283,38 +283,52 @@ void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
 }

 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
-  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
-  // if displaced header is null, the previous enter is recursive enter, no-op
+  markOop mark = object->mark();
+  // We cannot check for Biased Locking if we are racing an inflation.
+  assert(mark == markOopDesc::INFLATING() ||
+         !mark->has_bias_pattern(), "should not see bias pattern here");
+
   markOop dhw = lock->displaced_header();
-  markOop mark;
   if (dhw == NULL) {
-    // Recursive stack-lock.
-    // Diagnostics -- Could be: stack-locked, inflating, inflated.
-    mark = object->mark();
-    assert(!mark->is_neutral(), "invariant");
-    if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
-      assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
-    }
-    if (mark->has_monitor()) {
-      ObjectMonitor * m = mark->monitor();
-      assert(((oop)(m->object()))->mark() == mark, "invariant");
-      assert(m->is_entered(THREAD), "invariant");
-    }
+    // If the displaced header is NULL, then this exit matches up with
+    // a recursive enter. No real work to do here except for diagnostics.
+#ifndef PRODUCT
+    if (mark != markOopDesc::INFLATING()) {
+      // Only do diagnostics if we are not racing an inflation. Simply
+      // exiting a recursive enter of a Java Monitor that is being
+      // inflated is safe; see the has_monitor() comment below.
+      assert(!mark->is_neutral(), "invariant");
+      assert(!mark->has_locker() ||
+             THREAD->is_lock_owned((address)mark->locker()), "invariant");
+      if (mark->has_monitor()) {
+        // The BasicLock's displaced_header is marked as a recursive
+        // enter and we have an inflated Java Monitor (ObjectMonitor).
+        // This is a special case where the Java Monitor was inflated
+        // after this thread entered the stack-lock recursively. When a
+        // Java Monitor is inflated, we cannot safely walk the Java
+        // Monitor owner's stack and update the BasicLocks because a
+        // Java Monitor can be asynchronously inflated by a thread that
+        // does not own the Java Monitor.
+        ObjectMonitor * m = mark->monitor();
+        assert(((oop)(m->object()))->mark() == mark, "invariant");
+        assert(m->is_entered(THREAD), "invariant");
+      }
+    }
+#endif
     return;
   }

-  mark = object->mark();
-
-  // If the object is stack-locked by the current thread, try to
-  // swing the displaced header from the box back to the mark.
   if (mark == (markOop) lock) {
+    // If the object is stack-locked by the current thread, try to
+    // swing the displaced header from the BasicLock back to the mark.
     assert(dhw->is_neutral(), "invariant");
-    if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
-      TEVENT(fast_exit: release stacklock);
+    if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) {
+      TEVENT(fast_exit: release stack-lock);
       return;
     }
   }

+  // We have to take the slow-path of possible inflation and then exit.
   ObjectSynchronizer::inflate(THREAD,
                               object,
                               inflate_cause_vm_internal)->exit(true, THREAD);
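The restructured fast_exit now reads in three steps: a NULL displaced header marks a recursive enter (nothing to restore), a stack-locked object has its mark word CAS-swung from the BasicLock back to the saved header, and anything else takes the inflation slow path. A condensed sketch of that protocol with std::atomic stand-ins for markOop and Atomic::cmpxchg_ptr (types and names are illustrative, not the HotSpot API):

```cpp
#include <atomic>
#include <cstdint>

typedef uintptr_t Mark;  // stand-in for markOop

struct ToyObject    { std::atomic<Mark> mark; };
struct ToyBasicLock { Mark displaced_header; };

// Returns true if the fast path released the lock.
bool fast_exit(ToyObject* obj, ToyBasicLock* lock) {
  Mark dhw = lock->displaced_header;
  if (dhw == 0) {
    // NULL displaced header: this exit matches a recursive enter, so the
    // outermost BasicLock still holds the real header; nothing to restore.
    return true;
  }
  Mark expected = (Mark)(uintptr_t)lock;  // mark should still point at our BasicLock
  if (obj->mark.compare_exchange_strong(expected, dhw)) {
    return true;  // swung the displaced header back into the mark word
  }
  return false;   // racing inflation/contention: take the slow path and exit there
}
```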
@@ -1747,6 +1761,7 @@ class ReleaseJavaMonitorsClosure: public MonitorClosure {
   void do_monitor(ObjectMonitor* mid) {
     if (mid->owner() == THREAD) {
       if (ObjectMonitor::Knob_VerifyMatch != 0) {
         ResourceMark rm;
         Handle obj((oop) mid->object());
+        tty->print("INFO: unexpected locked object:");
         javaVFrame::print_locked_object_class_name(tty, obj, "locked");