8262910: Cleanup THREAD/TRAPS/naming and typing issues in ObjectMonitor and related code

Reviewed-by: coleenp, pchilanomate, dcubed, cjplummer, sspitsyn
David Holmes 2021-03-10 22:33:56 +00:00
parent 57f16f9fe5
commit c6d74bd933
18 changed files with 470 additions and 521 deletions
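
The diff below repeatedly replaces a generic Thread*/TRAPS parameter with an explicit JavaThread* current in functions that only a Java thread can call and that cannot post exceptions, while keeping THREAD/TRAPS only on paths that may throw. The following is a minimal standalone sketch of that calling convention; ToyJavaThread and both functions are invented stand-ins, not HotSpot's real types or macros.

#include <cstdio>

// Toy model of the two calling conventions this change set distinguishes.
struct ToyJavaThread {
  const char* name;
  bool        has_pending_exception = false;
};

// May post an exception: the caller supplies the current thread (what the
// TRAPS/THREAD machinery provides) and must check for a pending exception.
void wait_millis(long millis, ToyJavaThread* THREAD) {
  if (millis < 0) {
    THREAD->has_pending_exception = true;  // "THROW_MSG(IllegalArgumentException, ...)"
    return;
  }
  // ... wait ...
}

// Can never throw: it simply takes the current JavaThread explicitly,
// under the name "current".
void wait_uninterruptibly(ToyJavaThread* current) {
  std::printf("%s waits until notified\n", current->name);
}

int main() {
  ToyJavaThread t{"worker"};
  wait_millis(-1, &t);
  if (t.has_pending_exception) std::printf("exception pending\n");
  wait_uninterruptibly(&t);
  return 0;
}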


@ -80,7 +80,7 @@ size_t MonitorList::max() const {
// Walk the in-use list and unlink (at most MonitorDeflationMax) deflated
// ObjectMonitors. Returns the number of unlinked ObjectMonitors.
size_t MonitorList::unlink_deflated(Thread* self, LogStream* ls,
size_t MonitorList::unlink_deflated(Thread* current, LogStream* ls,
elapsedTimer* timer_p,
GrowableArray<ObjectMonitor*>* unlinked_list) {
size_t unlinked_count = 0;
@ -124,9 +124,9 @@ size_t MonitorList::unlink_deflated(Thread* self, LogStream* ls,
m = m->next_om();
}
if (self->is_Java_thread()) {
if (current->is_Java_thread()) {
// A JavaThread must check for a safepoint/handshake and honor it.
ObjectSynchronizer::chk_for_block_req(self->as_Java_thread(), "unlinking",
ObjectSynchronizer::chk_for_block_req(current->as_Java_thread(), "unlinking",
"unlinked_count", unlinked_count,
ls, timer_p);
}
@ -263,14 +263,13 @@ static uintx _no_progress_cnt = 0;
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
bool ObjectSynchronizer::quick_notify(oopDesc* obj, Thread* self, bool all) {
assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
assert(self->as_Java_thread()->thread_state() == _thread_in_Java, "invariant");
bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
assert(current->thread_state() == _thread_in_Java, "invariant");
NoSafepointVerifier nsv;
if (obj == NULL) return false; // slow-path for invalid obj
const markWord mark = obj->mark();
if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// Degenerate notify
// stack-locked by caller so by definition the implied waitset is empty.
return true;
@ -279,20 +278,20 @@ bool ObjectSynchronizer::quick_notify(oopDesc* obj, Thread* self, bool all) {
if (mark.has_monitor()) {
ObjectMonitor* const mon = mark.monitor();
assert(mon->object() == oop(obj), "invariant");
if (mon->owner() != self) return false; // slow-path for IMS exception
if (mon->owner() != current) return false; // slow-path for IMS exception
if (mon->first_waiter() != NULL) {
// We have one or more waiters. Since this is an inflated monitor
// that we own, we can transfer one or more threads from the waitset
// to the entrylist here and now, avoiding the slow-path.
if (all) {
DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
DTRACE_MONITOR_PROBE(notifyAll, mon, obj, current);
} else {
DTRACE_MONITOR_PROBE(notify, mon, obj, self);
DTRACE_MONITOR_PROBE(notify, mon, obj, current);
}
int free_count = 0;
do {
mon->INotify(self);
mon->INotify(current);
++free_count;
} while (mon->first_waiter() != NULL && all);
OM_PERFDATA_OP(Notifications, inc(free_count));
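
The fast path above can move waiters directly because the caller already owns the inflated monitor: INotify() transfers one thread from the wait set to the entry list per iteration, and the loop repeats while waiters remain and all is true. The following toy model illustrates that transfer with integer thread ids and deques instead of HotSpot's ObjectWaiter lists.

#include <deque>
#include <cstdio>

// Toy model of the wait-set to entry-list transfer done by the fast path.
struct ToyMonitor {
  std::deque<int> wait_set;    // threads blocked in wait()
  std::deque<int> entry_list;  // threads queued to re-acquire the monitor

  // Mirrors the do { INotify } while (first_waiter != NULL && all) loop.
  int notify(bool all) {
    int moved = 0;
    do {
      if (wait_set.empty()) break;
      entry_list.push_back(wait_set.front());  // "INotify(current)"
      wait_set.pop_front();
      ++moved;
    } while (!wait_set.empty() && all);
    return moved;                               // analogous to free_count
  }
};

int main() {
  ToyMonitor m;
  m.wait_set = {101, 102, 103};
  std::printf("notify moved %d\n", m.notify(false));   // 1
  std::printf("notifyAll moved %d\n", m.notify(true)); // 2
  return 0;
}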
@ -311,10 +310,9 @@ bool ObjectSynchronizer::quick_notify(oopDesc* obj, Thread* self, bool all) {
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.
bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
BasicLock * lock) {
assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
assert(self->as_Java_thread()->thread_state() == _thread_in_Java, "invariant");
assert(current->thread_state() == _thread_in_Java, "invariant");
NoSafepointVerifier nsv;
if (obj == NULL) return false; // Need to throw NPE
@ -332,14 +330,14 @@ bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
if (m->object_peek() == NULL) {
return false;
}
Thread* const owner = (Thread *) m->owner_raw();
JavaThread* const owner = (JavaThread*) m->owner_raw();
// Lock contention and Transactional Lock Elision (TLE) diagnostics
// and observability
// Case: light contention possibly amenable to TLE
// Case: TLE inimical operations such as nested/recursive synchronization
if (owner == self) {
if (owner == current) {
m->_recursions++;
return true;
}
@ -356,7 +354,7 @@ bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
// and last are the inflated Java Monitor (ObjectMonitor) checks.
lock->set_displaced_header(markWord::unused_mark());
if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
if (owner == NULL && m->try_set_owner_from(NULL, current) == NULL) {
assert(m->_recursions == 0, "invariant");
return true;
}
@ -373,10 +371,8 @@ bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
}
// Handle notifications when synchronizing on value based classes
void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, Thread* current) {
JavaThread* self = current->as_Java_thread();
frame last_frame = self->last_frame();
void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* current) {
frame last_frame = current->last_frame();
bool bcp_was_adjusted = false;
// Don't decrement bcp if it points to the frame's first instruction. This happens when
// handle_sync_on_value_based_class() is called because of a synchronized method. There
@ -389,9 +385,9 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, Thread* cu
}
if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
ResourceMark rm(self);
ResourceMark rm(current);
stringStream ss;
self->print_stack_on(&ss);
current->print_stack_on(&ss);
char* base = (char*)strstr(ss.base(), "at");
char* newline = (char*)strchr(ss.base(), '\n');
if (newline != NULL) {
@ -400,13 +396,13 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, Thread* cu
fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
} else {
assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
ResourceMark rm(self);
ResourceMark rm(current);
Log(valuebasedclasses) vblog;
vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
if (self->has_last_Java_frame()) {
if (current->has_last_Java_frame()) {
LogStream info_stream(vblog.info());
self->print_stack_on(&info_stream);
current->print_stack_on(&info_stream);
} else {
vblog.info("Cannot find the last Java frame");
}
@ -429,17 +425,13 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, Thread* cu
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race condition. Be careful.
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
if (obj->klass()->is_value_based()) {
handle_sync_on_value_based_class(obj, THREAD);
handle_sync_on_value_based_class(obj, current);
}
if (UseBiasedLocking) {
if (!SafepointSynchronize::is_at_safepoint()) {
BiasedLocking::revoke(obj, THREAD);
} else {
BiasedLocking::revoke_at_safepoint(obj);
}
BiasedLocking::revoke(obj, current);
}
markWord mark = obj->mark();
@ -454,7 +446,7 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
}
// Fall through to inflate() ...
} else if (mark.has_locker() &&
THREAD->is_lock_owned((address)mark.locker())) {
current->is_lock_owned((address)mark.locker())) {
assert(lock != mark.locker(), "must not re-lock the same lock");
assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
lock->set_displaced_header(markWord::from_pointer(NULL));
@ -470,14 +462,14 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
// enter() can make the ObjectMonitor busy. enter() returns false if
// we have lost the race to async deflation and we simply try again.
while (true) {
ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_monitor_enter);
if (monitor->enter(THREAD)) {
ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
if (monitor->enter(current)) {
return;
}
}
}
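
enter() above fails only when an asynchronous deflater wins the race after inflate() returns, so the caller simply inflates again and retries; the same idiom appears later in reenter() and jni_enter(). Here is a standalone sketch of that retry loop; ToyObjectMonitor and inflate_toy are illustrative stand-ins, not HotSpot API.

#include <cstdio>

// Sketch of the inflate-then-enter retry idiom used against async deflation.
struct ToyObjectMonitor {
  bool deflated;
  bool enter() { return !deflated; }   // false means we lost the race
};

static int g_inflate_calls = 0;

ToyObjectMonitor* inflate_toy() {
  static ToyObjectMonitor m;
  // Simulate losing the race to async deflation on the first attempt only.
  m.deflated = (++g_inflate_calls == 1);
  return &m;
}

void enter_with_retry() {
  while (true) {
    ToyObjectMonitor* monitor = inflate_toy();
    if (monitor->enter()) {
      return;                          // monitor is now busy and owned by us
    }
    // lost the race to async deflation: loop and inflate again
  }
}

int main() {
  enter_with_retry();
  std::printf("entered after %d inflate() call(s)\n", g_inflate_calls);  // 2
  return 0;
}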
void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
markWord mark = object->mark();
// We cannot check for Biased Locking if we are racing an inflation.
assert(mark == markWord::INFLATING() ||
@ -494,7 +486,7 @@ void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
// inflated is safe; see the has_monitor() comment below.
assert(!mark.is_neutral(), "invariant");
assert(!mark.has_locker() ||
THREAD->is_lock_owned((address)mark.locker()), "invariant");
current->is_lock_owned((address)mark.locker()), "invariant");
if (mark.has_monitor()) {
// The BasicLock's displaced_header is marked as a recursive
// enter and we have an inflated Java Monitor (ObjectMonitor).
@ -506,7 +498,7 @@ void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
// does not own the Java Monitor.
ObjectMonitor* m = mark.monitor();
assert(m->object()->mark() == mark, "invariant");
assert(m->is_entered(THREAD), "invariant");
assert(m->is_entered(current), "invariant");
}
}
#endif
@ -525,8 +517,8 @@ void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
// We have to take the slow-path of possible inflation and then exit.
// The ObjectMonitor* can't be async deflated until ownership is
// dropped inside exit() and the ObjectMonitor* must be !is_busy().
ObjectMonitor* monitor = inflate(THREAD, object, inflate_cause_vm_internal);
monitor->exit(true, THREAD);
ObjectMonitor* monitor = inflate(current, object, inflate_cause_vm_internal);
monitor->exit(true, current);
}
// -----------------------------------------------------------------------------
@ -541,23 +533,23 @@ void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
// 4) reenter lock1 with original recursion count
// 5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
intx ObjectSynchronizer::complete_exit(Handle obj, JavaThread* current) {
if (UseBiasedLocking) {
BiasedLocking::revoke(obj, THREAD);
BiasedLocking::revoke(obj, current);
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
// The ObjectMonitor* can't be async deflated until ownership is
// dropped inside exit() and the ObjectMonitor* must be !is_busy().
ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
intptr_t ret_code = monitor->complete_exit(THREAD);
ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
intptr_t ret_code = monitor->complete_exit(current);
return ret_code;
}
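
complete_exit() releases the monitor no matter how deeply it is recursively locked and hands back the saved recursion count, which reenter() later restores after re-acquiring the lock. The toy monitor below illustrates that save/restore protocol with plain integers; it is not HotSpot code.

#include <cassert>
#include <cstdio>

// Toy illustration of the complete_exit()/reenter() pair.
struct ToyMonitor {
  int owner = 0;        // 0 == unowned; otherwise a thread id
  int recursions = 0;

  void enter(int tid) {
    if (owner == tid) { recursions++; return; }
    assert(owner == 0);
    owner = tid;
  }
  int complete_exit(int tid) {
    assert(owner == tid);
    int saved = recursions;
    recursions = 0;
    owner = 0;          // fully released, regardless of recursion depth
    return saved;
  }
  void reenter(int tid, int saved) {
    enter(tid);         // re-acquire ...
    recursions = saved; // ... and restore the original recursion count
  }
};

int main() {
  ToyMonitor m;
  m.enter(1); m.enter(1); m.enter(1);       // recursions == 2
  int saved = m.complete_exit(1);           // monitor fully released
  m.reenter(1, saved);
  std::printf("recursions restored: %d\n", m.recursions);  // 2
  return 0;
}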
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
void ObjectSynchronizer::reenter(Handle obj, intx recursions, JavaThread* current) {
if (UseBiasedLocking) {
BiasedLocking::revoke(obj, THREAD);
BiasedLocking::revoke(obj, current);
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
@ -566,8 +558,8 @@ void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
// enter() returns false if we have lost the race to async deflation
// and we simply try again.
while (true) {
ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
if (monitor->reenter(recursions, THREAD)) {
ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_vm_internal);
if (monitor->reenter(recursions, current)) {
return;
}
}
@ -576,53 +568,54 @@ void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
void ObjectSynchronizer::jni_enter(Handle obj, JavaThread* current) {
if (obj->klass()->is_value_based()) {
handle_sync_on_value_based_class(obj, THREAD);
handle_sync_on_value_based_class(obj, current);
}
// the current locking is from JNI instead of Java code
if (UseBiasedLocking) {
BiasedLocking::revoke(obj, THREAD);
BiasedLocking::revoke(obj, current);
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
THREAD->set_current_pending_monitor_is_from_java(false);
current->set_current_pending_monitor_is_from_java(false);
// An async deflation can race after the inflate() call and before
// enter() can make the ObjectMonitor busy. enter() returns false if
// we have lost the race to async deflation and we simply try again.
while (true) {
ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_jni_enter);
if (monitor->enter(THREAD)) {
ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_jni_enter);
if (monitor->enter(current)) {
break;
}
}
THREAD->set_current_pending_monitor_is_from_java(true);
current->set_current_pending_monitor_is_from_java(true);
}
// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
void ObjectSynchronizer::jni_exit(oop obj, TRAPS) {
JavaThread* current = THREAD->as_Java_thread();
if (UseBiasedLocking) {
Handle h_obj(THREAD, obj);
BiasedLocking::revoke(h_obj, THREAD);
Handle h_obj(current, obj);
BiasedLocking::revoke(h_obj, current);
obj = h_obj();
}
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
// The ObjectMonitor* can't be async deflated until ownership is
// dropped inside exit() and the ObjectMonitor* must be !is_busy().
ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
ObjectMonitor* monitor = inflate(current, obj, inflate_cause_jni_exit);
// If this thread has locked the object, exit the monitor. We
// intentionally do not use CHECK here because we must exit the
// monitor even if an exception is pending.
// intentionally do not use CHECK on check_owner because we must exit the
// monitor even if an exception was already pending.
if (monitor->check_owner(THREAD)) {
monitor->exit(true, THREAD);
monitor->exit(true, current);
}
}
// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread) {
ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
_thread = thread;
_thread->check_for_valid_safepoint_state();
_obj = obj;
@ -643,8 +636,9 @@ ObjectLocker::~ObjectLocker() {
// Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
JavaThread* current = THREAD->as_Java_thread();
if (UseBiasedLocking) {
BiasedLocking::revoke(obj, THREAD);
BiasedLocking::revoke(obj, current);
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
if (millis < 0) {
@ -653,10 +647,10 @@ int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
// The ObjectMonitor* can't be async deflated because the _waiters
// field is incremented before ownership is dropped and decremented
// after ownership is regained.
ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);
ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
monitor->wait(millis, true, THREAD);
DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), current, millis);
monitor->wait(millis, true, THREAD); // Not CHECK as we need following code
// This dummy call is in place to get around dtrace bug 6254741. Once
// that's fixed we can uncomment the following line, remove the call
@ -666,52 +660,55 @@ int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
return ret_code;
}
void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
// No exceptions are possible in this case as we only use this internally when locking is
// correct and we have to wait until notified - so no interrupts or timeouts.
void ObjectSynchronizer::wait_uninterruptibly(Handle obj, JavaThread* current) {
if (UseBiasedLocking) {
BiasedLocking::revoke(obj, THREAD);
BiasedLocking::revoke(obj, current);
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
if (millis < 0) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
}
// The ObjectMonitor* can't be async deflated because the _waiters
// field is incremented before ownership is dropped and decremented
// after ownership is regained.
ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);
monitor->wait(millis, false, THREAD);
ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_wait);
monitor->wait(0 /* wait-forever */, false /* not interruptible */, current);
}
void ObjectSynchronizer::notify(Handle obj, TRAPS) {
JavaThread* current = THREAD->as_Java_thread();
if (UseBiasedLocking) {
BiasedLocking::revoke(obj, THREAD);
BiasedLocking::revoke(obj, current);
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
markWord mark = obj->mark();
if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// Not inflated so there can't be any waiters to notify.
return;
}
// The ObjectMonitor* can't be async deflated until ownership is
// dropped by the calling thread.
ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_notify);
monitor->notify(THREAD);
ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
monitor->notify(CHECK);
}
// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
JavaThread* current = THREAD->as_Java_thread();
if (UseBiasedLocking) {
BiasedLocking::revoke(obj, THREAD);
BiasedLocking::revoke(obj, current);
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
markWord mark = obj->mark();
if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
// Not inflated so there can't be any waiters to notify.
return;
}
// The ObjectMonitor* can't be async deflated until ownership is
// dropped by the calling thread.
ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_notify);
monitor->notifyAll(THREAD);
ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_notify);
monitor->notifyAll(CHECK);
}
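
notify(CHECK) and notifyAll(CHECK) above return to the caller as soon as the callee leaves an exception pending on the current thread. The sketch below models that early-return behaviour explicitly rather than with HotSpot's actual CHECK macro; ToyThread and both functions are invented for illustration.

#include <cstdio>

// Simplified model of what a CHECK-style call site does in spirit:
// call, then return immediately if the callee posted an exception.
struct ToyThread { bool pending_exception = false; };

void monitor_notify(ToyThread* current) {
  // e.g. the owner check fails and an IllegalMonitorStateException is posted
  current->pending_exception = true;
}

void synchronizer_notify(ToyThread* current) {
  monitor_notify(current);
  if (current->pending_exception) {
    return;                                     // the early return CHECK inserts
  }
  std::printf("no exception, keep going\n");    // not reached in this demo
}

int main() {
  ToyThread t;
  synchronizer_notify(&t);
  std::printf("pending? %s\n", t.pending_exception ? "yes" : "no");
  return 0;
}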
// -----------------------------------------------------------------------------
@ -774,7 +771,7 @@ static markWord read_stable_mark(oop obj) {
gInflationLocks[ix]->lock();
while (obj->mark() == markWord::INFLATING()) {
// Beware: naked_yield() is advisory and has almost no effect on some platforms
// so we periodically call self->_ParkEvent->park(1).
// so we periodically call current->_ParkEvent->park(1).
// We use a mixed spin/yield/block mechanism.
if ((YieldThenBlock++) >= 16) {
Thread::current()->_ParkEvent->park(1);
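
The mixed spin/yield/block mechanism referenced above yields for the first few iterations and only then falls back to a short timed block, since naked_yield() is merely advisory on some platforms. A standalone sketch of that backoff pattern follows; the threshold of 16 mirrors the code, everything else (names, sleep length) is illustrative.

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

// Spin/yield for a while, then block briefly, until the flag clears.
void wait_while(const std::atomic<bool>& busy) {
  int yield_then_block = 0;
  while (busy.load(std::memory_order_acquire)) {
    if (yield_then_block++ >= 16) {
      std::this_thread::sleep_for(std::chrono::milliseconds(1));  // "park(1)"
    } else {
      std::this_thread::yield();  // advisory only on some platforms
    }
  }
}

int main() {
  std::atomic<bool> busy{true};
  std::thread t([&] {
    std::this_thread::sleep_for(std::chrono::milliseconds(5));
    busy = false;                 // e.g. inflation finished
  });
  wait_while(busy);
  t.join();
  std::printf("mark is stable\n");
  return 0;
}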
@ -807,7 +804,7 @@ static markWord read_stable_mark(oop obj) {
// There are simple ways to "diffuse" the middle address bits over the
// generated hashCode values:
static inline intptr_t get_next_hash(Thread* self, oop obj) {
static inline intptr_t get_next_hash(Thread* current, oop obj) {
intptr_t value = 0;
if (hashCode == 0) {
// This form uses global Park-Miller RNG.
@ -830,14 +827,14 @@ static inline intptr_t get_next_hash(Thread* self, oop obj) {
// Marsaglia's xor-shift scheme with thread-specific state
// This is probably the best overall implementation -- we'll
// likely make this the default in future releases.
unsigned t = self->_hashStateX;
unsigned t = current->_hashStateX;
t ^= (t << 11);
self->_hashStateX = self->_hashStateY;
self->_hashStateY = self->_hashStateZ;
self->_hashStateZ = self->_hashStateW;
unsigned v = self->_hashStateW;
current->_hashStateX = current->_hashStateY;
current->_hashStateY = current->_hashStateZ;
current->_hashStateZ = current->_hashStateW;
unsigned v = current->_hashStateW;
v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
self->_hashStateW = v;
current->_hashStateW = v;
value = v;
}
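
The hashCode == 5 branch above is Marsaglia's xor-shift ("xor128") generator with per-thread state in _hashStateX.._hashStateW. The standalone version below reproduces the same update steps; the state lives in plain members and the seed constants are arbitrary demo values rather than the VM's initialization.

#include <cstdint>
#include <cstdio>

// Standalone model of the Marsaglia xor-shift scheme used by get_next_hash().
struct XorShift128 {
  uint32_t x, y, z, w;
  explicit XorShift128(uint32_t seed)
      : x(seed), y(842502087u), z(3579807591u), w(273326509u) {}

  uint32_t next() {
    uint32_t t = x;
    t ^= t << 11;
    x = y; y = z; z = w;                      // shift the state window
    w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
    return w;                                  // candidate hash value
  }
};

int main() {
  XorShift128 rng(0x9e3779b9u);                // arbitrary seed for the demo
  for (int i = 0; i < 4; i++) {
    std::printf("%u\n", rng.next());
  }
  return 0;
}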
@ -847,7 +844,7 @@ static inline intptr_t get_next_hash(Thread* self, oop obj) {
return value;
}
intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
if (UseBiasedLocking) {
// NOTE: many places throughout the JVM do not expect a safepoint
// to be taken here. However, we only ever bias Java instances and all
@ -857,11 +854,11 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
// thread-local storage.
if (obj->mark().has_bias_pattern()) {
// Handle for oop obj in case of STW safepoint
Handle hobj(self, obj);
Handle hobj(current, obj);
if (SafepointSynchronize::is_at_safepoint()) {
BiasedLocking::revoke_at_safepoint(hobj);
} else {
BiasedLocking::revoke(hobj, self);
BiasedLocking::revoke(hobj, current);
}
obj = hobj();
assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
@ -877,16 +874,16 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
// object should remain ineligible for biased locking
assert(!mark.has_bias_pattern(), "invariant");
if (mark.is_neutral()) { // if this is a normal header
if (mark.is_neutral()) { // if this is a normal header
hash = mark.hash();
if (hash != 0) { // if it has a hash, just return it
if (hash != 0) { // if it has a hash, just return it
return hash;
}
hash = get_next_hash(self, obj); // get a new hash
temp = mark.copy_set_hash(hash); // merge the hash into header
// try to install the hash
hash = get_next_hash(current, obj); // get a new hash
temp = mark.copy_set_hash(hash); // merge the hash into header
// try to install the hash
test = obj->cas_set_mark(temp, mark);
if (test == mark) { // if the hash was installed, return it
if (test == mark) { // if the hash was installed, return it
return hash;
}
// Failed to install the hash. It could be that another thread
@ -920,7 +917,7 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
}
// Fall thru so we only have one place that installs the hash in
// the ObjectMonitor.
} else if (self->is_lock_owned((address)mark.locker())) {
} else if (current->is_lock_owned((address)mark.locker())) {
// This is a stack lock owned by the calling thread so fetch the
// displaced markWord from the BasicLock on the stack.
temp = mark.displaced_mark_helper();
@ -943,14 +940,14 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
// An async deflation can race after the inflate() call and before we
// can update the ObjectMonitor's header with the hash value below.
monitor = inflate(self, obj, inflate_cause_hash_code);
monitor = inflate(current, obj, inflate_cause_hash_code);
// Load ObjectMonitor's header/dmw field and see if it has a hash.
mark = monitor->header();
assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
hash = mark.hash();
if (hash == 0) { // if it does not have a hash
hash = get_next_hash(self, obj); // get a new hash
temp = mark.copy_set_hash(hash); // merge the hash into header
if (hash == 0) { // if it does not have a hash
hash = get_next_hash(current, obj); // get a new hash
temp = mark.copy_set_hash(hash) ; // merge the hash into header
assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
test = markWord(v);
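
Both the neutral-header and the inflated paths of FastHashCode() install a hash the same way: merge it into a copy of the header word and publish it with a compare-and-swap, re-reading on failure because another thread may have installed a hash first. The model below uses an invented bit layout and std::atomic in place of markWord and Atomic::cmpxchg.

#include <atomic>
#include <cstdint>
#include <cstdio>

// Simplified "merge the hash into the header and CAS it in" step.
using header_t = uintptr_t;
constexpr header_t kHashShift = 8;
constexpr header_t kHashMask  = 0x7FFFFFu;    // illustrative layout only

header_t copy_set_hash(header_t header, header_t hash) {
  header &= ~(kHashMask << kHashShift);
  return header | ((hash & kHashMask) << kHashShift);
}

header_t get_hash(header_t header) {
  return (header >> kHashShift) & kHashMask;
}

int main() {
  std::atomic<header_t> header{0};             // neutral header, no hash yet
  header_t mark = header.load();
  if (get_hash(mark) == 0) {                   // if it does not have a hash
    header_t temp = copy_set_hash(mark, 0x1234);  // merge a new hash in
    if (header.compare_exchange_strong(mark, temp)) {  // try to install it
      std::printf("installed hash 0x%lx\n", (unsigned long)get_hash(temp));
    } else {
      std::printf("lost the race; re-read the header for the winner's hash\n");
    }
  }
  return 0;
}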
@ -985,28 +982,28 @@ intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
}
bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
Handle h_obj) {
if (UseBiasedLocking) {
BiasedLocking::revoke(h_obj, thread);
BiasedLocking::revoke(h_obj, current);
assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
}
assert(thread == JavaThread::current(), "Can only be called on current thread");
assert(current == JavaThread::current(), "Can only be called on current thread");
oop obj = h_obj();
markWord mark = read_stable_mark(obj);
// Uncontended case, header points to stack
if (mark.has_locker()) {
return thread->is_lock_owned((address)mark.locker());
return current->is_lock_owned((address)mark.locker());
}
// Contended case, header points to ObjectMonitor (tagged pointer)
if (mark.has_monitor()) {
// The first stage of async deflation does not affect any field
// used by this comparison so the ObjectMonitor* is usable here.
ObjectMonitor* monitor = mark.monitor();
return monitor->is_entered(thread) != 0;
return monitor->is_entered(current) != 0;
}
// Unlocked case, header in place
assert(mark.is_neutral(), "sanity check");
@ -1142,7 +1139,7 @@ bool ObjectSynchronizer::is_async_deflation_needed() {
}
bool ObjectSynchronizer::request_deflate_idle_monitors() {
Thread* self = Thread::current();
JavaThread* current = JavaThread::current();
bool ret_code = false;
jlong last_time = last_async_deflation_time_ns();
@ -1158,11 +1155,9 @@ bool ObjectSynchronizer::request_deflate_idle_monitors() {
ret_code = true;
break;
}
if (self->is_Java_thread()) {
{
// JavaThread has to honor the blocking protocol.
ThreadBlockInVM tbivm(self->as_Java_thread());
os::naked_short_sleep(999); // sleep for almost 1 second
} else {
ThreadBlockInVM tbivm(current);
os::naked_short_sleep(999); // sleep for almost 1 second
}
}
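
While the requesting JavaThread sleeps in the loop above it must advertise a blocked state so that it cannot hold up a safepoint, which is what the ThreadBlockInVM scope provides. The RAII guard below is a toy stand-in for that protocol; the state names and sleep length are invented.

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

enum class ToyState { in_vm, blocked };

struct ToyThread { std::atomic<ToyState> state{ToyState::in_vm}; };

// Toy stand-in for ThreadBlockInVM: publish "blocked" for the scope's
// duration, then restore the previous state.
struct ToyBlockInVM {
  ToyThread& t;
  explicit ToyBlockInVM(ToyThread& thread) : t(thread) {
    t.state.store(ToyState::blocked);   // safepoints need not wait for us now
  }
  ~ToyBlockInVM() { t.state.store(ToyState::in_vm); }
};

int main() {
  ToyThread current;
  for (int attempt = 0; attempt < 3; attempt++) {
    {
      ToyBlockInVM tbivm(current);      // honor the blocking protocol
      std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
    std::printf("attempt %d: state back to in_vm\n", attempt);
  }
  return 0;
}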
@ -1200,7 +1195,7 @@ void ObjectSynchronizer::inflate_helper(oop obj) {
(void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
}
ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object,
ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
const InflateCause cause) {
EventJavaMonitorInflate event;
@ -1297,7 +1292,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object,
m->set_header(dmw);
// Optimization: if the mark.locker stack address is associated
// with this thread we could simply set m->_owner = self.
// with this thread we could simply set m->_owner = current.
// Note that a thread can inflate an object
// that it has stack-locked -- as might happen in wait() -- directly
// with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
@ -1318,7 +1313,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object,
// to avoid false sharing on MP systems ...
OM_PERFDATA_OP(Inflations, inc());
if (log_is_enabled(Trace, monitorinflation)) {
ResourceMark rm(self);
ResourceMark rm(current);
lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(object),
object->mark().value(), object->klass()->external_name());
@ -1335,7 +1330,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object,
// pre-locked ObjectMonitor pointer into the object header. A successful
// CAS inflates the object *and* confers ownership to the inflating thread.
// In the current implementation we use a 2-step mechanism where we CAS()
// to inflate and then CAS() again to try to swing _owner from NULL to self.
// to inflate and then CAS() again to try to swing _owner from NULL to current.
// An inflateTry() method that we could call from enter() would be useful.
// Catch if the object's header is not neutral (not locked and
@ -1362,7 +1357,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object,
// cache lines to avoid false sharing on MP systems ...
OM_PERFDATA_OP(Inflations, inc());
if (log_is_enabled(Trace, monitorinflation)) {
ResourceMark rm(self);
ResourceMark rm(current);
lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(object),
object->mark().value(), object->klass()->external_name());
@ -1374,10 +1369,10 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object,
}
}
void ObjectSynchronizer::chk_for_block_req(JavaThread* self, const char* op_name,
void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_name,
const char* cnt_name, size_t cnt,
LogStream* ls, elapsedTimer* timer_p) {
if (!SafepointMechanism::should_process(self)) {
if (!SafepointMechanism::should_process(current)) {
return;
}
@ -1392,7 +1387,7 @@ void ObjectSynchronizer::chk_for_block_req(JavaThread* self, const char* op_name
{
// Honor block request.
ThreadBlockInVM tbivm(self);
ThreadBlockInVM tbivm(current);
}
if (ls != NULL) {
@ -1405,7 +1400,7 @@ void ObjectSynchronizer::chk_for_block_req(JavaThread* self, const char* op_name
// Walk the in-use list and deflate (at most MonitorDeflationMax) idle
// ObjectMonitors. Returns the number of deflated ObjectMonitors.
size_t ObjectSynchronizer::deflate_monitor_list(Thread *self, LogStream* ls,
size_t ObjectSynchronizer::deflate_monitor_list(Thread* current, LogStream* ls,
elapsedTimer* timer_p) {
MonitorList::Iterator iter = _in_use_list.iterator();
size_t deflated_count = 0;
@ -1419,9 +1414,9 @@ size_t ObjectSynchronizer::deflate_monitor_list(Thread *self, LogStream* ls,
deflated_count++;
}
if (self->is_Java_thread()) {
if (current->is_Java_thread()) {
// A JavaThread must check for a safepoint/handshake and honor it.
chk_for_block_req(self->as_Java_thread(), "deflation", "deflated_count",
chk_for_block_req(current->as_Java_thread(), "deflation", "deflated_count",
deflated_count, ls, timer_p);
}
}
@ -1443,8 +1438,8 @@ class HandshakeForDeflation : public HandshakeClosure {
// ObjectMonitors. It is also called via do_final_audit_and_print_stats()
// by the VMThread.
size_t ObjectSynchronizer::deflate_idle_monitors() {
Thread* self = Thread::current();
if (self->is_Java_thread()) {
Thread* current = Thread::current();
if (current->is_Java_thread()) {
// The async deflation request has been processed.
_last_async_deflation_time_ns = os::javaTimeNanos();
set_is_async_deflation_requested(false);
@ -1467,7 +1462,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors() {
}
// Deflate some idle ObjectMonitors.
size_t deflated_count = deflate_monitor_list(self, ls, &timer);
size_t deflated_count = deflate_monitor_list(current, ls, &timer);
if (deflated_count > 0 || is_final_audit()) {
// There are ObjectMonitors that have been deflated or this is the
// final audit and all the remaining ObjectMonitors have been
@ -1477,9 +1472,9 @@ size_t ObjectSynchronizer::deflate_idle_monitors() {
// Unlink deflated ObjectMonitors from the in-use list.
ResourceMark rm;
GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
size_t unlinked_count = _in_use_list.unlink_deflated(self, ls, &timer,
size_t unlinked_count = _in_use_list.unlink_deflated(current, ls, &timer,
&delete_list);
if (self->is_Java_thread()) {
if (current->is_Java_thread()) {
if (ls != NULL) {
timer.stop();
ls->print_cr("before handshaking: unlinked_count=" SIZE_FORMAT
@ -1509,9 +1504,9 @@ size_t ObjectSynchronizer::deflate_idle_monitors() {
delete monitor;
deleted_count++;
if (self->is_Java_thread()) {
if (current->is_Java_thread()) {
// A JavaThread must check for a safepoint/handshake and honor it.
chk_for_block_req(self->as_Java_thread(), "deletion", "deleted_count",
chk_for_block_req(current->as_Java_thread(), "deletion", "deleted_count",
deleted_count, ls, &timer);
}
}
@ -1544,22 +1539,20 @@ size_t ObjectSynchronizer::deflate_idle_monitors() {
// Monitor cleanup on JavaThread::exit
// Iterate through monitor cache and attempt to release thread's monitors
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
private:
TRAPS;
JavaThread* _thread;
public:
ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
ReleaseJavaMonitorsClosure(JavaThread* thread) : _thread(thread) {}
void do_monitor(ObjectMonitor* mid) {
if (mid->owner() == THREAD) {
(void)mid->complete_exit(CHECK);
if (mid->owner() == _thread) {
(void)mid->complete_exit(_thread);
}
}
};
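
The closure above now carries the detaching JavaThread directly instead of a TRAPS member, and complete_exit() fully releases any monitor that thread still owns. Below is a toy version of the iterate-and-release pattern using a vector of monitors and integer thread ids in place of HotSpot's types.

#include <cstdio>
#include <vector>

// Toy model of MonitorClosure / monitors_iterate(): visit every in-use
// monitor and fully release the ones owned by the given thread.
struct ToyMonitor { int owner; int recursions; };

struct ReleaseClosure {
  int thread_id;
  void do_monitor(ToyMonitor& m) const {
    if (m.owner == thread_id) {
      m.recursions = 0;       // "complete_exit": drop recursions ...
      m.owner = 0;            // ... and ownership
    }
  }
};

void monitors_iterate(std::vector<ToyMonitor>& in_use, const ReleaseClosure& cl) {
  for (ToyMonitor& m : in_use) {
    cl.do_monitor(m);
  }
}

int main() {
  std::vector<ToyMonitor> in_use = {{1, 2}, {2, 0}, {1, 0}};
  monitors_iterate(in_use, ReleaseClosure{1});
  for (const ToyMonitor& m : in_use) {
    std::printf("owner=%d recursions=%d\n", m.owner, m.recursions);
  }
  return 0;
}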
// Release all inflated monitors owned by THREAD. Lightweight monitors are
// Release all inflated monitors owned by current thread. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
@ -1574,12 +1567,13 @@ class ReleaseJavaMonitorsClosure: public MonitorClosure {
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.
void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
assert(THREAD == JavaThread::current(), "must be current Java thread");
void ObjectSynchronizer::release_monitors_owned_by_thread(JavaThread* current) {
assert(current == JavaThread::current(), "must be current Java thread");
NoSafepointVerifier nsv;
ReleaseJavaMonitorsClosure rjmc(THREAD);
ReleaseJavaMonitorsClosure rjmc(current);
ObjectSynchronizer::monitors_iterate(&rjmc);
THREAD->clear_pending_exception();
assert(!current->has_pending_exception(), "Should not be possible");
current->clear_pending_exception();
}
const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {