8047156: cleanup more non-indent white space issues prior to Contended Locking cleanup bucket

Checkpoint some missed do_space_filter.ksh cleanups for Contended Locking.

Reviewed-by: sspitsyn, lfoltan, coleenp
Author: Daniel D. Daugherty  2014-06-18 14:21:28 -07:00
parent d8ba26e6df
commit c1c9f333d5

7 changed files with 320 additions and 320 deletions


@@ -191,16 +191,16 @@ public:
 class PlatformEvent : public CHeapObj<mtInternal> {
   private:
-    double CachePad [4] ;   // increase odds that _mutex is sole occupant of cache line
-    volatile int _Event ;
-    volatile int _nParked ;
-    pthread_mutex_t _mutex [1] ;
-    pthread_cond_t _cond [1] ;
-    double PostPad [2] ;
-    Thread * _Assoc ;
+    double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
+    volatile int _Event;
+    volatile int _nParked;
+    pthread_mutex_t _mutex[1];
+    pthread_cond_t _cond[1];
+    double PostPad[2];
+    Thread * _Assoc;
   public:       // TODO-FIXME: make dtor private
-    ~PlatformEvent() { guarantee (0, "invariant") ; }
+    ~PlatformEvent() { guarantee(0, "invariant"); }
   public:
     PlatformEvent() {
@@ -209,28 +209,28 @@ class PlatformEvent : public CHeapObj<mtInternal> {
       assert_status(status == 0, status, "cond_init");
       status = pthread_mutex_init (_mutex, NULL);
       assert_status(status == 0, status, "mutex_init");
-      _Event = 0 ;
-      _nParked = 0 ;
-      _Assoc = NULL ;
+      _Event = 0;
+      _nParked = 0;
+      _Assoc = NULL;
     }
     // Use caution with reset() and fired() -- they may require MEMBARs
-    void reset() { _Event = 0 ; }
+    void reset() { _Event = 0; }
     int fired() { return _Event; }
-    void park () ;
-    void unpark () ;
-    int TryPark () ;
-    int park (jlong millis) ;
-    void SetAssociation (Thread * a) { _Assoc = a ; }
+    void park();
+    void unpark();
+    int TryPark();
+    int park(jlong millis);
+    void SetAssociation(Thread * a) { _Assoc = a; }
 };

 class PlatformParker : public CHeapObj<mtInternal> {
   protected:
-    pthread_mutex_t _mutex [1] ;
-    pthread_cond_t _cond [1] ;
+    pthread_mutex_t _mutex[1];
+    pthread_cond_t _cond[1];
   public:       // TODO-FIXME: make dtor private
-    ~PlatformParker() { guarantee (0, "invariant") ; }
+    ~PlatformParker() { guarantee(0, "invariant"); }
   public:
     PlatformParker() {

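The CachePad and PostPad members changed above exist to reduce false sharing: they make it likelier that the hot _Event/_mutex fields sit alone on a cache line, so writes by other threads to neighboring objects do not keep invalidating that line. A minimal standalone sketch of the same idiom; the names and the 64-byte line size are illustrative assumptions, not HotSpot's:

    #include <atomic>

    // Padding idiom from PlatformEvent above: filler on both sides of the
    // hot field raises the odds it is the sole occupant of a cache line.
    // 64 bytes is an assumed line size; HotSpot pads with double arrays.
    struct PaddedEvent {
      char pre_pad[64];           // plays the role of CachePad[4]
      std::atomic<int> event{0};  // hot flag, analogous to _Event
      char post_pad[64];          // plays the role of PostPad[2]
    };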

@@ -287,16 +287,16 @@ public:
 class PlatformEvent : public CHeapObj<mtInternal> {
   private:
-    double CachePad [4] ;   // increase odds that _mutex is sole occupant of cache line
-    volatile int _Event ;
-    volatile int _nParked ;
-    pthread_mutex_t _mutex [1] ;
-    pthread_cond_t _cond [1] ;
-    double PostPad [2] ;
-    Thread * _Assoc ;
+    double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
+    volatile int _Event;
+    volatile int _nParked;
+    pthread_mutex_t _mutex[1];
+    pthread_cond_t _cond[1];
+    double PostPad[2];
+    Thread * _Assoc;
   public:       // TODO-FIXME: make dtor private
-    ~PlatformEvent() { guarantee (0, "invariant") ; }
+    ~PlatformEvent() { guarantee(0, "invariant"); }
   public:
     PlatformEvent() {
@@ -305,20 +305,20 @@ class PlatformEvent : public CHeapObj<mtInternal> {
       assert_status(status == 0, status, "cond_init");
       status = pthread_mutex_init (_mutex, NULL);
       assert_status(status == 0, status, "mutex_init");
-      _Event = 0 ;
-      _nParked = 0 ;
-      _Assoc = NULL ;
+      _Event = 0;
+      _nParked = 0;
+      _Assoc = NULL;
     }
     // Use caution with reset() and fired() -- they may require MEMBARs
-    void reset() { _Event = 0 ; }
+    void reset() { _Event = 0; }
     int fired() { return _Event; }
-    void park () ;
-    void unpark () ;
-    int TryPark () ;
-    int park (jlong millis) ; // relative timed-wait only
-    void SetAssociation (Thread * a) { _Assoc = a ; }
-} ;
+    void park();
+    void unpark();
+    int TryPark();
+    int park(jlong millis); // relative timed-wait only
+    void SetAssociation(Thread * a) { _Assoc = a; }
+};

 class PlatformParker : public CHeapObj<mtInternal> {
   protected:
@@ -327,11 +327,11 @@ class PlatformParker : public CHeapObj<mtInternal> {
       ABS_INDEX = 1
     };
     int _cur_index;  // which cond is in use: -1, 0, 1
-    pthread_mutex_t _mutex [1] ;
-    pthread_cond_t _cond [2] ; // one for relative times and one for abs.
+    pthread_mutex_t _mutex[1];
+    pthread_cond_t _cond[2]; // one for relative times and one for abs.
   public:       // TODO-FIXME: make dtor private
-    ~PlatformParker() { guarantee (0, "invariant") ; }
+    ~PlatformParker() { guarantee(0, "invariant"); }
   public:
     PlatformParker() {


@@ -301,48 +301,48 @@ class Solaris {
 class PlatformEvent : public CHeapObj<mtInternal> {
   private:
-    double CachePad [4] ;   // increase odds that _mutex is sole occupant of cache line
-    volatile int _Event ;
-    int _nParked ;
-    int _pipev [2] ;
-    mutex_t _mutex [1] ;
-    cond_t _cond [1] ;
-    double PostPad [2] ;
+    double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
+    volatile int _Event;
+    int _nParked;
+    int _pipev[2];
+    mutex_t _mutex[1];
+    cond_t _cond[1];
+    double PostPad[2];
   protected:
     // Defining a protected ctor effectively gives us an abstract base class.
     // That is, a PlatformEvent can never be instantiated "naked" but only
     // as a part of a ParkEvent (recall that ParkEvent extends PlatformEvent).
     // TODO-FIXME: make dtor private
-    ~PlatformEvent() { guarantee (0, "invariant") ; }
+    ~PlatformEvent() { guarantee(0, "invariant"); }
     PlatformEvent() {
       int status;
       status = os::Solaris::cond_init(_cond);
       assert_status(status == 0, status, "cond_init");
       status = os::Solaris::mutex_init(_mutex);
       assert_status(status == 0, status, "mutex_init");
-      _Event = 0 ;
-      _nParked = 0 ;
-      _pipev[0] = _pipev[1] = -1 ;
+      _Event = 0;
+      _nParked = 0;
+      _pipev[0] = _pipev[1] = -1;
     }
   public:
     // Exercise caution using reset() and fired() -- they may require MEMBARs
-    void reset() { _Event = 0 ; }
+    void reset() { _Event = 0; }
     int fired() { return _Event; }
-    void park () ;
-    int park (jlong millis) ;
-    int TryPark () ;
-    void unpark () ;
-} ;
+    void park();
+    int park(jlong millis);
+    int TryPark();
+    void unpark();
+};

 class PlatformParker : public CHeapObj<mtInternal> {
   protected:
-    mutex_t _mutex [1] ;
-    cond_t _cond [1] ;
+    mutex_t _mutex[1];
+    cond_t _cond[1];
   public:       // TODO-FIXME: make dtor private
-    ~PlatformParker() { guarantee (0, "invariant") ; }
+    ~PlatformParker() { guarantee(0, "invariant"); }
   public:
     PlatformParker() {
@@ -352,6 +352,6 @@ class PlatformParker : public CHeapObj<mtInternal> {
       status = os::Solaris::mutex_init(_mutex);
       assert_status(status == 0, status, "mutex_init");
     }
-} ;
+};

 #endif // OS_SOLARIS_VM_OS_SOLARIS_HPP


@@ -116,7 +116,7 @@ class Atomic : AllStatic {
   atomic_decl
 #else
 #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \
-  atomic_decl ; \
+  atomic_decl; \
   non_atomic_decl
 #endif

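The one-character change above is worth a second look: ATOMIC_SHORT_PAIR takes whole declarations as arguments, one branch of the conditional emits only the atomic declaration, and the other emits both, joined by the semicolon that lives inside the macro body. The cleanup therefore has to keep that semicolon attached to the first argument. A simplified model of the expansion; the add_short names are hypothetical, not the real atomic.hpp declarations:

    // Both declarations are emitted here, separated by the semicolon that
    // this commit moved flush against the first argument.
    #define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \
      atomic_decl;                                          \
      non_atomic_decl

    ATOMIC_SHORT_PAIR(
      void add_short(short v),         // hypothetical atomic variant
      void add_short_no_lock(short v)  // hypothetical fallback variant
    );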

@@ -269,62 +269,62 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 #define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
 #define UNS(x) (uintptr_t(x))
-#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}
+#define TRACE(m) { static volatile int ctr = 0; int x = ++ctr; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}

 // Simplistic low-quality Marsaglia SHIFT-XOR RNG.
 // Bijective except for the trailing mask operation.
 // Useful for spin loops as the compiler can't optimize it away.
 static inline jint MarsagliaXORV (jint x) {
-  if (x == 0) x = 1|os::random() ;
+  if (x == 0) x = 1|os::random();
   x ^= x << 6;
   x ^= ((unsigned)x) >> 21;
-  x ^= x << 7 ;
-  return x & 0x7FFFFFFF ;
+  x ^= x << 7;
+  return x & 0x7FFFFFFF;
 }

 static int Stall (int its) {
-  static volatile jint rv = 1 ;
-  volatile int OnFrame = 0 ;
-  jint v = rv ^ UNS(OnFrame) ;
+  static volatile jint rv = 1;
+  volatile int OnFrame = 0;
+  jint v = rv ^ UNS(OnFrame);
   while (--its >= 0) {
-    v = MarsagliaXORV (v) ;
+    v = MarsagliaXORV(v);
   }
   // Make this impossible for the compiler to optimize away,
   // but (mostly) avoid W coherency sharing on MP systems.
-  if (v == 0x12345) rv = v ;
-  return v ;
+  if (v == 0x12345) rv = v;
+  return v;
 }

-int Monitor::TryLock () {
-  intptr_t v = _LockWord.FullWord ;
+int Monitor::TryLock() {
+  intptr_t v = _LockWord.FullWord;
   for (;;) {
-    if ((v & _LBIT) != 0) return 0 ;
-    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
-    if (v == u) return 1 ;
-    v = u ;
+    if ((v & _LBIT) != 0) return 0;
+    const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
+    if (v == u) return 1;
+    v = u;
   }
 }

-int Monitor::TryFast () {
+int Monitor::TryFast() {
   // Optimistic fast-path form ...
   // Fast-path attempt for the common uncontended case.
   // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
-  intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ;  // agro ...
-  if (v == 0) return 1 ;
+  intptr_t v = CASPTR(&_LockWord, 0, _LBIT);  // agro ...
+  if (v == 0) return 1;
   for (;;) {
-    if ((v & _LBIT) != 0) return 0 ;
-    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
-    if (v == u) return 1 ;
-    v = u ;
+    if ((v & _LBIT) != 0) return 0;
+    const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
+    if (v == u) return 1;
+    v = u;
   }
 }

-int Monitor::ILocked () {
-  const intptr_t w = _LockWord.FullWord & 0xFF ;
-  assert (w == 0 || w == _LBIT, "invariant") ;
-  return w == _LBIT ;
+int Monitor::ILocked() {
+  const intptr_t w = _LockWord.FullWord & 0xFF;
+  assert(w == 0 || w == _LBIT, "invariant");
+  return w == _LBIT;
 }

 // Polite TATAS spinlock with exponential backoff - bounded spin.
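TryLock()/TryFast() above implement the test-and-test-and-set half of that scheme, and TrySpin() in the next hunk adds the bounded, exponentially backed-off spinning. A compact standalone sketch of the combined pattern, using std::atomic in place of CASPTR; all names and tuning constants are illustrative, not HotSpot's:

    #include <atomic>

    // Minimal TATAS spinlock with bounded exponential backoff, shaped like
    // Monitor::TryLock()/TrySpin() above.
    class TatasLock {
      std::atomic<int> locked_{0};

      // Marsaglia shift-xor step, as in MarsagliaXORV(): the data dependence
      // plus the volatile sink keep the compiler from deleting the delay loop.
      static unsigned stall(unsigned x, int its) {
        static volatile unsigned rv = 1;
        if (x == 0) x = 1;
        while (--its >= 0) { x ^= x << 6; x ^= x >> 21; x ^= x << 7; }
        if (x == 0x12345) rv = x;  // defeat dead-code elimination, as Stall() does
        return x;
      }

     public:
      bool try_lock() {
        // "test" before "test-and-set": a plain load avoids forcing the cache
        // line exclusive (the RTS->RTO upgrade TryFast() mentions) while busy.
        if (locked_.load(std::memory_order_relaxed) != 0) return false;
        int expected = 0;
        return locked_.compare_exchange_strong(expected, 1, std::memory_order_acquire);
      }

      // Polite bounded spin: back off longer after every 8 failed probes.
      bool try_spin(int spin_max = 4096) {
        unsigned rng = 1;
        int delay = 0;
        for (int probes = 1; probes <= spin_max; ++probes) {
          if (try_lock()) return true;
          if ((probes & 0x7) == 0) delay = ((delay << 1) | 1) & 0x7FF;
          rng = stall(rng, delay);
        }
        return false;  // spin budget exhausted; a caller would park/block
      }

      void unlock() { locked_.store(0, std::memory_order_release); }
    };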
@@ -342,38 +342,38 @@ int Monitor::ILocked () {
 // See synchronizer.cpp for details and rationale.

 int Monitor::TrySpin (Thread * const Self) {
-  if (TryLock()) return 1 ;
-  if (!os::is_MP()) return 0 ;
+  if (TryLock()) return 1;
+  if (!os::is_MP()) return 0;

-  int Probes = 0 ;
-  int Delay = 0 ;
-  int Steps = 0 ;
-  int SpinMax = NativeMonitorSpinLimit ;
-  int flgs = NativeMonitorFlags ;
+  int Probes = 0;
+  int Delay = 0;
+  int Steps = 0;
+  int SpinMax = NativeMonitorSpinLimit;
+  int flgs = NativeMonitorFlags;
   for (;;) {
     intptr_t v = _LockWord.FullWord;
     if ((v & _LBIT) == 0) {
       if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
-        return 1 ;
+        return 1;
       }
-      continue ;
+      continue;
     }

     if ((flgs & 8) == 0) {
-      SpinPause () ;
+      SpinPause();
     }

     // Periodically increase Delay -- variable Delay form
     // conceptually: delay *= 1 + 1/Exponent
-    ++ Probes;
-    if (Probes > SpinMax) return 0 ;
+    ++Probes;
+    if (Probes > SpinMax) return 0;
     if ((Probes & 0x7) == 0) {
-      Delay = ((Delay << 1)|1) & 0x7FF ;
+      Delay = ((Delay << 1)|1) & 0x7FF;
       // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
     }

-    if (flgs & 2) continue ;
+    if (flgs & 2) continue;

     // Consider checking _owner's schedctl state, if OFFPROC abort spin.
     // If the owner is OFFPROC then it's unlike that the lock will be dropped
@@ -389,48 +389,48 @@ int Monitor::TrySpin (Thread * const Self) {
     // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
     // Furthermore, they don't have a W$ like traditional SPARC processors.
     // We currently use a Marsaglia Shift-Xor RNG loop.
-    Steps += Delay ;
+    Steps += Delay;
     if (Self != NULL) {
-      jint rv = Self->rng[0] ;
-      for (int k = Delay ; --k >= 0; ) {
-        rv = MarsagliaXORV (rv) ;
-        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
+      jint rv = Self->rng[0];
+      for (int k = Delay; --k >= 0;) {
+        rv = MarsagliaXORV(rv);
+        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0;
       }
-      Self->rng[0] = rv ;
+      Self->rng[0] = rv;
     } else {
-      Stall (Delay) ;
+      Stall(Delay);
     }
   }
 }

 static int ParkCommon (ParkEvent * ev, jlong timo) {
   // Diagnostic support - periodically unwedge blocked threads
-  intx nmt = NativeMonitorTimeout ;
+  intx nmt = NativeMonitorTimeout;
   if (nmt > 0 && (nmt < timo || timo <= 0)) {
-    timo = nmt ;
+    timo = nmt;
   }
-  int err = OS_OK ;
+  int err = OS_OK;
   if (0 == timo) {
-    ev->park() ;
+    ev->park();
   } else {
-    err = ev->park(timo) ;
+    err = ev->park(timo);
   }
-  return err ;
+  return err;
 }

 inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
-  intptr_t v = _LockWord.FullWord ;
+  intptr_t v = _LockWord.FullWord;
   for (;;) {
     if ((v & _LBIT) == 0) {
-      const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
-      if (u == v) return 1 ;        // indicate acquired
-      v = u ;
+      const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
+      if (u == v) return 1;        // indicate acquired
+      v = u;
     } else {
       // Anticipate success ...
-      ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
-      const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
-      if (u == v) return 0 ;        // indicate pushed onto cxq
-      v = u ;
+      ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
+      const intptr_t u = CASPTR(&_LockWord, v, intptr_t(ESelf)|_LBIT);
+      if (u == v) return 0;        // indicate pushed onto cxq
+      v = u;
     }
     // Interference - LockWord change - just retry
   }
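AcquireOrPush() above relies on the LockWord encoding (cxq, LOCKBYTE): the low bit is the lock, the remaining bits hold the head pointer of the cxq stack of waiting ParkEvents, and a single CAS either takes the lock or pushes the caller. A sketch of that tagged-word protocol; types and names are illustrative, and a real waiter record must be aligned so the low bit is free:

    #include <atomic>
    #include <cstdint>

    struct Waiter { Waiter* next = nullptr; };  // stand-in for ParkEvent

    constexpr uintptr_t kLockBit = 1;           // stand-in for _LBIT
    std::atomic<uintptr_t> lock_word{0};        // stand-in for _LockWord

    // Returns 1 if the lock was acquired, 0 if self was pushed onto the cxq,
    // mirroring AcquireOrPush()'s contract.
    int acquire_or_push(Waiter* self) {
      uintptr_t v = lock_word.load();
      for (;;) {
        if ((v & kLockBit) == 0) {
          // Lock looks free: set the lock bit, leaving the cxq bits intact.
          if (lock_word.compare_exchange_weak(v, v | kLockBit)) return 1;
        } else {
          // Lock held: anticipate success by linking self ahead of the
          // current head, then CAS self in as the new head, lock bit kept.
          self->next = reinterpret_cast<Waiter*>(v & ~kLockBit);
          if (lock_word.compare_exchange_weak(
                  v, reinterpret_cast<uintptr_t>(self) | kLockBit)) return 0;
        }
        // CAS failure refreshed v -- interference, just retry.
      }
    }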
@@ -444,33 +444,33 @@ inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
 // _owner is a higher-level logical concept.

 void Monitor::ILock (Thread * Self) {
-  assert (_OnDeck != Self->_MutexEvent, "invariant") ;
+  assert(_OnDeck != Self->_MutexEvent, "invariant");

   if (TryFast()) {
  Exeunt:
-    assert (ILocked(), "invariant") ;
-    return ;
+    assert(ILocked(), "invariant");
+    return;
   }

-  ParkEvent * const ESelf = Self->_MutexEvent ;
-  assert (_OnDeck != ESelf, "invariant") ;
+  ParkEvent * const ESelf = Self->_MutexEvent;
+  assert(_OnDeck != ESelf, "invariant");

   // As an optimization, spinners could conditionally try to set ONDECK to _LBIT
   // Synchronizer.cpp uses a similar optimization.
-  if (TrySpin (Self)) goto Exeunt ;
+  if (TrySpin(Self)) goto Exeunt;

   // Slow-path - the lock is contended.
   // Either Enqueue Self on cxq or acquire the outer lock.
   // LockWord encoding = (cxq,LOCKBYTE)
-  ESelf->reset() ;
-  OrderAccess::fence() ;
+  ESelf->reset();
+  OrderAccess::fence();

   // Optional optimization ... try barging on the inner lock
   if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(Self)) == 0) {
-    goto OnDeck_LOOP ;
+    goto OnDeck_LOOP;
   }

-  if (AcquireOrPush (ESelf)) goto Exeunt ;
+  if (AcquireOrPush(ESelf)) goto Exeunt;

   // At any given time there is at most one ondeck thread.
   // ondeck implies not resident on cxq and not resident on EntryList
@@ -478,26 +478,26 @@ void Monitor::ILock (Thread * Self) {
   // CONSIDER: use Self->OnDeck instead of m->OnDeck.
   // Deschedule Self so that others may run.
   while (_OnDeck != ESelf) {
-    ParkCommon (ESelf, 0) ;
+    ParkCommon(ESelf, 0);
   }

   // Self is now in the ONDECK position and will remain so until it
   // manages to acquire the lock.
 OnDeck_LOOP:
   for (;;) {
-    assert (_OnDeck == ESelf, "invariant") ;
-    if (TrySpin (Self)) break ;
+    assert(_OnDeck == ESelf, "invariant");
+    if (TrySpin(Self)) break;
     // CONSIDER: if ESelf->TryPark() && TryLock() break ...
     // It's probably wise to spin only if we *actually* blocked
     // CONSIDER: check the lockbyte, if it remains set then
     // preemptively drain the cxq into the EntryList.
     // The best place and time to perform queue operations -- lock metadata --
     // is _before having acquired the outer lock, while waiting for the lock to drop.
-    ParkCommon (ESelf, 0) ;
+    ParkCommon(ESelf, 0);
   }

-  assert (_OnDeck == ESelf, "invariant") ;
-  _OnDeck = NULL ;
+  assert(_OnDeck == ESelf, "invariant");
+  _OnDeck = NULL;

   // Note that we current drop the inner lock (clear OnDeck) in the slow-path
   // epilogue immediately after having acquired the outer lock.
@@ -512,11 +512,11 @@ void Monitor::ILock (Thread * Self) {
   // effective length of the critical section.
   // Note that (A) and (B) are tantamount to succession by direct handoff for
   // the inner lock.
-  goto Exeunt ;
+  goto Exeunt;
 }

 void Monitor::IUnlock (bool RelaxAssert) {
-  assert (ILocked(), "invariant") ;
+  assert(ILocked(), "invariant");
   // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
   // before the store that releases the lock.  Crucially, all the stores and loads in the
   // critical section must be globally visible before the store of 0 into the lock-word
@@ -532,9 +532,9 @@ void Monitor::IUnlock (bool RelaxAssert) {
   // safety or lock release consistency.
   OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock

-  OrderAccess::storeload ();
-  ParkEvent * const w = _OnDeck ;
-  assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
+  OrderAccess::storeload();
+  ParkEvent * const w = _OnDeck;
+  assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
   if (w != NULL) {
     // Either we have a valid ondeck thread or ondeck is transiently "locked"
     // by some exiting thread as it arranges for succession.  The LSBit of
@@ -549,19 +549,19 @@ void Monitor::IUnlock (bool RelaxAssert) {
     // then progress is known to have occurred as that means the thread associated
     // with "w" acquired the lock.  In that case this thread need take no further
     // action to guarantee progress.
-    if ((UNS(w) & _LBIT) == 0) w->unpark() ;
-    return ;
+    if ((UNS(w) & _LBIT) == 0) w->unpark();
+    return;
   }

-  intptr_t cxq = _LockWord.FullWord ;
+  intptr_t cxq = _LockWord.FullWord;
   if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
-    return ;      // normal fast-path exit - cxq and EntryList both empty
+    return;      // normal fast-path exit - cxq and EntryList both empty
   }
   if (cxq & _LBIT) {
     // Optional optimization ...
     // Some other thread acquired the lock in the window since this
     // thread released it.  Succession is now that thread's responsibility.
-    return ;
+    return;
   }

 Succession:
@@ -575,22 +575,22 @@ void Monitor::IUnlock (bool RelaxAssert) {
   // picks a successor and marks that thread as OnDeck.  That successor
   // thread will then clear OnDeck once it eventually acquires the outer lock.
   if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
-    return ;
+    return;
   }

-  ParkEvent * List = _EntryList ;
+  ParkEvent * List = _EntryList;
   if (List != NULL) {
     // Transfer the head of the EntryList to the OnDeck position.
     // Once OnDeck, a thread stays OnDeck until it acquires the lock.
     // For a given lock there is at most OnDeck thread at any one instant.
  WakeOne:
-    assert (List == _EntryList, "invariant") ;
-    ParkEvent * const w = List ;
-    assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
-    _EntryList = w->ListNext ;
+    assert(List == _EntryList, "invariant");
+    ParkEvent * const w = List;
+    assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
+    _EntryList = w->ListNext;
     // as a diagnostic measure consider setting w->_ListNext = BAD
-    assert (UNS(_OnDeck) == _LBIT, "invariant") ;
-    _OnDeck = w ;           // pass OnDeck to w.
+    assert(UNS(_OnDeck) == _LBIT, "invariant");
+    _OnDeck = w;           // pass OnDeck to w.
     // w will clear OnDeck once it acquires the outer lock

     // Another optional optimization ...
@@ -599,25 +599,25 @@ void Monitor::IUnlock (bool RelaxAssert) {
     // Try to defer the unpark() operation - Delegate the responsibility
     // for unpark()ing the OnDeck thread to the current or subsequent owners
     // That is, the new owner is responsible for unparking the OnDeck thread.
-    OrderAccess::storeload() ;
-    cxq = _LockWord.FullWord ;
-    if (cxq & _LBIT) return ;
+    OrderAccess::storeload();
+    cxq = _LockWord.FullWord;
+    if (cxq & _LBIT) return;

-    w->unpark() ;
-    return ;
+    w->unpark();
+    return;
   }

-  cxq = _LockWord.FullWord ;
+  cxq = _LockWord.FullWord;
   if ((cxq & ~_LBIT) != 0) {
     // The EntryList is empty but the cxq is populated.
     // drain RATs from cxq into EntryList
     // Detach RATs segment with CAS and then merge into EntryList
     for (;;) {
       // optional optimization - if locked, the owner is responsible for succession
-      if (cxq & _LBIT) goto Punt ;
-      const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
-      if (vfy == cxq) break ;
-      cxq = vfy ;
+      if (cxq & _LBIT) goto Punt;
+      const intptr_t vfy = CASPTR(&_LockWord, cxq, cxq & _LBIT);
+      if (vfy == cxq) break;
+      cxq = vfy;
       // Interference - LockWord changed - Just retry
       // We can see concurrent interference from contending threads
       // pushing themselves onto the cxq or from lock-unlock operations.
@@ -639,10 +639,10 @@ void Monitor::IUnlock (bool RelaxAssert) {
     // the EntryList, but it might make sense to reverse the order
     // or perhaps sort by thread priority.  See the comments in
     // synchronizer.cpp objectMonitor::exit().
-    assert (_EntryList == NULL, "invariant") ;
-    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
-    assert (List != NULL, "invariant") ;
-    goto WakeOne ;
+    assert(_EntryList == NULL, "invariant");
+    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT);
+    assert(List != NULL, "invariant");
+    goto WakeOne;
   }

   // cxq|EntryList is empty.
@@ -651,8 +651,8 @@ void Monitor::IUnlock (bool RelaxAssert) {
   // A thread could have added itself to cxq since this thread previously checked.
   // Detect and recover by refetching cxq.
 Punt:
-  assert (UNS(_OnDeck) == _LBIT, "invariant") ;
-  _OnDeck = NULL ;            // Release inner lock.
+  assert(UNS(_OnDeck) == _LBIT, "invariant");
+  _OnDeck = NULL;            // Release inner lock.
   OrderAccess::storeload();   // Dekker duality - pivot point

   // Resample LockWord/cxq to recover from possible race.
@@ -665,32 +665,32 @@ void Monitor::IUnlock (bool RelaxAssert) {
   // Note that we don't need to recheck EntryList, just cxq.
   // If threads moved onto EntryList since we dropped OnDeck
   // that implies some other thread forced succession.
-  cxq = _LockWord.FullWord ;
+  cxq = _LockWord.FullWord;
   if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
-    goto Succession ;         // potential race -- re-run succession
+    goto Succession;         // potential race -- re-run succession
   }
-  return ;
+  return;
 }

 bool Monitor::notify() {
-  assert (_owner == Thread::current(), "invariant") ;
-  assert (ILocked(), "invariant") ;
-  if (_WaitSet == NULL) return true ;
-  NotifyCount ++ ;
+  assert(_owner == Thread::current(), "invariant");
+  assert(ILocked(), "invariant");
+  if (_WaitSet == NULL) return true;
+  NotifyCount++;

   // Transfer one thread from the WaitSet to the EntryList or cxq.
   // Currently we just unlink the head of the WaitSet and prepend to the cxq.
   // And of course we could just unlink it and unpark it, too, but
   // in that case it'd likely impale itself on the reentry.
-  Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
-  ParkEvent * nfy = _WaitSet ;
+  Thread::muxAcquire(_WaitLock, "notify:WaitLock");
+  ParkEvent * nfy = _WaitSet;
   if (nfy != NULL) {          // DCL idiom
-    _WaitSet = nfy->ListNext ;
-    assert (nfy->Notified == 0, "invariant") ;
+    _WaitSet = nfy->ListNext;
+    assert(nfy->Notified == 0, "invariant");
     // push nfy onto the cxq
     for (;;) {
-      const intptr_t v = _LockWord.FullWord ;
-      assert ((v & 0xFF) == _LBIT, "invariant") ;
+      const intptr_t v = _LockWord.FullWord;
+      assert((v & 0xFF) == _LBIT, "invariant");
       nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
       if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
       // interference - _LockWord changed -- just retry
@@ -698,17 +698,17 @@ bool Monitor::notify() {
     // Note that setting Notified before pushing nfy onto the cxq is
     // also legal and safe, but the safety properties are much more
     // subtle, so for the sake of code stewardship ...
-    OrderAccess::fence() ;
+    OrderAccess::fence();
     nfy->Notified = 1;
   }
-  Thread::muxRelease (_WaitLock) ;
+  Thread::muxRelease(_WaitLock);
   if (nfy != NULL && (NativeMonitorFlags & 16)) {
     // Experimental code ... light up the wakee in the hope that this thread (the owner)
     // will drop the lock just about the time the wakee comes ONPROC.
-    nfy->unpark() ;
+    nfy->unpark();
   }
-  assert (ILocked(), "invariant") ;
-  return true ;
+  assert(ILocked(), "invariant");
+  return true;
 }

 // Currently notifyAll() transfers the waiters one-at-a-time from the waitset
@@ -719,14 +719,14 @@ bool Monitor::notify() {
 // will be empty and the cxq will be "DCBAXYZ".  This is benign, of course.

 bool Monitor::notify_all() {
-  assert (_owner == Thread::current(), "invariant") ;
-  assert (ILocked(), "invariant") ;
-  while (_WaitSet != NULL) notify() ;
-  return true ;
+  assert(_owner == Thread::current(), "invariant");
+  assert(ILocked(), "invariant");
+  while (_WaitSet != NULL) notify();
+  return true;
 }

 int Monitor::IWait (Thread * Self, jlong timo) {
-  assert (ILocked(), "invariant") ;
+  assert(ILocked(), "invariant");

   // Phases:
   // 1. Enqueue Self on WaitSet - currently prepend
@@ -734,10 +734,10 @@ int Monitor::IWait (Thread * Self, jlong timo) {
   // 3. wait for either notification or timeout
   // 4. lock - reentry - reacquire the outer lock

-  ParkEvent * const ESelf = Self->_MutexEvent ;
-  ESelf->Notified = 0 ;
-  ESelf->reset() ;
-  OrderAccess::fence() ;
+  ParkEvent * const ESelf = Self->_MutexEvent;
+  ESelf->Notified = 0;
+  ESelf->reset();
+  OrderAccess::fence();

   // Add Self to WaitSet
   // Ideally only the holder of the outer lock would manipulate the WaitSet -
@@ -766,10 +766,10 @@ int Monitor::IWait (Thread * Self, jlong timo) {
   // In that case we could have one ListElement on the WaitSet and another
   // on the EntryList, with both referring to the same pure Event.

-  Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
-  ESelf->ListNext = _WaitSet ;
-  _WaitSet = ESelf ;
-  Thread::muxRelease (_WaitLock) ;
+  Thread::muxAcquire(_WaitLock, "wait:WaitLock:Add");
+  ESelf->ListNext = _WaitSet;
+  _WaitSet = ESelf;
+  Thread::muxRelease(_WaitLock);

   // Release the outer lock
   // We call IUnlock (RelaxAssert=true) as a thread T1 might
@@ -781,16 +781,16 @@ int Monitor::IWait (Thread * Self, jlong timo) {
   // IUnlock() call a thread should _never find itself on the EntryList
   // or cxq, but in the case of wait() it's possible.
   // See synchronizer.cpp objectMonitor::wait().
-  IUnlock (true) ;
+  IUnlock(true);

   // Wait for either notification or timeout
   // Beware that in some circumstances we might propagate
   // spurious wakeups back to the caller.
   for (;;) {
-    if (ESelf->Notified) break ;
-    int err = ParkCommon (ESelf, timo) ;
-    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
+    if (ESelf->Notified) break;
+    int err = ParkCommon(ESelf, timo);
+    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break;
   }

   // Prepare for reentry - if necessary, remove ESelf from WaitSet
@@ -799,55 +799,55 @@ int Monitor::IWait (Thread * Self, jlong timo) {
   // 2. On the cxq or EntryList
   // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

-  OrderAccess::fence() ;
-  int WasOnWaitSet = 0 ;
+  OrderAccess::fence();
+  int WasOnWaitSet = 0;
   if (ESelf->Notified == 0) {
-    Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
+    Thread::muxAcquire(_WaitLock, "wait:WaitLock:remove");
     if (ESelf->Notified == 0) {     // DCL idiom
-      assert (_OnDeck != ESelf, "invariant") ;   // can't be both OnDeck and on WaitSet
+      assert(_OnDeck != ESelf, "invariant");   // can't be both OnDeck and on WaitSet
       // ESelf is resident on the WaitSet -- unlink it.
       // A doubly-linked list would be better here so we can unlink in constant-time.
       // We have to unlink before we potentially recontend as ESelf might otherwise
       // end up on the cxq|EntryList -- it can't be on two lists at once.
-      ParkEvent * p = _WaitSet ;
-      ParkEvent * q = NULL ;            // classic q chases p
+      ParkEvent * p = _WaitSet;
+      ParkEvent * q = NULL;            // classic q chases p
       while (p != NULL && p != ESelf) {
-        q = p ;
-        p = p->ListNext ;
+        q = p;
+        p = p->ListNext;
       }
-      assert (p == ESelf, "invariant") ;
+      assert(p == ESelf, "invariant");
       if (p == _WaitSet) {      // found at head
-        assert (q == NULL, "invariant") ;
-        _WaitSet = p->ListNext ;
+        assert(q == NULL, "invariant");
+        _WaitSet = p->ListNext;
       } else {                  // found in interior
-        assert (q->ListNext == p, "invariant") ;
-        q->ListNext = p->ListNext ;
+        assert(q->ListNext == p, "invariant");
+        q->ListNext = p->ListNext;
       }
-      WasOnWaitSet = 1 ;        // We were *not* notified but instead encountered timeout
+      WasOnWaitSet = 1;        // We were *not* notified but instead encountered timeout
     }
-    Thread::muxRelease (_WaitLock) ;
+    Thread::muxRelease(_WaitLock);
   }

   // Reentry phase - reacquire the lock
   if (WasOnWaitSet) {
     // ESelf was previously on the WaitSet but we just unlinked it above
     // because of a timeout.  ESelf is not resident on any list and is not OnDeck
-    assert (_OnDeck != ESelf, "invariant") ;
-    ILock (Self) ;
+    assert(_OnDeck != ESelf, "invariant");
+    ILock(Self);
   } else {
     // A prior notify() operation moved ESelf from the WaitSet to the cxq.
     // ESelf is now on the cxq, EntryList or at the OnDeck position.
     // The following fragment is extracted from Monitor::ILock()
     for (;;) {
-      if (_OnDeck == ESelf && TrySpin(Self)) break ;
-      ParkCommon (ESelf, 0) ;
+      if (_OnDeck == ESelf && TrySpin(Self)) break;
+      ParkCommon(ESelf, 0);
     }
-    assert (_OnDeck == ESelf, "invariant") ;
-    _OnDeck = NULL ;
+    assert(_OnDeck == ESelf, "invariant");
+    _OnDeck = NULL;
   }

-  assert (ILocked(), "invariant") ;
-  return WasOnWaitSet != 0 ;        // return true IFF timeout
+  assert(ILocked(), "invariant");
+  return WasOnWaitSet != 0;        // return true IFF timeout
 }
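The four IWait() phases enumerated above are the classic monitor wait protocol, with the extra twist that HotSpot maintains its own WaitSet and may propagate spurious wakeups out of ParkCommon(). The same loop structure expressed against the C++ standard library, as a hedged analogue rather than a translation:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    struct WaitDemo {
      std::mutex m;            // plays the outer lock's role
      std::condition_variable cv;
      bool notified = false;   // plays ESelf->Notified's role

      // Returns true iff the wait timed out, matching IWait()'s convention.
      bool timed_wait(long timeout_ms) {
        std::unique_lock<std::mutex> lk(m);   // lock held on entry
        auto deadline = std::chrono::steady_clock::now() +
                        std::chrono::milliseconds(timeout_ms);
        // wait_until drops the lock while parked and reacquires it before
        // returning, covering phases 2-4; the loop absorbs spurious wakeups.
        while (!notified) {
          if (cv.wait_until(lk, deadline) == std::cv_status::timeout) {
            return !notified;  // timed out, unless a notify raced in
          }
        }
        return false;
      }
    };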
@@ -896,15 +896,15 @@ void Monitor::lock (Thread * Self) {
 #endif // CHECK_UNHANDLED_OOPS

   debug_only(check_prelock_state(Self));
-  assert (_owner != Self , "invariant") ;
-  assert (_OnDeck != Self->_MutexEvent, "invariant") ;
+  assert(_owner != Self , "invariant");
+  assert(_OnDeck != Self->_MutexEvent, "invariant");

   if (TryFast()) {
  Exeunt:
-    assert (ILocked(), "invariant") ;
-    assert (owner() == NULL, "invariant");
-    set_owner (Self);
-    return ;
+    assert(ILocked(), "invariant");
+    assert(owner() == NULL, "invariant");
+    set_owner(Self);
+    return;
   }

   // The lock is contended ...
@@ -916,23 +916,23 @@ void Monitor::lock (Thread * Self) {
     // and go on.  we note this with _snuck so we can also
     // pretend to unlock when the time comes.
     _snuck = true;
-    goto Exeunt ;
+    goto Exeunt;
   }

   // Try a brief spin to avoid passing thru thread state transition ...
-  if (TrySpin (Self)) goto Exeunt ;
+  if (TrySpin(Self)) goto Exeunt;

   check_block_state(Self);
   if (Self->is_Java_thread()) {
     // Horrible dictu - we suffer through a state transition
     assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
-    ThreadBlockInVM tbivm ((JavaThread *) Self) ;
-    ILock (Self) ;
+    ThreadBlockInVM tbivm((JavaThread *) Self);
+    ILock(Self);
   } else {
     // Mirabile dictu
-    ILock (Self) ;
+    ILock(Self);
   }
-  goto Exeunt ;
+  goto Exeunt;
 }

 void Monitor::lock() {
@@ -945,14 +945,14 @@ void Monitor::lock() {
 // thread state set to be in VM, the safepoint synchronization code will deadlock!

 void Monitor::lock_without_safepoint_check (Thread * Self) {
-  assert (_owner != Self, "invariant") ;
-  ILock (Self) ;
-  assert (_owner == NULL, "invariant");
-  set_owner (Self);
+  assert(_owner != Self, "invariant");
+  ILock(Self);
+  assert(_owner == NULL, "invariant");
+  set_owner(Self);
 }

-void Monitor::lock_without_safepoint_check () {
-  lock_without_safepoint_check (Thread::current()) ;
+void Monitor::lock_without_safepoint_check() {
+  lock_without_safepoint_check(Thread::current());
 }
@@ -976,23 +976,23 @@ bool Monitor::try_lock() {
   if (TryLock()) {
     // We got the lock
-    assert (_owner == NULL, "invariant");
-    set_owner (Self);
+    assert(_owner == NULL, "invariant");
+    set_owner(Self);
     return true;
   }
   return false;
 }

 void Monitor::unlock() {
-  assert (_owner == Thread::current(), "invariant") ;
-  assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ;
-  set_owner (NULL) ;
+  assert(_owner == Thread::current(), "invariant");
+  assert(_OnDeck != Thread::current()->_MutexEvent , "invariant");
+  set_owner(NULL);
   if (_snuck) {
     assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
     _snuck = false;
-    return ;
+    return;
   }
-  IUnlock (false) ;
+  IUnlock(false);
 }

 // Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
@@ -1020,29 +1020,29 @@ void Monitor::jvm_raw_lock() {
   if (TryLock()) {
  Exeunt:
-    assert (ILocked(), "invariant") ;
-    assert (_owner == NULL, "invariant");
+    assert(ILocked(), "invariant");
+    assert(_owner == NULL, "invariant");
     // This can potentially be called by non-java Threads. Thus, the ThreadLocalStorage
     // might return NULL. Don't call set_owner since it will break on an NULL owner
     // Consider installing a non-null "ANON" distinguished value instead of just NULL.
     _owner = ThreadLocalStorage::thread();
-    return ;
+    return;
   }

-  if (TrySpin(NULL)) goto Exeunt ;
+  if (TrySpin(NULL)) goto Exeunt;

   // slow-path - apparent contention
   // Allocate a ParkEvent for transient use.
   // The ParkEvent remains associated with this thread until
   // the time the thread manages to acquire the lock.
-  ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
-  ESelf->reset() ;
-  OrderAccess::storeload() ;
+  ParkEvent * const ESelf = ParkEvent::Allocate(NULL);
+  ESelf->reset();
+  OrderAccess::storeload();

   // Either Enqueue Self on cxq or acquire the outer lock.
   if (AcquireOrPush (ESelf)) {
-    ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
-    goto Exeunt ;
+    ParkEvent::Release(ESelf);      // surrender the ParkEvent
+    goto Exeunt;
   }

   // At any given time there is at most one ondeck thread.
@@ -1050,37 +1050,37 @@ void Monitor::jvm_raw_lock() {
   // Only the OnDeck thread can try to acquire -- contended for -- the lock.
   // CONSIDER: use Self->OnDeck instead of m->OnDeck.
   for (;;) {
-    if (_OnDeck == ESelf && TrySpin(NULL)) break ;
-    ParkCommon (ESelf, 0) ;
+    if (_OnDeck == ESelf && TrySpin(NULL)) break;
+    ParkCommon(ESelf, 0);
   }

-  assert (_OnDeck == ESelf, "invariant") ;
-  _OnDeck = NULL ;
-  ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
-  goto Exeunt ;
+  assert(_OnDeck == ESelf, "invariant");
+  _OnDeck = NULL;
+  ParkEvent::Release(ESelf);      // surrender the ParkEvent
+  goto Exeunt;
 }

 void Monitor::jvm_raw_unlock() {
   // Nearly the same as Monitor::unlock() ...
   // directly set _owner instead of using set_owner(null)
-  _owner = NULL ;
+  _owner = NULL;
   if (_snuck) {         // ???
     assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
     _snuck = false;
-    return ;
+    return;
   }
-  IUnlock(false) ;
+  IUnlock(false);
 }

 bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
-  Thread * const Self = Thread::current() ;
-  assert (_owner == Self, "invariant") ;
-  assert (ILocked(), "invariant") ;
+  Thread * const Self = Thread::current();
+  assert(_owner == Self, "invariant");
+  assert(ILocked(), "invariant");

   // as_suspend_equivalent logically implies !no_safepoint_check
-  guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
+  guarantee(!as_suspend_equivalent || !no_safepoint_check, "invariant");
   // !no_safepoint_check logically implies java_thread
-  guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;
+  guarantee(no_safepoint_check || Self->is_Java_thread(), "invariant");

 #ifdef ASSERT
   Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
@@ -1093,14 +1093,14 @@ bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equiva
   }
 #endif // ASSERT

-  int wait_status ;
+  int wait_status;
   // conceptually set the owner to NULL in anticipation of
   // abdicating the lock in wait
   set_owner(NULL);
   if (no_safepoint_check) {
-    wait_status = IWait (Self, timeout) ;
+    wait_status = IWait(Self, timeout);
   } else {
-    assert (Self->is_Java_thread(), "invariant") ;
+    assert(Self->is_Java_thread(), "invariant");
     JavaThread *jt = (JavaThread *)Self;

     // Enter safepoint region - ornate and Rococo ...
@@ -1113,7 +1113,7 @@ bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equiva
       // java_suspend_self()
     }

-    wait_status = IWait (Self, timeout) ;
+    wait_status = IWait(Self, timeout);

     // were we externally suspended while we were waiting?
     if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
@@ -1121,67 +1121,67 @@ bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equiva
       // while we were waiting another thread suspended us. We don't
       // want to hold the lock while suspended because that
       // would surprise the thread that suspended us.
-      assert (ILocked(), "invariant") ;
-      IUnlock (true) ;
+      assert(ILocked(), "invariant");
+      IUnlock(true);
       jt->java_suspend_self();
-      ILock (Self) ;
-      assert (ILocked(), "invariant") ;
+      ILock(Self);
+      assert(ILocked(), "invariant");
     }
   }

   // Conceptually reestablish ownership of the lock.
   // The "real" lock -- the LockByte -- was reacquired by IWait().
-  assert (ILocked(), "invariant") ;
-  assert (_owner == NULL, "invariant") ;
-  set_owner (Self) ;
-  return wait_status != 0 ;         // return true IFF timeout
+  assert(ILocked(), "invariant");
+  assert(_owner == NULL, "invariant");
+  set_owner(Self);
+  return wait_status != 0;         // return true IFF timeout
 }

 Monitor::~Monitor() {
-  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
+  assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "");
 }

 void Monitor::ClearMonitor (Monitor * m, const char *name) {
-  m->_owner = NULL ;
-  m->_snuck = false ;
+  m->_owner = NULL;
+  m->_snuck = false;
   if (name == NULL) {
-    strcpy(m->_name, "UNKNOWN") ;
+    strcpy(m->_name, "UNKNOWN");
   } else {
     strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
     m->_name[MONITOR_NAME_LEN - 1] = '\0';
   }
-  m->_LockWord.FullWord = 0 ;
-  m->_EntryList = NULL ;
-  m->_OnDeck = NULL ;
-  m->_WaitSet = NULL ;
-  m->_WaitLock[0] = 0 ;
+  m->_LockWord.FullWord = 0;
+  m->_EntryList = NULL;
+  m->_OnDeck = NULL;
+  m->_WaitSet = NULL;
+  m->_WaitLock[0] = 0;
 }

 Monitor::Monitor() { ClearMonitor(this); }

 Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
-  ClearMonitor (this, name) ;
+  ClearMonitor(this, name);
 #ifdef ASSERT
   _allow_vm_block = allow_vm_block;
-  _rank = Rank ;
+  _rank = Rank;
 #endif
 }

 Mutex::~Mutex() {
-  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
+  assert((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "");
 }

 Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
-  ClearMonitor ((Monitor *) this, name) ;
+  ClearMonitor((Monitor *) this, name);
 #ifdef ASSERT
   _allow_vm_block = allow_vm_block;
-  _rank = Rank ;
+  _rank = Rank;
 #endif
 }

 bool Monitor::owned_by_self() const {
   bool ret = _owner == Thread::current();
-  assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
+  assert(!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant");
   return ret;
 }


@@ -217,7 +217,7 @@ class SharedRuntime: AllStatic {
   static UncommonTrapBlob* uncommon_trap_blob() { return _uncommon_trap_blob; }
 #endif // COMPILER2

-  static address get_resolve_opt_virtual_call_stub(){
+  static address get_resolve_opt_virtual_call_stub() {
     assert(_resolve_opt_virtual_call_blob != NULL, "oops");
     return _resolve_opt_virtual_call_blob->entry_point();
   }
@@ -253,7 +253,7 @@ class SharedRuntime: AllStatic {
   // bytecode tracing is only used by the TraceBytecodes
   static intptr_t trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2) PRODUCT_RETURN0;

-  static oop retrieve_receiver( Symbol* sig, frame caller );
+  static oop retrieve_receiver(Symbol* sig, frame caller);

   static void register_finalizer(JavaThread* thread, oopDesc* obj);
@@ -446,8 +446,8 @@ class SharedRuntime: AllStatic {
   static bool is_wide_vector(int size);

   // Save and restore a native result
-  static void save_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots );
-  static void restore_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots );
+  static void save_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots);
+  static void restore_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots);

   // Generate a native wrapper for a given method.  The method takes arguments
   // in the Java compiled code convention, marshals them to the native
@@ -463,7 +463,7 @@ class SharedRuntime: AllStatic {
                                           int compile_id,
                                           BasicType* sig_bt,
                                           VMRegPair* regs,
-                                          BasicType ret_type );
+                                          BasicType ret_type);

   // Block before entering a JNI critical method
   static void block_for_jni_critical(JavaThread* thread);


@@ -75,7 +75,7 @@ class ObjectSynchronizer : AllStatic {
   // Special internal-use-only method for use by JVM infrastructure
   // that needs to wait() on a java-level object but that can't risk
   // throwing unexpected InterruptedExecutionExceptions.
-  static void waitUninterruptibly (Handle obj, jlong Millis, Thread * THREAD) ;
+  static void waitUninterruptibly(Handle obj, jlong Millis, Thread * THREAD);

   // used by classloading to free classloader object lock,
   // wait on an internal lock, and reclaim original lock
@@ -85,9 +85,9 @@ class ObjectSynchronizer : AllStatic {
   // thread-specific and global objectMonitor free list accessors
   // static void verifyInUse (Thread * Self) ; too slow for general assert/debug
-  static ObjectMonitor * omAlloc (Thread * Self) ;
-  static void omRelease (Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc) ;
-  static void omFlush (Thread * Self) ;
+  static ObjectMonitor * omAlloc(Thread * Self);
+  static void omRelease(Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc);
+  static void omFlush(Thread * Self);

   // Inflate light weight monitor to heavy weight monitor
   static ObjectMonitor* inflate(Thread * Self, oop obj);
@@ -97,7 +97,7 @@ class ObjectSynchronizer : AllStatic {
   // Returns the identity hash value for an oop
   // NOTE: It may cause monitor inflation
   static intptr_t identity_hash_value_for(Handle obj);
-  static intptr_t FastHashCode (Thread * Self, oop obj) ;
+  static intptr_t FastHashCode(Thread * Self, oop obj);

   // java.lang.Thread support
   static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
@@ -124,7 +124,7 @@ class ObjectSynchronizer : AllStatic {
   static void verify() PRODUCT_RETURN;
   static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;

-  static void RegisterSpinCallback (int (*)(intptr_t, int), intptr_t) ;
+  static void RegisterSpinCallback(int(*)(intptr_t, int), intptr_t);

  private:
   enum { _BLOCKSIZE = 128 };
@@ -155,7 +155,7 @@ class ObjectLocker : public StackObj {
   // Monitor behavior
   void wait (TRAPS) { ObjectSynchronizer::wait (_obj, 0, CHECK); }  // wait forever
   void notify_all(TRAPS) { ObjectSynchronizer::notifyall(_obj, CHECK); }
-  void waitUninterruptibly (TRAPS) { ObjectSynchronizer::waitUninterruptibly (_obj, 0, CHECK);}
+  void waitUninterruptibly (TRAPS) { ObjectSynchronizer::waitUninterruptibly (_obj, 0, CHECK); }
   // complete_exit gives up lock completely, returning recursion count
   // reenter reclaims lock with original recursion count
   intptr_t complete_exit(TRAPS) { return ObjectSynchronizer::complete_exit(_obj, CHECK_0); }
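The complete_exit/reenter pair in the context above differs from a plain unlock: complete_exit strips all recursive holds at once and reports the count, and reenter later restores exactly that count. A toy model of the contract on a hand-rolled recursive lock; purely illustrative, not HotSpot's ObjectMonitor logic:

    #include <mutex>

    class RecursiveLock {
      std::recursive_mutex m_;
      int recursions_ = 0;   // owner-only bookkeeping
     public:
      void lock()   { m_.lock(); ++recursions_; }
      void unlock() { --recursions_; m_.unlock(); }

      // Give up the lock completely, returning the recursion count.
      int complete_exit() {
        int saved = recursions_;
        for (int i = 0; i < saved; i++) unlock();
        return saved;
      }
      // Reclaim the lock with the original recursion count.
      void reenter(int saved) {
        for (int i = 0; i < saved; i++) lock();
      }
    };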