Mirror of https://github.com/openjdk/jdk.git, synced 2025-09-18 10:04:42 +02:00

Merge commit c9b2bc62c9
2991 changed files with 67823 additions and 46239 deletions
@@ -4101,8 +4101,8 @@ void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBl
   SharedHeap* sh = SharedHeap::heap();
   // Cannot yet substitute active_workers for n_par_threads
   // because of G1CollectedHeap::verify() use of
-  // SharedHeap::process_strong_roots(). n_par_threads == 0 will
-  // turn off parallelism in process_strong_roots while active_workers
+  // SharedHeap::process_roots(). n_par_threads == 0 will
+  // turn off parallelism in process_roots while active_workers
   // is being used for parallelism elsewhere.
   bool is_par = sh->n_par_threads() > 0;
   assert(!is_par ||
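The reasoning in the comment above is that a parallel-thread count of zero is itself the switch that forces serial root processing, so it cannot simply be replaced by the gang's active_workers value, which may be non-zero while a particular pass must stay serial. A small standalone sketch of that convention follows; this is not HotSpot code, and the type and field names are invented stand-ins for SharedHeap's bookkeeping.

#include <cstdio>

// Toy stand-in for the heap's worker bookkeeping; the names are illustrative
// only and do not match the real SharedHeap interface.
struct ToyHeap {
  int n_par_threads;   // 0 signals that root processing must run serially
  int active_workers;  // workers a GC gang may be using elsewhere at the same time
};

// Mirrors the test in the hunk above: parallelism is keyed off n_par_threads,
// not off active_workers.
static void process_roots(const ToyHeap& heap) {
  bool is_par = heap.n_par_threads > 0;
  std::printf("process_roots: %s (n_par_threads=%d, active_workers=%d)\n",
              is_par ? "parallel" : "serial",
              heap.n_par_threads, heap.active_workers);
}

int main() {
  // A verification pass wants a serial walk even though workers are active
  // elsewhere, so active_workers cannot be substituted for n_par_threads.
  ToyHeap during_verify{0, 4};
  process_roots(during_verify);       // prints "serial ..."

  ToyHeap during_parallel_gc{4, 4};
  process_roots(during_parallel_gc);  // prints "parallel ..."
  return 0;
}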
@@ -4376,7 +4376,7 @@ void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
       if (Yields > 5) {
         os::naked_short_sleep(1);
       } else {
-        os::NakedYield();
+        os::naked_yield();
         ++Yields;
       }
     } else {
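The hunk above sits in SpinAcquire's contended slow path: yield the CPU for the first few retries, then fall back to short sleeps. Below is a minimal sketch of that spin/yield/sleep backoff written with standard C++ primitives rather than HotSpot's os:: layer; std::this_thread::yield() stands in for os::naked_yield(), a 1 ms sleep stands in for os::naked_short_sleep(1), and the rest of SpinAcquire (SpinPause, counters) is omitted.

#include <atomic>
#include <chrono>
#include <thread>

// Spin-acquire with a yield-then-sleep backoff, in the spirit of the hunk
// above. The threshold of 5 yields matches the code shown but is otherwise
// arbitrary.
static void spin_acquire(std::atomic<int>& lock_word) {
  int expected = 0;
  if (lock_word.compare_exchange_strong(expected, 1)) return;  // fast path

  int yields = 0;
  for (;;) {
    while (lock_word.load(std::memory_order_relaxed) != 0) {
      if (yields > 5) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
      } else {
        std::this_thread::yield();
        ++yields;
      }
    }
    expected = 0;  // CAS failure overwrites expected, so reset before retrying
    if (lock_word.compare_exchange_strong(expected, 1)) return;
  }
}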
@@ -4394,6 +4394,12 @@ void Thread::SpinRelease (volatile int * adr) {
   // It's safe if subsequent LDs and STs float "up" into the critical section,
   // but prior LDs and STs within the critical section can't be allowed
   // to reorder or float past the ST that releases the lock.
+  // Loads and stores in the critical section - which appear in program
+  // order before the store that releases the lock - must also appear
+  // before the store that releases the lock in memory visibility order.
+  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
+  // the ST of 0 into the lock-word which releases the lock, so fence
+  // more than covers this on all platforms.
   *adr = 0;
 }
 
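The added comment describes ordinary release semantics: everything done inside the critical section must become visible no later than the store that clears the lock word. In portable C++ the same constraint is expressed with a release store; a small sketch of the concept, not the VM's OrderAccess code:

#include <atomic>

// Release the lock word. The release ordering forbids prior loads and stores
// (the critical section) from being reordered after this store, which is the
// #loadstore|#storestore guarantee the comment above asks for. Later accesses
// are still free to float "up" past the store.
static void spin_release(std::atomic<int>& lock_word) {
  lock_word.store(0, std::memory_order_release);
}

The HotSpot code instead issues a full fence before a plain store, which, as its own comment says, "more than covers" the required ordering on all platforms.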
@@ -4575,18 +4581,25 @@ void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
 // This implementation pops from the head of the list. This is unfair,
 // but tends to provide excellent throughput as hot threads remain hot.
 // (We wake recently run threads first).
 
+//
+// All paths through muxRelease() will execute a CAS.
+// Release consistency -- We depend on the CAS in muxRelease() to provide full
+// bidirectional fence/MEMBAR semantics, ensuring that all prior memory operations
+// executed within the critical section are complete and globally visible before the
+// store (CAS) to the lock-word that releases the lock becomes globally visible.
 void Thread::muxRelease (volatile intptr_t * Lock) {
   for (;;) {
     const intptr_t w = Atomic::cmpxchg_ptr(0, Lock, LOCKBIT);
     assert(w & LOCKBIT, "invariant");
     if (w == LOCKBIT) return;
-    ParkEvent * List = (ParkEvent *)(w & ~LOCKBIT);
+    ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT);
     assert(List != NULL, "invariant");
     assert(List->OnList == intptr_t(Lock), "invariant");
-    ParkEvent * nxt = List->ListNext;
+    ParkEvent * const nxt = List->ListNext;
     guarantee((intptr_t(nxt) & LOCKBIT) == 0, "invariant");
 
     // The following CAS() releases the lock and pops the head element.
     // The CAS() also ratifies the previously fetched lock-word value.
     if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
       continue;
     }
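muxRelease pops the head of the waiter list and hands back the lock in a single CAS; the CAS also ratifies the lock-word value it just read, and, as the new comment notes, its full-fence semantics make all critical-section writes visible before the release is. Below is a condensed sketch of that pop-and-release loop using std::atomic and a simplified waiter node; the types and helper names are stand-ins, not HotSpot's ParkEvent or Atomic API, and the unpark of the popped waiter is left to the caller.

#include <atomic>
#include <cassert>
#include <cstdint>

static const intptr_t LOCKBIT = 1;

// Simplified waiter node standing in for ParkEvent; only the list link is kept.
struct Waiter {
  Waiter* next = nullptr;
};

// Release a mux lock whose word holds LOCKBIT | pointer-to-head-of-waiter-list.
// Returns the waiter to wake, or nullptr if no thread was queued. A successful
// CAS both releases the lock and ratifies the value read just before it.
static Waiter* mux_release(std::atomic<intptr_t>& lock_word) {
  for (;;) {
    // Try to clear the word outright; this succeeds only if it was exactly
    // LOCKBIT, i.e. the lock was held with no waiters queued.
    intptr_t w = LOCKBIT;
    if (lock_word.compare_exchange_strong(w, 0)) return nullptr;
    assert(w & LOCKBIT);  // we still own the lock, so the bit must be set

    Waiter* const head = reinterpret_cast<Waiter*>(w & ~LOCKBIT);
    Waiter* const next = head->next;

    // Pop the head and drop LOCKBIT in one step; retry if the word changed
    // under us (another thread enqueued itself in the meantime).
    intptr_t expected = w;
    if (lock_word.compare_exchange_strong(expected,
                                          reinterpret_cast<intptr_t>(next))) {
      return head;  // caller unparks this waiter
    }
  }
}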