8280136: Serial: Remove unnecessary use of ExpandHeap_lock
Reviewed-by: iwalulya, kbarrett, sjohanss
parent 2112a9dc49
commit bc6148407e

7 changed files with 21 additions and 16 deletions
src/hotspot/share/gc/parallel/mutableSpace.cpp

@@ -217,7 +217,7 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
 // Only used by oldgen allocation.
 bool MutableSpace::needs_expand(size_t word_size) const {
-  assert_lock_strong(ExpandHeap_lock);
+  assert_lock_strong(PSOldGenExpand_lock);
   // Holding the lock means end is stable.  So while top may be advancing
   // via concurrent allocations, there is no need to order the reads of top
   // and end here, unlike in cas_allocate.
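The comment in this hunk carries the memory-ordering argument: with the expand lock held, end() cannot move, so the reads of top and end need no ordering, whereas the lock-free cas_allocate path has no such guarantee. A minimal toy sketch of the contrast (illustrative C++, not HotSpot's MutableSpace; the names and the std::atomic machinery are assumptions, and it assumes every writer of _end holds the same expand lock):

  #include <atomic>
  #include <cstddef>

  struct ToySpace {
    std::atomic<char*> _top;  // advanced by concurrent CAS allocations
    std::atomic<char*> _end;  // only changes under the expand lock

    // Caller holds the expand lock, so _end is stable: relaxed loads are
    // enough and no ordering between the two loads is required.
    bool needs_expand(std::size_t bytes) const {
      char* top = _top.load(std::memory_order_relaxed);
      char* end = _end.load(std::memory_order_relaxed);
      return static_cast<std::size_t>(end - top) < bytes;
    }

    // Lock-free path: a concurrent expand may move _end, so the loads must
    // be ordered so the _end we test is at least as recent as the _top the
    // CAS is based on.
    char* cas_allocate(std::size_t bytes) {
      char* top = _top.load(std::memory_order_acquire);
      for (;;) {
        char* end = _end.load(std::memory_order_acquire);
        if (static_cast<std::size_t>(end - top) < bytes) {
          return nullptr;                       // caller must expand
        }
        if (_top.compare_exchange_weak(top, top + bytes,
                                       std::memory_order_acq_rel)) {
          return top;                           // won the race
        }                                       // else top reloaded; retry
      }
    }
  };

The relaxed loads in needs_expand are safe only because the expander and the caller serialize on the same lock; that is exactly the invariant the renamed assertion documents.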
src/hotspot/share/gc/parallel/mutableSpace.hpp

@@ -145,7 +145,7 @@ class MutableSpace: public CHeapObj<mtGC> {
   // Return true if this space needs to be expanded in order to satisfy an
   // allocation request of the indicated size.  Concurrent allocations and
   // resizes may change the result of a later call.  Used by oldgen allocator.
-  // precondition: holding ExpandHeap_lock
+  // precondition: holding PSOldGenExpand_lock
   bool needs_expand(size_t word_size) const;

   // Iteration.
src/hotspot/share/gc/parallel/psOldGen.cpp

@@ -163,7 +163,7 @@ bool PSOldGen::expand_for_allocate(size_t word_size) {
   assert(word_size > 0, "allocating zero words?");
   bool result = true;
   {
-    MutexLocker x(ExpandHeap_lock);
+    MutexLocker x(PSOldGenExpand_lock);
     // Avoid "expand storms" by rechecking available space after obtaining
     // the lock, because another thread may have already made sufficient
     // space available.  If insufficient space available, that will remain
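The recheck noted in this hunk is the classic guard against "expand storms": without it, every thread that lost the allocation race would grow the heap again. A toy sketch of the pattern (hypothetical ToySpace type, not HotSpot code):

  #include <cstddef>
  #include <mutex>

  struct ToySpace {
    std::size_t free_bytes = 0;
    bool needs_expand(std::size_t bytes) const { return free_bytes < bytes; }
    bool grow(std::size_t bytes) { free_bytes += bytes; return true; }
  };

  bool expand_for_allocate(ToySpace& space, std::mutex& expand_lock,
                           std::size_t bytes) {
    std::lock_guard<std::mutex> x(expand_lock);
    // Recheck under the lock: a thread that was blocked here while another
    // thread expanded must not expand a second time.
    if (!space.needs_expand(bytes)) {
      return true;  // someone else already made enough room
    }
    return space.grow(bytes);
  }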
@@ -181,7 +181,7 @@ bool PSOldGen::expand_for_allocate(size_t word_size) {
 }

 bool PSOldGen::expand(size_t bytes) {
-  assert_lock_strong(ExpandHeap_lock);
+  assert_lock_strong(PSOldGenExpand_lock);
   assert_locked_or_safepoint(Heap_lock);
   assert(bytes > 0, "precondition");
   const size_t alignment = virtual_space()->alignment();
@@ -219,7 +219,7 @@ bool PSOldGen::expand(size_t bytes) {
 }

 bool PSOldGen::expand_by(size_t bytes) {
-  assert_lock_strong(ExpandHeap_lock);
+  assert_lock_strong(PSOldGenExpand_lock);
   assert_locked_or_safepoint(Heap_lock);
   assert(bytes > 0, "precondition");
   bool result = virtual_space()->expand_by(bytes);
@@ -255,7 +255,7 @@ bool PSOldGen::expand_by(size_t bytes) {
 }

 bool PSOldGen::expand_to_reserved() {
-  assert_lock_strong(ExpandHeap_lock);
+  assert_lock_strong(PSOldGenExpand_lock);
   assert_locked_or_safepoint(Heap_lock);

   bool result = false;
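All three growth paths above assert the same pair of preconditions: the caller must own the new PSOldGenExpand_lock outright, while Heap_lock only needs to be held when the VM is not at a safepoint. A toy sketch of the two assertion flavors (illustrative helpers; HotSpot's real checks live in assert_lock_strong and assert_locked_or_safepoint):

  #include <cassert>
  #include <thread>

  struct ToyLock {
    std::thread::id owner{};  // id of the owning thread, if any
    bool owned_by_self() const { return owner == std::this_thread::get_id(); }
  };

  inline bool toy_at_safepoint() { return false; }  // stand-in for VM state

  // Strong form: the current thread must own the lock, no exceptions.
  inline void toy_assert_lock_strong(const ToyLock& l) {
    assert(l.owned_by_self() && "must own lock");
  }

  // Weak form: at a safepoint no Java thread can race us, so holding the
  // lock is only required outside of safepoints.
  inline void toy_assert_locked_or_safepoint(const ToyLock& l) {
    assert((l.owned_by_self() || toy_at_safepoint()) && "lock or safepoint");
  }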
@@ -268,12 +268,11 @@ bool PSOldGen::expand_to_reserved() {
 }

 void PSOldGen::shrink(size_t bytes) {
-  assert_lock_strong(ExpandHeap_lock);
+  assert_lock_strong(PSOldGenExpand_lock);
   assert_locked_or_safepoint(Heap_lock);

   size_t size = align_down(bytes, virtual_space()->alignment());
   if (size > 0) {
-    assert_lock_strong(ExpandHeap_lock);
     virtual_space()->shrink_by(bytes);
     post_resize();
   }
@@ -312,11 +311,11 @@ void PSOldGen::resize(size_t desired_free_space) {
 }
   if (new_size > current_size) {
     size_t change_bytes = new_size - current_size;
-    MutexLocker x(ExpandHeap_lock);
+    MutexLocker x(PSOldGenExpand_lock);
     expand(change_bytes);
   } else {
     size_t change_bytes = current_size - new_size;
-    MutexLocker x(ExpandHeap_lock);
+    MutexLocker x(PSOldGenExpand_lock);
     shrink(change_bytes);
   }

src/hotspot/share/gc/serial/defNewGeneration.cpp

@@ -288,7 +288,6 @@ void DefNewGeneration::swap_spaces() {
 }

 bool DefNewGeneration::expand(size_t bytes) {
-  MutexLocker x(ExpandHeap_lock);
   HeapWord* prev_high = (HeapWord*) _virtual_space.high();
   bool success = _virtual_space.expand_by(bytes);
   if (success && ZapUnusedHeapArea) {
src/hotspot/share/gc/serial/tenuredGeneration.cpp

@@ -196,7 +196,6 @@ TenuredGeneration::expand_and_allocate(size_t word_size, bool is_tlab) {
 }

 bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
-  GCMutexLocker x(ExpandHeap_lock);
   return CardGeneration::expand(bytes, expand_bytes);
 }

@@ -209,7 +208,7 @@ size_t TenuredGeneration::contiguous_available() const {
 }

 void TenuredGeneration::assert_correct_size_change_locking() {
-  assert_locked_or_safepoint(ExpandHeap_lock);
+  assert_locked_or_safepoint(Heap_lock);
 }

 void TenuredGeneration::object_iterate(ObjectClosure* blk) {
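Note that the serial hunks delete the lockers outright rather than renaming them. The removed GCMutexLocker was already safepoint-aware, acquiring its mutex only outside a safepoint; the commit's point is that the serial resize paths run exclusively in the safepoint-or-Heap_lock regime, so even that conditional acquisition was dead weight. A toy sketch of the GCMutexLocker idiom (illustrative types, not the HotSpot class):

  #include <mutex>

  inline bool toy_at_safepoint() { return false; }  // stand-in for VM state

  // Acquire the mutex only when not at a safepoint; at a safepoint, mutual
  // exclusion is already guaranteed, so locking would be redundant.
  class ToyGCMutexLocker {
    std::mutex* _mutex;
   public:
    explicit ToyGCMutexLocker(std::mutex* m)
        : _mutex(toy_at_safepoint() ? nullptr : m) {
      if (_mutex != nullptr) _mutex->lock();
    }
    ~ToyGCMutexLocker() {
      if (_mutex != nullptr) _mutex->unlock();
    }
    ToyGCMutexLocker(const ToyGCMutexLocker&) = delete;
    ToyGCMutexLocker& operator=(const ToyGCMutexLocker&) = delete;
  };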
src/hotspot/share/runtime/mutexLocker.cpp

@@ -54,7 +54,9 @@ Monitor* JNICritical_lock = NULL;
 Mutex*   JvmtiThreadState_lock        = NULL;
 Monitor* EscapeBarrier_lock           = NULL;
 Monitor* Heap_lock                    = NULL;
-Mutex*   ExpandHeap_lock              = NULL;
+#if INCLUDE_PARALLELGC
+Mutex*   PSOldGenExpand_lock          = NULL;
+#endif
 Mutex*   AdapterHandlerLibrary_lock   = NULL;
 Mutex*   SignatureHandlerLibrary_lock = NULL;
 Mutex*   VtableStubs_lock             = NULL;
@@ -358,7 +360,11 @@ void mutex_init() {
     defl(G1OldGCCount_lock           , PaddedMonitor, Threads_lock, true);
   }
   defl(CompileTaskAlloc_lock       ,  PaddedMutex ,  MethodCompileQueue_lock);
-  defl(ExpandHeap_lock             ,  PaddedMutex ,  Heap_lock, true);
+#if INCLUDE_PARALLELGC
+  if (UseParallelGC) {
+    defl(PSOldGenExpand_lock       ,  PaddedMutex ,  Heap_lock, true);
+  }
+#endif
   defl(OopMapCacheAlloc_lock       ,  PaddedMutex ,  Threads_lock, true);
   defl(Module_lock                 ,  PaddedMutex ,  ClassLoaderDataGraph_lock);
   defl(SystemDictionary_lock       ,  PaddedMonitor, Module_lock);
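If I read the defl convention correctly, naming Heap_lock here gives the new PSOldGenExpand_lock a rank just below it, so a thread already holding Heap_lock may still take the expand lock, while the reverse order trips the rank check. A toy sketch of that rank-ordered locking discipline (illustrative code, not HotSpot's Mutex ranking machinery):

  #include <cassert>
  #include <mutex>
  #include <vector>

  struct RankedMutex {
    int rank;           // lower rank = acquired later, held innermost
    std::mutex m;
  };

  thread_local std::vector<const RankedMutex*> toy_held;

  void toy_ranked_lock(RankedMutex& rm) {
    // May only acquire a lock of strictly lower rank than anything held.
    for (const RankedMutex* h : toy_held) {
      assert(rm.rank < h->rank && "lock rank order violated");
    }
    rm.m.lock();
    toy_held.push_back(&rm);
  }

  void toy_ranked_unlock(RankedMutex& rm) {
    assert(!toy_held.empty() && toy_held.back() == &rm);  // LIFO unlock only
    rm.m.unlock();
    toy_held.pop_back();
  }

With, say, Heap_lock at rank N and PSOldGenExpand_lock at N-1, PSOldGen::resize can nest the expand lock inside Heap_lock, which matches how the Parallel hunks above acquire it.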
src/hotspot/share/runtime/mutexLocker.hpp

@@ -46,7 +46,9 @@ extern Monitor* JNICritical_lock; // a lock used while entering a
 extern Mutex*   JvmtiThreadState_lock;            // a lock on modification of JVMTI thread data
 extern Monitor* EscapeBarrier_lock;               // a lock to sync reallocating and relocking objects because of JVMTI access
 extern Monitor* Heap_lock;                        // a lock on the heap
-extern Mutex*   ExpandHeap_lock;                  // a lock on expanding the heap
+#if INCLUDE_PARALLELGC
+extern Mutex*   PSOldGenExpand_lock;              // a lock on expanding the heap
+#endif
 extern Mutex*   AdapterHandlerLibrary_lock;       // a lock on the AdapterHandlerLibrary
 extern Mutex*   SignatureHandlerLibrary_lock;     // a lock on the SignatureHandlerLibrary
 extern Mutex*   VtableStubs_lock;                 // a lock on the VtableStubs