Mirror of https://github.com/openjdk/jdk.git, synced 2025-09-18 18:14:38 +02:00
6979279: remove special-case code for ParallelGCThreads==0
Reviewed-by: jwilhelm, brutisso, kbarrett
parent bd227a9bac
commit 46e9fb5176
22 changed files with 334 additions and 650 deletions
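Every hunk excerpted below (four of the 22 changed files) removes the same shape of special case: when ParallelGCThreads == 0, the code skipped per-list lock setup or took an unlocked fast path; after the patch, the locks are created and taken unconditionally. A minimal before/after sketch of that pattern in portable C++ — all names here (add_chunk_before/after, do_add, free_list_lock) are illustrative, not code from the patch:

    #include <mutex>

    unsigned int ParallelGCThreads = 4;   // HotSpot flag; value illustrative
    std::mutex free_list_lock;            // stand-in for the HotSpot Mutex

    void do_add(void* chunk) { /* ... link chunk into the free list ... */ }

    // Shape before the patch: skip the lock when no parallel GC threads exist.
    void add_chunk_before(void* chunk) {
      if (ParallelGCThreads == 0) {
        do_add(chunk);                    // single GC thread: no locking needed
      } else {
        std::lock_guard<std::mutex> x(free_list_lock);
        do_add(chunk);
      }
    }

    // Shape after the patch: one unconditional path; an uncontended lock is
    // cheap, and there is only one code path left to test and maintain.
    void add_chunk_after(void* chunk) {
      std::lock_guard<std::mutex> x(free_list_lock);
      do_add(chunk);
    }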
adaptiveFreeList.cpp
@@ -64,23 +64,6 @@ void AdaptiveFreeList<Chunk>::reset(size_t hint) {
   set_hint(hint);
 }
 
-#ifndef PRODUCT
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::assert_proper_lock_protection_work() const {
-  assert(protecting_lock() != NULL, "Don't call this directly");
-  assert(ParallelGCThreads > 0, "Don't call this directly");
-  Thread* thr = Thread::current();
-  if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
-    // assert that we are holding the freelist lock
-  } else if (thr->is_GC_task_thread()) {
-    assert(protecting_lock()->owned_by_self(), "FreeList RACE DETECTED");
-  } else if (thr->is_Java_thread()) {
-    assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
-  } else {
-    ShouldNotReachHere();  // unaccounted thread type?
-  }
-}
-#endif
 template <class Chunk>
 void AdaptiveFreeList<Chunk>::init_statistics(bool split_birth) {
   _allocation_stats.initialize(split_birth);
adaptiveFreeList.hpp
@@ -81,8 +81,6 @@ class AdaptiveFreeList : public FreeList<Chunk> {
   // Reset the head, tail, hint, and count of a free list.
   void reset(size_t hint);
 
-  void assert_proper_lock_protection_work() const PRODUCT_RETURN;
-
   void print_on(outputStream* st, const char* c = NULL) const;
 
   size_t hint() const {
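For readers unfamiliar with HotSpot build flavors: the PRODUCT_RETURN suffix on the deleted declaration makes the check a debug-build-only function. In product builds the macro supplies an empty inline body, so calls compile to nothing; in non-product builds it leaves a plain declaration whose definition sits under #ifndef PRODUCT, exactly as in the .cpp hunk above. A simplified sketch of the idiom (Example and check_invariants are hypothetical; the real macro lives in HotSpot's utilities/macros.hpp):

    #ifdef PRODUCT
      #define PRODUCT_RETURN { }  // product build: empty body, calls optimize away
    #else
      #define PRODUCT_RETURN      // debug build: declaration only; the body is
    #endif                        // compiled separately under #ifndef PRODUCT

    class Example {
     public:
      void check_invariants() const PRODUCT_RETURN;  // free in product builds
    };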
compactibleFreeListSpace.cpp
@@ -149,18 +149,15 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
   check_free_list_consistency();
 
   // Initialize locks for parallel case.
-
-  if (CollectedHeap::use_parallel_gc_threads()) {
-    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1,  // == ExpandHeap_lock - 1
-                                              "a freelist par lock",
-                                              true);
-      DEBUG_ONLY(
-        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
-      )
-    }
-    _dictionary->set_par_lock(&_parDictionaryAllocLock);
-  }
+  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+    _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1,  // == ExpandHeap_lock - 1
+                                            "a freelist par lock",
+                                            true);
+    DEBUG_ONLY(
+      _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
+    )
+  }
+  _dictionary->set_par_lock(&_parDictionaryAllocLock);
 }
 
 // Like CompactibleSpace forward() but always calls cross_threshold() to
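The constructor hunk above keeps the lock-striping scheme and merely drops the guard: one mutex per indexed free-list size class, so threads operating on different chunk sizes never contend. A self-contained sketch of the same idea in portable C++, with illustrative names:

    #include <cstddef>
    #include <mutex>

    const std::size_t kNumSizeClasses = 257;         // stand-in for IndexSetSize

    // One lock per size class instead of a single global free-list lock.
    std::mutex size_class_locks[kNumSizeClasses];

    void add_to_free_list(std::size_t size_class /*, chunk */) {
      // Threads freeing chunks of different sizes lock different mutexes,
      // so they do not serialize against each other.
      std::lock_guard<std::mutex> guard(size_class_locks[size_class]);
      // ... link the chunk into the list for this size class ...
    }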
@@ -622,17 +619,11 @@ void CompactibleFreeListSpace::set_end(HeapWord* value) {
       // Mark the boundary of the new block in BOT
       _bt.mark_block(prevEnd, value);
       // put it all in the linAB
-      if (ParallelGCThreads == 0) {
-        _smallLinearAllocBlock._ptr = prevEnd;
-        _smallLinearAllocBlock._word_size = newFcSize;
-        repairLinearAllocBlock(&_smallLinearAllocBlock);
-      } else { // ParallelGCThreads > 0
-        MutexLockerEx x(parDictionaryAllocLock(),
-                        Mutex::_no_safepoint_check_flag);
-        _smallLinearAllocBlock._ptr = prevEnd;
-        _smallLinearAllocBlock._word_size = newFcSize;
-        repairLinearAllocBlock(&_smallLinearAllocBlock);
-      }
+      MutexLockerEx x(parDictionaryAllocLock(),
+                      Mutex::_no_safepoint_check_flag);
+      _smallLinearAllocBlock._ptr = prevEnd;
+      _smallLinearAllocBlock._word_size = newFcSize;
+      repairLinearAllocBlock(&_smallLinearAllocBlock);
       // Births of chunks put into a LinAB are not recorded. Births
       // of chunks as they are allocated out of a LinAB are.
     } else {
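The surviving branch relies on MutexLockerEx, HotSpot's stack-allocated lock guard: the constructor acquires the mutex and the destructor releases it at scope exit, while Mutex::_no_safepoint_check_flag skips the safepoint check that ordinary acquisition would perform. For readers outside HotSpot, the standard-C++ analogue looks like this (dictionary_lock and repair_linab are illustrative):

    #include <mutex>

    std::mutex dictionary_lock;   // stand-in for parDictionaryAllocLock()

    void repair_linab() { /* ... fix up the linear allocation block ... */ }

    void set_end_tail() {
      std::lock_guard<std::mutex> x(dictionary_lock);  // acquired here
      repair_linab();                                  // mutate shared state
    }                                                  // released when x goes out of scope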
@@ -1740,10 +1731,7 @@ CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
   // One of the parallel gc task threads may be here
   // whilst others are allocating.
-  Mutex* lock = NULL;
-  if (ParallelGCThreads != 0) {
-    lock = &_parDictionaryAllocLock;
-  }
+  Mutex* lock = &_parDictionaryAllocLock;
   FreeChunk* ec;
   {
     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
@@ -1760,7 +1748,7 @@ CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
   }
   ec->set_size(size);
   debug_only(ec->mangleFreed(size));
-  if (size < SmallForDictionary && ParallelGCThreads != 0) {
+  if (size < SmallForDictionary) {
     lock = _indexedFreeListParLocks[size];
   }
   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
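Taken together, the two hunks above simplify this function's locking without changing the protocol: the chunk is coalesced under the dictionary lock, and the final insertion then takes the narrowest lock that covers the target structure, i.e. the per-size lock when the chunk fits the indexed lists. A condensed, self-contained sketch of that shape in portable C++ (all names are stand-ins for the patch's):

    #include <cstddef>
    #include <mutex>

    const std::size_t kSmallForDictionary = 257;        // stand-in for SmallForDictionary
    std::mutex dictionary_lock;                         // stand-in for _parDictionaryAllocLock
    std::mutex indexed_locks[kSmallForDictionary];      // stand-ins for _indexedFreeListParLocks

    void add_chunk_at_end(void* chunk, std::size_t size) {
      {
        std::lock_guard<std::mutex> x(dictionary_lock);
        // ... coalesce with the chunk at the end of the space ...
      }
      // Insertion takes the narrowest lock covering the target structure.
      std::mutex* lock = &dictionary_lock;
      if (size < kSmallForDictionary) {
        lock = &indexed_locks[size];                    // small chunk: per-size lock
      }
      std::lock_guard<std::mutex> y(*lock);
      // ... link chunk into the free list for this size ...
    }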
concurrentMarkSweepGeneration.cpp
@@ -887,10 +887,8 @@ void ConcurrentMarkSweepGeneration::reset_after_compaction() {
   // along with all the other pointers into the heap but
   // compaction is expected to be a rare event with
   // a heap using cms so don't do it without seeing the need.
-  if (CollectedHeap::use_parallel_gc_threads()) {
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      _par_gc_thread_states[i]->promo.reset();
-    }
-  }
+  for (uint i = 0; i < ParallelGCThreads; i++) {
+    _par_gc_thread_states[i]->promo.reset();
+  }
 }
 
@@ -2804,10 +2802,8 @@ void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
   collector()->gc_epilogue(full);
 
   // Also reset promotion tracking in par gc thread states.
-  if (CollectedHeap::use_parallel_gc_threads()) {
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
-    }
-  }
+  for (uint i = 0; i < ParallelGCThreads; i++) {
+    _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
+  }
 }
 
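One observation on these last two hunks: the deleted use_parallel_gc_threads() wrapper was redundant even before this change, since a loop bounded by ParallelGCThreads executes zero times when the count is zero, and the per-thread state array is therefore never dereferenced in that configuration. A runnable illustration of why removing the guard changes indentation, not behavior:

    #include <cstdio>

    typedef unsigned int uint;
    uint ParallelGCThreads = 0;   // illustrative; normally a HotSpot flag

    int main() {
      // With or without the old guard, zero threads means zero iterations.
      for (uint i = 0; i < ParallelGCThreads; i++) {
        std::printf("reset promotion state for worker %u\n", i);
      }
      return 0;
    }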