Mirror of https://github.com/openjdk/jdk.git (synced 2025-08-28 15:24:43 +02:00)
8139952: Remove UseCMSAdaptiveFreeLists, UseAsyncConcMarkSweepGC, CMSDictionaryChoice, CMSOverflowEarlyRestoration and CMSTestInFreeList
Reviewed-by: jwilhelm, ecaspole
parent: f8b8fb330b
commit: 437751031d
8 changed files with 33 additions and 320 deletions
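For orientation before the hunks, here is a minimal sketch of the construction paths after this change, assembled from the diff below. Only the signatures and call sites that appear in the hunks are taken from the commit; everything around them (headers, enclosing declarations) is assumed, so this is an illustrative summary rather than standalone-compilable HotSpot code.

    // Sketch: CMS constructors after the flag removal (see the hunks below).
    // The use_adaptive_freelists and DictionaryChoice parameters are gone;
    // adaptive free lists and the binary-tree dictionary are the only remaining path.
    CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr);
    ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct);

    // Call sites shrink to match (both appear in the hunks below):
    //   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
    //   g = new ConcurrentMarkSweepGeneration(rs, init_size(), remset);
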
@@ -73,11 +73,7 @@ void CompactibleFreeListSpace::set_cms_values() {
 }
 
 // Constructor
-CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
-                                                   MemRegion mr, bool use_adaptive_freelists,
-                                                   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
-  _dictionaryChoice(dictionaryChoice),
-  _adaptive_freelists(use_adaptive_freelists),
+CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
   _bt(bs, mr),
   // free list locks are in the range of values taken by _lockRank
   // This range currently is [_leaf+2, _leaf+3]
@@ -100,48 +96,17 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
          "FreeChunk is larger than expected");
   _bt.set_space(this);
   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
-  // We have all of "mr", all of which we place in the dictionary
-  // as one big chunk. We'll need to decide here which of several
-  // possible alternative dictionary implementations to use. For
-  // now the choice is easy, since we have only one working
-  // implementation, namely, the simple binary tree (splaying
-  // temporarily disabled).
-  switch (dictionaryChoice) {
-    case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
   _dictionary = new AFLBinaryTreeDictionary(mr);
-      break;
-    case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
-    case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
-    default:
-      warning("dictionaryChoice: selected option not understood; using"
-              " default BinaryTreeDictionary implementation instead.");
-  }
   assert(_dictionary != NULL, "CMS dictionary initialization");
   // The indexed free lists are initially all empty and are lazily
   // filled in on demand. Initialize the array elements to NULL.
   initializeIndexedFreeListArray();
 
-  // Not using adaptive free lists assumes that allocation is first
-  // from the linAB's. Also a cms perm gen which can be compacted
-  // has to have the klass's klassKlass allocated at a lower
-  // address in the heap than the klass so that the klassKlass is
-  // moved to its new location before the klass is moved.
-  // Set the _refillSize for the linear allocation blocks
-  if (!use_adaptive_freelists) {
-    FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
-                                           FreeBlockDictionary<FreeChunk>::atLeast);
-    // The small linAB initially has all the space and will allocate
-    // a chunk of any size.
-    HeapWord* addr = (HeapWord*) fc;
-    _smallLinearAllocBlock.set(addr, fc->size() ,
-                               1024*SmallForLinearAlloc, fc->size());
-    // Note that _unallocated_block is not updated here.
-    // Allocations from the linear allocation block should
-    // update it.
-  } else {
   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                              SmallForLinearAlloc);
-  }
   // CMSIndexedFreeListReplenish should be at least 1
   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   _promoInfo.setSpace(this);
@@ -297,22 +262,7 @@ void CompactibleFreeListSpace::reset_after_compaction() {
   MemRegion mr(compaction_top(), end());
   reset(mr);
   // Now refill the linear allocation block(s) if possible.
-  if (_adaptive_freelists) {
   refillLinearAllocBlocksIfNeeded();
-  } else {
-    // Place as much of mr in the linAB as we can get,
-    // provided it was big enough to go into the dictionary.
-    FreeChunk* fc = dictionary()->find_largest_dict();
-    if (fc != NULL) {
-      assert(fc->size() == mr.word_size(),
-             "Why was the chunk broken up?");
-      removeChunkFromDictionary(fc);
-      HeapWord* addr = (HeapWord*) fc;
-      _smallLinearAllocBlock.set(addr, fc->size() ,
-                                 1024*SmallForLinearAlloc, fc->size());
-      // Note that _unallocated_block is not updated here.
-    }
-  }
 }
 
 // Walks the entire dictionary, returning a coterminal
@@ -445,8 +395,7 @@ void CompactibleFreeListSpace::print_on(outputStream* st) const {
 
   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 
-  st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
-               _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
+  st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 }
 
 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
@@ -617,26 +566,12 @@ void CompactibleFreeListSpace::set_end(HeapWord* value) {
       // Now, take this new chunk and add it to the free blocks.
       // Note that the BOT has not yet been updated for this block.
       size_t newFcSize = pointer_delta(value, prevEnd);
-      // XXX This is REALLY UGLY and should be fixed up. XXX
-      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
-        // Mark the boundary of the new block in BOT
-        _bt.mark_block(prevEnd, value);
-        // put it all in the linAB
-        MutexLockerEx x(parDictionaryAllocLock(),
-                        Mutex::_no_safepoint_check_flag);
-        _smallLinearAllocBlock._ptr = prevEnd;
-        _smallLinearAllocBlock._word_size = newFcSize;
-        repairLinearAllocBlock(&_smallLinearAllocBlock);
-        // Births of chunks put into a LinAB are not recorded. Births
-        // of chunks as they are allocated out of a LinAB are.
-      } else {
       // Add the block to the free lists, if possible coalescing it
       // with the last free block, and update the BOT and census data.
       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
-      }
     }
   }
 }
 
 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   CompactibleFreeListSpace* _cfls;
@@ -1177,11 +1112,7 @@ HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
   assert(size == adjustObjectSize(size),
          "use adjustObjectSize() before calling into allocate()");
 
-  if (_adaptive_freelists) {
   res = allocate_adaptive_freelists(size);
-  } else { // non-adaptive free lists
-    res = allocate_non_adaptive_freelists(size);
-  }
 
   if (res != NULL) {
     // check that res does lie in this space!
@@ -1203,27 +1134,6 @@ HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
   return res;
 }
 
-HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
-  HeapWord* res = NULL;
-  // try and use linear allocation for smaller blocks
-  if (size < _smallLinearAllocBlock._allocation_size_limit) {
-    // if successful, the following also adjusts block offset table
-    res = getChunkFromSmallLinearAllocBlock(size);
-  }
-  // Else triage to indexed lists for smaller sizes
-  if (res == NULL) {
-    if (size < SmallForDictionary) {
-      res = (HeapWord*) getChunkFromIndexedFreeList(size);
-    } else {
-      // else get it from the big dictionary; if even this doesn't
-      // work we are out of luck.
-      res = (HeapWord*)getChunkFromDictionaryExact(size);
-    }
-  }
-
-  return res;
-}
-
 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
   assert_lock_strong(freelistLock());
   HeapWord* res = NULL;
@@ -1281,9 +1191,6 @@ size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
   // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
   // is added because the dictionary may over-allocate to avoid fragmentation.
   size_t space = obj_size;
-  if (!_adaptive_freelists) {
-    space = MAX2(space, _smallLinearAllocBlock._refillSize);
-  }
   space += _promoInfo.refillSize() + 2 * MinChunkSize;
   return space;
 }
@@ -1698,11 +1605,7 @@ CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
   size_t size = fc->size();
   _bt.verify_single_block((HeapWord*) fc, size);
   _bt.verify_not_unallocated((HeapWord*) fc, size);
-  if (_adaptive_freelists) {
   _indexedFreeList[size].return_chunk_at_tail(fc);
-  } else {
-    _indexedFreeList[size].return_chunk_at_head(fc);
-  }
 #ifndef PRODUCT
   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
     _indexedFreeList[size].verify_stats();
@@ -1931,10 +1834,6 @@ CompactibleFreeListSpace::gc_prologue() {
 void
 CompactibleFreeListSpace::gc_epilogue() {
   assert_locked();
-  if (PrintGCDetails && Verbose && !_adaptive_freelists) {
-    if (_smallLinearAllocBlock._word_size == 0)
-      warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
-  }
   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
   _promoInfo.stopTrackingPromotions();
   repairLinearAllocationBlocks();
@@ -2060,13 +1959,6 @@ CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
   }
 }
 
-// Support for concurrent collection policy decisions.
-bool CompactibleFreeListSpace::should_concurrent_collect() const {
-  // In the future we might want to add in fragmentation stats --
-  // including erosion of the "mountain" into this decision as well.
-  return !adaptive_freelists() && linearAllocationWouldFail();
-}
-
 // Support for compaction
 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
   scan_and_forward(this, cp);

@@ -138,7 +138,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // Linear allocation blocks
   LinearAllocBlock _smallLinearAllocBlock;
 
-  FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
   AFLBinaryTreeDictionary* _dictionary; // Pointer to dictionary for large size blocks
 
   // Indexed array for small size blocks
@@ -146,7 +145,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 
   // Allocation strategy
   bool _fitStrategy; // Use best fit strategy
-  bool _adaptive_freelists; // Use adaptive freelists
 
   // This is an address close to the largest free chunk in the heap.
   // It is currently assumed to be at the end of the heap. Free
@@ -204,10 +202,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // strategy that attempts to keep the needed number of chunks in each
   // indexed free lists.
   HeapWord* allocate_adaptive_freelists(size_t size);
-  // Allocate from the linear allocation buffers first. This allocation
-  // strategy assumes maximal coalescing can maintain chunks large enough
-  // to be used as linear allocation buffers.
-  HeapWord* allocate_non_adaptive_freelists(size_t size);
 
   // Gets a chunk from the linear allocation block (LinAB). If there
   // is not enough space in the LinAB, refills it.
@@ -333,9 +327,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 
  public:
   // Constructor
-  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
-                           bool use_adaptive_freelists,
-                           FreeBlockDictionary<FreeChunk>::DictionaryChoice);
+  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr);
   // Accessors
   bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
   FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
@@ -349,8 +341,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // chunk exists, return NULL.
   FreeChunk* find_chunk_at_end();
 
-  bool adaptive_freelists() const { return _adaptive_freelists; }
-
   void set_collector(CMSCollector* collector) { _collector = collector; }
 
   // Support for parallelization of rescan and marking.
@@ -536,9 +526,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
                                     bool coalesced);
 
-  // Support for decisions regarding concurrent collection policy.
-  bool should_concurrent_collect() const;
-
   // Support for compaction.
   void prepare_for_compaction(CompactPoint* cp);
   void adjust_pointers();

@@ -190,9 +190,7 @@ class CMSParGCThreadState: public CHeapObj<mtGC> {
 };
 
 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
-     ReservedSpace rs, size_t initial_byte_size,
-     CardTableRS* ct, bool use_adaptive_freelists,
-     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
+     ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
   CardGeneration(rs, initial_byte_size, ct),
   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
   _did_compact(false)
@@ -208,9 +206,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     _numWordsAllocated = 0;
   )
 
-  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
-                                           use_adaptive_freelists,
-                                           dictionaryChoice);
+  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
   _cmsSpace->_old_gen = this;
 
@@ -1312,13 +1308,6 @@ bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
     }
     return true;
   }
-  if (_cmsSpace->should_concurrent_collect()) {
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print(" %s: collect because cmsSpace says so ",
-                          short_name());
-    }
-    return true;
-  }
   return false;
 }
 
@@ -1766,9 +1755,8 @@ void CMSCollector::collect_in_background(GCCause::Cause cause) {
     MutexLockerEx hl(Heap_lock, safepoint_check);
     FreelistLocker fll(this);
     MutexLockerEx x(CGC_lock, safepoint_check);
-    if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
-      // The foreground collector is active or we're
-      // not using asynchronous collections. Skip this
+    if (_foregroundGCIsActive) {
+      // The foreground collector is. Skip this
       // background collection.
       assert(!_foregroundGCShouldWait, "Should be clear");
       return;
@@ -5214,9 +5202,8 @@ void CMSCollector::do_remark_non_parallel() {
 
   verify_work_stacks_empty();
   // Restore evacuated mark words, if any, used for overflow list links
-  if (!CMSOverflowEarlyRestoration) {
   restore_preserved_marks_if_any();
-  }
   verify_overflow_empty();
 }
 
@@ -6186,17 +6173,8 @@ void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
     assert(_collector->overflow_list_is_empty(),
            "overflow list was drained above");
-    // We could restore evacuated mark words, if any, used for
-    // overflow list links here because the overflow list is
-    // provably empty here. That would reduce the maximum
-    // size requirements for preserved_{oop,mark}_stack.
-    // But we'll just postpone it until we are all done
-    // so we can just stream through.
-    if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
-      _collector->restore_preserved_marks_if_any();
-      assert(_collector->no_preserved_marks(), "No preserved marks");
-    }
-    assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
+    assert(_collector->no_preserved_marks(),
            "All preserved marks should have been restored above");
   }
 }
@@ -7372,14 +7350,6 @@ void SweepClosure::initialize_free_range(HeapWord* freeFinger,
 
   set_freeFinger(freeFinger);
   set_freeRangeInFreeLists(freeRangeInFreeLists);
-  if (CMSTestInFreeList) {
-    if (freeRangeInFreeLists) {
-      FreeChunk* fc = (FreeChunk*) freeFinger;
-      assert(fc->is_free(), "A chunk on the free list should be free.");
-      assert(fc->size() > 0, "Free range should have a size");
-      assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
-    }
-  }
 }
 
 // Note that the sweeper runs concurrently with mutators. Thus,
@@ -7532,12 +7502,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
 
 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
   const size_t size = fc->size();
-  // Chunks that cannot be coalesced are not in the
-  // free lists.
-  if (CMSTestInFreeList && !fc->cantCoalesce()) {
-    assert(_sp->verify_chunk_in_free_list(fc),
-           "free chunk should be in free lists");
-  }
   // a chunk that is already free, should not have been
   // marked in the bit map
   HeapWord* const addr = (HeapWord*) fc;
@@ -7550,57 +7515,8 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
   // See the definition of cantCoalesce().
   if (!fc->cantCoalesce()) {
     // This chunk can potentially be coalesced.
-    if (_sp->adaptive_freelists()) {
     // All the work is done in
     do_post_free_or_garbage_chunk(fc, size);
-    } else { // Not adaptive free lists
-      // this is a free chunk that can potentially be coalesced by the sweeper;
-      if (!inFreeRange()) {
-        // if the next chunk is a free block that can't be coalesced
-        // it doesn't make sense to remove this chunk from the free lists
-        FreeChunk* nextChunk = (FreeChunk*)(addr + size);
-        assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
-        if ((HeapWord*)nextChunk < _sp->end() && // There is another free chunk to the right ...
-            nextChunk->is_free() &&              // ... which is free...
-            nextChunk->cantCoalesce()) {         // ... but can't be coalesced
-          // nothing to do
-        } else {
-          // Potentially the start of a new free range:
-          // Don't eagerly remove it from the free lists.
-          // No need to remove it if it will just be put
-          // back again. (Also from a pragmatic point of view
-          // if it is a free block in a region that is beyond
-          // any allocated blocks, an assertion will fail)
-          // Remember the start of a free run.
-          initialize_free_range(addr, true);
-          // end - can coalesce with next chunk
-        }
-      } else {
-        // the midst of a free range, we are coalescing
-        print_free_block_coalesced(fc);
-        if (CMSTraceSweeper) {
-          gclog_or_tty->print(" -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
-        }
-        // remove it from the free lists
-        _sp->removeFreeChunkFromFreeLists(fc);
-        set_lastFreeRangeCoalesced(true);
-        // If the chunk is being coalesced and the current free range is
-        // in the free lists, remove the current free range so that it
-        // will be returned to the free lists in its entirety - all
-        // the coalesced pieces included.
-        if (freeRangeInFreeLists()) {
-          FreeChunk* ffc = (FreeChunk*) freeFinger();
-          assert(ffc->size() == pointer_delta(addr, freeFinger()),
-                 "Size of free range is inconsistent with chunk size.");
-          if (CMSTestInFreeList) {
-            assert(_sp->verify_chunk_in_free_list(ffc),
-                   "free range is not in free lists");
-          }
-          _sp->removeFreeChunkFromFreeLists(ffc);
-          set_freeRangeInFreeLists(false);
-        }
-      }
-    }
     // Note that if the chunk is not coalescable (the else arm
     // below), we unconditionally flush, without needing to do
     // a "lookahead," as we do below.
@@ -7626,46 +7542,11 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
   HeapWord* const addr = (HeapWord*) fc;
   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
 
-  if (_sp->adaptive_freelists()) {
   // Verify that the bit map has no bits marked between
   // addr and purported end of just dead object.
   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
 
   do_post_free_or_garbage_chunk(fc, size);
-  } else {
-    if (!inFreeRange()) {
-      // start of a new free range
-      assert(size > 0, "A free range should have a size");
-      initialize_free_range(addr, false);
-    } else {
-      // this will be swept up when we hit the end of the
-      // free range
-      if (CMSTraceSweeper) {
-        gclog_or_tty->print(" -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
-      }
-      // If the chunk is being coalesced and the current free range is
-      // in the free lists, remove the current free range so that it
-      // will be returned to the free lists in its entirety - all
-      // the coalesced pieces included.
-      if (freeRangeInFreeLists()) {
-        FreeChunk* ffc = (FreeChunk*)freeFinger();
-        assert(ffc->size() == pointer_delta(addr, freeFinger()),
-               "Size of free range is inconsistent with chunk size.");
-        if (CMSTestInFreeList) {
-          assert(_sp->verify_chunk_in_free_list(ffc),
-                 "free range is not in free lists");
-        }
-        _sp->removeFreeChunkFromFreeLists(ffc);
-        set_freeRangeInFreeLists(false);
-      }
-      set_lastFreeRangeCoalesced(true);
-    }
-    // this will be swept up when we hit the end of the free range
-
-    // Verify that the bit map has no bits marked between
-    // addr and purported end of just dead object.
-    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
-  }
   assert(_limit >= addr + size,
          "A freshly garbage chunk can't possibly straddle over _limit");
   if (inFreeRange()) lookahead_and_flush(fc, size);
@@ -7727,11 +7608,7 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
   // do_post_free_or_garbage_chunk() should only be called in the case
   // of the adaptive free list allocator.
   const bool fcInFreeLists = fc->is_free();
-  assert(_sp->adaptive_freelists(), "Should only be used in this case.");
   assert((HeapWord*)fc <= _limit, "sweep invariant");
-  if (CMSTestInFreeList && fcInFreeLists) {
-    assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
-  }
 
   if (CMSTraceSweeper) {
     gclog_or_tty->print_cr(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
@@ -7784,10 +7661,6 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
     FreeChunk* const ffc = (FreeChunk*)freeFinger();
     assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
            "Size of free range is inconsistent with chunk size.");
-    if (CMSTestInFreeList) {
-      assert(_sp->verify_chunk_in_free_list(ffc),
-             "Chunk is not in free lists");
-    }
     _sp->coalDeath(ffc->size());
     _sp->removeFreeChunkFromFreeLists(ffc);
     set_freeRangeInFreeLists(false);
@@ -7856,12 +7729,6 @@ void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
   assert(size > 0,
          "A zero sized chunk cannot be added to the free lists.");
   if (!freeRangeInFreeLists()) {
-    if (CMSTestInFreeList) {
-      FreeChunk* fc = (FreeChunk*) chunk;
-      fc->set_size(size);
-      assert(!_sp->verify_chunk_in_free_list(fc),
-             "chunk should not be in free lists yet");
-    }
     if (CMSTraceSweeper) {
       gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
                              p2i(chunk), size);

@@ -1076,10 +1076,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   void assert_correct_size_change_locking();
 
  public:
-  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
-                                CardTableRS* ct,
-                                bool use_adaptive_freelists,
-                                FreeBlockDictionary<FreeChunk>::DictionaryChoice);
+  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct);
 
   // Accessors
   CMSCollector* collector() const { return _collector; }

@@ -138,7 +138,6 @@ class VM_GenCollectFullConcurrent: public VM_GC_Operation {
     : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */)
   {
     assert(FullGCCount_lock != NULL, "Error");
-    assert(UseAsyncConcMarkSweepGC, "Else will hang caller");
   }
   ~VM_GenCollectFullConcurrent() {}
   virtual VMOp_Type type() const { return VMOp_GenCollectFullConcurrent; }

@@ -58,9 +58,7 @@ Generation* GenerationSpec::init(ReservedSpace rs, CardTableRS* remset) {
       // else registers with an existing CMSCollector
 
       ConcurrentMarkSweepGeneration* g = NULL;
-      g = new ConcurrentMarkSweepGeneration(rs,
-                 init_size(), remset, UseCMSAdaptiveFreeLists,
-                 (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
+      g = new ConcurrentMarkSweepGeneration(rs, init_size(), remset);
 
       g->initialize_performance_counters();
 

@@ -2469,16 +2469,6 @@ bool Arguments::check_vm_args_consistency() {
     }
   }
 
-  // Note: only executed in non-PRODUCT mode
-  if (!UseAsyncConcMarkSweepGC &&
-      (ExplicitGCInvokesConcurrent ||
-       ExplicitGCInvokesConcurrentAndUnloadsClasses)) {
-    jio_fprintf(defaultStream::error_stream(),
-                "error: +ExplicitGCInvokesConcurrent[AndUnloadsClasses] conflicts"
-                " with -UseAsyncConcMarkSweepGC");
-    status = false;
-  }
-
   if (PrintNMTStatistics) {
 #if INCLUDE_NMT
     if (MemTracker::tracking_level() == NMT_off) {

@@ -1622,12 +1622,6 @@ public:
           "Number of times to retry allocations when " \
           "blocked by the GC locker") \
           \
-  develop(bool, UseCMSAdaptiveFreeLists, true, \
-          "Use adaptive free lists in the CMS generation") \
-          \
-  develop(bool, UseAsyncConcMarkSweepGC, true, \
-          "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\
-          \
   product(bool, UseCMSBestFit, true, \
           "Use CMS best fit allocation strategy") \
           \
@@ -1822,10 +1816,6 @@ public:
           "When CMS class unloading is enabled, the maximum CMS cycle " \
           "count for which classes may not be unloaded") \
           \
-  develop(intx, CMSDictionaryChoice, 0, \
-          "Use BinaryTreeDictionary as default in the CMS generation") \
-          range(0, 2) \
-          \
   product(uintx, CMSIndexedFreeListReplenish, 4, \
           "Replenish an indexed free list with this number of chunks") \
           range(1, max_uintx) \
@@ -1840,9 +1830,6 @@ public:
   product(bool, CMSLoopWarn, false, \
           "Warn in case of excessive CMS looping") \
           \
-  develop(bool, CMSOverflowEarlyRestoration, false, \
-          "Restore preserved marks early") \
-          \
   /* where does the range max value of (max_jint - 1) come from? */ \
   product(size_t, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \
           "Maximum size of marking stack") \
@@ -2080,10 +2067,6 @@ public:
           "unloading of classes when class unloading is enabled") \
           range(0, 100) \
           \
-  develop(bool, CMSTestInFreeList, false, \
-          "Check if the coalesced range is already in the " \
-          "free lists as claimed") \
-          \
   notproduct(bool, CMSVerifyReturnedBytes, false, \
           "Check that all the garbage collected was returned to the " \
           "free lists") \