7164144: Fix variable naming style in freeBlockDictionary.* and binaryTreeDictionary*

Fix naming style to be consistent with the predominant hotspot style.

Reviewed-by: ysr, brutisso
Author: Jon Masamitsu
Date:   2012-04-25 09:55:55 -07:00
parent f5558edf7b
commit b63f7f3a18
14 changed files with 620 additions and 620 deletions
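
The change is mechanical: camelCase method and accessor names in the CMS free-list code (isFree, setSize, linkPrev, returnChunkAtHead, splitBirths, and so on) are rewritten in the lower_case_with_underscores style used by most of HotSpot; no behavior changes. A minimal, hypothetical sketch of the convention being applied follows (the class and member bodies are illustrative stand-ins, not the actual FreeChunk declarations):

// Illustration only: maps a few of the old camelCase names to their new
// HotSpot-style equivalents on a toy chunk type. Member semantics are simplified.
#include <cstddef>

class ExampleChunk {                    // hypothetical stand-in for FreeChunk
  ExampleChunk* _prev;
  size_t        _size;
 public:
  ExampleChunk() : _prev(NULL), _size(0) {}
  // old: void setSize(size_t sz);
  void set_size(size_t sz)          { _size = sz; }
  // old: bool isFree() const;
  bool is_free() const              { return _prev != NULL; }
  // old: void linkPrev(ExampleChunk* p);
  void link_prev(ExampleChunk* p)   { _prev = p; }
};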

@ -119,7 +119,7 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
// moved to its new location before the klass is moved.
// Set the _refillSize for the linear allocation blocks
if (!use_adaptive_freelists) {
FreeChunk* fc = _dictionary->getChunk(mr.word_size());
FreeChunk* fc = _dictionary->get_chunk(mr.word_size());
// The small linAB initially has all the space and will allocate
// a chunk of any size.
HeapWord* addr = (HeapWord*) fc;
@ -275,12 +275,12 @@ void CompactibleFreeListSpace::reset(MemRegion mr) {
assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
_bt.single_block(mr.start(), mr.word_size());
FreeChunk* fc = (FreeChunk*) mr.start();
fc->setSize(mr.word_size());
fc->set_size(mr.word_size());
if (mr.word_size() >= IndexSetSize ) {
returnChunkToDictionary(fc);
} else {
_bt.verify_not_unallocated((HeapWord*)fc, fc->size());
_indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
_indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
}
}
_promoInfo.reset();
@ -298,7 +298,7 @@ void CompactibleFreeListSpace::reset_after_compaction() {
} else {
// Place as much of mr in the linAB as we can get,
// provided it was big enough to go into the dictionary.
FreeChunk* fc = dictionary()->findLargestDict();
FreeChunk* fc = dictionary()->find_largest_dict();
if (fc != NULL) {
assert(fc->size() == mr.word_size(),
"Why was the chunk broken up?");
@ -325,14 +325,14 @@ FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
#ifndef PRODUCT
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
_indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
_indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
}
}
size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
size_t sum = 0;
for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
}
return sum;
}
@ -356,7 +356,7 @@ size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
size_t CompactibleFreeListSpace::totalCount() {
size_t num = totalCountInIndexedFreeLists();
num += dictionary()->totalCount();
num += dictionary()->total_count();
if (_smallLinearAllocBlock._word_size != 0) {
num++;
}
@ -366,7 +366,7 @@ size_t CompactibleFreeListSpace::totalCount() {
bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
FreeChunk* fc = (FreeChunk*) p;
return fc->isFree();
return fc->is_free();
}
size_t CompactibleFreeListSpace::used() const {
@ -393,7 +393,7 @@ size_t CompactibleFreeListSpace::free() const {
// that supports jvmstat, and you are apt to see the values
// flicker in such cases.
assert(_dictionary != NULL, "No _dictionary?");
return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
totalSizeInIndexedFreeLists() +
_smallLinearAllocBlock._word_size) * HeapWordSize;
}
@ -401,7 +401,7 @@ size_t CompactibleFreeListSpace::free() const {
size_t CompactibleFreeListSpace::max_alloc_in_words() const {
assert(_dictionary != NULL, "No _dictionary?");
assert_locked();
size_t res = _dictionary->maxChunkSize();
size_t res = _dictionary->max_chunk_size();
res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
(size_t) SmallForLinearAlloc - 1));
// XXX the following could potentially be pretty slow;
@ -469,7 +469,7 @@ const {
void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
const {
_dictionary->reportStatistics();
_dictionary->report_statistics();
st->print_cr("Layout of Freelists in Tree");
st->print_cr("---------------------------");
_dictionary->print_free_lists(st);
@ -547,12 +547,12 @@ void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
void CompactibleFreeListSpace::reportFreeListStatistics() const {
assert_lock_strong(&_freelistLock);
assert(PrintFLSStatistics != 0, "Reporting error");
_dictionary->reportStatistics();
_dictionary->report_statistics();
if (PrintFLSStatistics > 1) {
reportIndexedFreeListStatistics();
size_t totalSize = totalSizeInIndexedFreeLists() +
_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
size_t total_size = totalSizeInIndexedFreeLists() +
_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
gclog_or_tty->print(" free=%ld frag=%1.4f\n", total_size, flsFrag());
}
}
@ -560,13 +560,13 @@ void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
assert_lock_strong(&_freelistLock);
gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
"--------------------------------\n");
size_t totalSize = totalSizeInIndexedFreeLists();
size_t freeBlocks = numFreeBlocksInIndexedFreeLists();
gclog_or_tty->print("Total Free Space: %d\n", totalSize);
size_t total_size = totalSizeInIndexedFreeLists();
size_t free_blocks = numFreeBlocksInIndexedFreeLists();
gclog_or_tty->print("Total Free Space: %d\n", total_size);
gclog_or_tty->print("Max Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
if (freeBlocks != 0) {
gclog_or_tty->print("Av. Block Size: %d\n", totalSize/freeBlocks);
gclog_or_tty->print("Number of Blocks: %d\n", free_blocks);
if (free_blocks != 0) {
gclog_or_tty->print("Av. Block Size: %d\n", total_size/free_blocks);
}
}
@ -913,7 +913,7 @@ CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
for (addr = bottom(), last = end();
addr < last; addr += size) {
FreeChunk* fc = (FreeChunk*)addr;
if (fc->isFree()) {
if (fc->is_free()) {
// Since we hold the free list lock, which protects direct
// allocation in this generation by mutators, a free object
// will remain free throughout this iteration code.
@ -955,7 +955,7 @@ CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
for (addr = block_start_careful(mr.start()), end = mr.end();
addr < end; addr += size) {
FreeChunk* fc = (FreeChunk*)addr;
if (fc->isFree()) {
if (fc->is_free()) {
// Since we hold the free list lock, which protects direct
// allocation in this generation by mutators, a free object
// will remain free throughout this iteration code.
@ -1071,7 +1071,7 @@ size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
NOT_PRODUCT(verify_objects_initialized());
assert(MemRegion(bottom(), end()).contains(p), "p not in space");
FreeChunk* fc = (FreeChunk*)p;
if (fc->isFree()) {
if (fc->is_free()) {
return fc->size();
} else {
// Ignore mark word because this may be a recently promoted
@ -1162,7 +1162,7 @@ bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
FreeChunk* fc = (FreeChunk*)p;
assert(is_in_reserved(p), "Should be in space");
assert(_bt.block_start(p) == p, "Should be a block boundary");
if (!fc->isFree()) {
if (!fc->is_free()) {
// Ignore mark word because it may have been used to
// chain together promoted objects (the last one
// would have a null value).
@ -1224,7 +1224,7 @@ HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
FreeChunk* fc = (FreeChunk*)res;
fc->markNotFree();
assert(!fc->isFree(), "shouldn't be marked free");
assert(!fc->is_free(), "shouldn't be marked free");
assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
// Verify that the block offset table shows this to
// be a single block, but not one which is unallocated.
@ -1336,7 +1336,7 @@ FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
FreeList<FreeChunk>* fl = &_indexedFreeList[i];
if (fl->head()) {
ret = getFromListGreater(fl, numWords);
assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
return ret;
}
}
@ -1347,7 +1347,7 @@ FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
/* Try to get a chunk that satisfies request, while avoiding
fragmentation that can't be handled. */
{
ret = dictionary()->getChunk(currSize);
ret = dictionary()->get_chunk(currSize);
if (ret != NULL) {
assert(ret->size() - numWords >= MinChunkSize,
"Chunk is too small");
@ -1355,10 +1355,10 @@ FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
/* Carve returned chunk. */
(void) splitChunkAndReturnRemainder(ret, numWords);
/* Label this as no longer a free chunk. */
assert(ret->isFree(), "This chunk should be free");
ret->linkPrev(NULL);
assert(ret->is_free(), "This chunk should be free");
ret->link_prev(NULL);
}
assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
return ret;
}
ShouldNotReachHere();
@ -1366,7 +1366,7 @@ FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
assert(fc->size() < IndexSetSize, "Size of chunk is too large");
return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
}
bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
@ -1380,13 +1380,13 @@ bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc)
// Check if the purported free chunk is present either as a linear
// allocation block, the size-indexed table of (smaller) free blocks,
// or the larger free blocks kept in the binary tree dictionary.
bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
if (verify_chunk_is_linear_alloc_block(fc)) {
return true;
} else if (fc->size() < IndexSetSize) {
return verifyChunkInIndexedFreeLists(fc);
} else {
return dictionary()->verifyChunkInFreeLists(fc);
return dictionary()->verify_chunk_in_free_list(fc);
}
}
@ -1414,7 +1414,7 @@ FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
}
if (fc != NULL) {
fc->dontCoalesce();
assert(fc->isFree(), "Should be free, but not coalescable");
assert(fc->is_free(), "Should be free, but not coalescable");
// Verify that the block offset table shows this to
// be a single block, but not one which is unallocated.
_bt.verify_single_block((HeapWord*)fc, fc->size());
@ -1494,7 +1494,7 @@ CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
}
// Return the chunk that isn't big enough, and then refill below.
addChunkToFreeLists(blk->_ptr, sz);
splitBirth(sz);
split_birth(sz);
// Don't keep statistics on adding back chunk from a LinAB.
} else {
// A refilled block would not satisfy the request.
@ -1506,14 +1506,14 @@ CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
"block was replenished");
if (res != NULL) {
splitBirth(size);
split_birth(size);
repairLinearAllocBlock(blk);
} else if (blk->_ptr != NULL) {
res = blk->_ptr;
size_t blk_size = blk->_word_size;
blk->_word_size -= size;
blk->_ptr += size;
splitBirth(size);
split_birth(size);
repairLinearAllocBlock(blk);
// Update BOT last so that other (parallel) GC threads see a consistent
// view of the BOT and free blocks.
@ -1542,7 +1542,7 @@ HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
size_t blk_size = blk->_word_size;
blk->_word_size -= size;
blk->_ptr += size;
splitBirth(size);
split_birth(size);
repairLinearAllocBlock(blk);
// Update BOT last so that other (parallel) GC threads see a consistent
// view of the BOT and free blocks.
@ -1559,7 +1559,7 @@ CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
assert_locked();
assert(size < SmallForDictionary, "just checking");
FreeChunk* res;
res = _indexedFreeList[size].getChunkAtHead();
res = _indexedFreeList[size].get_chunk_at_head();
if (res == NULL) {
res = getChunkFromIndexedFreeListHelper(size);
}
@ -1593,7 +1593,7 @@ CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
// Do not replenish from an underpopulated size.
if (_indexedFreeList[replenish_size].surplus() > 0 &&
_indexedFreeList[replenish_size].head() != NULL) {
newFc = _indexedFreeList[replenish_size].getChunkAtHead();
newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
} else if (bestFitFirst()) {
newFc = bestFitSmall(replenish_size);
}
@ -1626,13 +1626,13 @@ CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
i < (num_blk - 1);
curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
i++) {
curFc->setSize(size);
curFc->set_size(size);
// Don't record this as a return in order to try and
// determine the "returns" from a GC.
_bt.verify_not_unallocated((HeapWord*) fc, size);
_indexedFreeList[size].returnChunkAtTail(curFc, false);
_indexedFreeList[size].return_chunk_at_tail(curFc, false);
_bt.mark_block((HeapWord*)curFc, size);
splitBirth(size);
split_birth(size);
// Don't record the initial population of the indexed list
// as a split birth.
}
@ -1640,9 +1640,9 @@ CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
// check that the arithmetic was OK above
assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
"inconsistency in carving newFc");
curFc->setSize(size);
curFc->set_size(size);
_bt.mark_block((HeapWord*)curFc, size);
splitBirth(size);
split_birth(size);
fc = curFc;
} else {
// Return entire block to caller
@ -1655,14 +1655,14 @@ CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
// replenish the indexed free list.
fc = getChunkFromDictionaryExact(size);
}
// assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
// assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
return fc;
}
FreeChunk*
CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
assert_locked();
FreeChunk* fc = _dictionary->getChunk(size);
FreeChunk* fc = _dictionary->get_chunk(size);
if (fc == NULL) {
return NULL;
}
@ -1679,7 +1679,7 @@ CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
FreeChunk*
CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
assert_locked();
FreeChunk* fc = _dictionary->getChunk(size);
FreeChunk* fc = _dictionary->get_chunk(size);
if (fc == NULL) {
return fc;
}
@ -1688,11 +1688,11 @@ CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
_bt.verify_single_block((HeapWord*)fc, size);
return fc;
}
assert(fc->size() > size, "getChunk() guarantee");
assert(fc->size() > size, "get_chunk() guarantee");
if (fc->size() < size + MinChunkSize) {
// Return the chunk to the dictionary and go get a bigger one.
returnChunkToDictionary(fc);
fc = _dictionary->getChunk(size + MinChunkSize);
fc = _dictionary->get_chunk(size + MinChunkSize);
if (fc == NULL) {
return NULL;
}
@ -1713,7 +1713,7 @@ CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
_bt.verify_single_block((HeapWord*)chunk, size);
// adjust _unallocated_block downward, as necessary
_bt.freed((HeapWord*)chunk, size);
_dictionary->returnChunk(chunk);
_dictionary->return_chunk(chunk);
#ifndef PRODUCT
if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
TreeChunk<FreeChunk>::as_TreeChunk(chunk)->list()->verify_stats();
@ -1728,9 +1728,9 @@ CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
_bt.verify_single_block((HeapWord*) fc, size);
_bt.verify_not_unallocated((HeapWord*) fc, size);
if (_adaptive_freelists) {
_indexedFreeList[size].returnChunkAtTail(fc);
_indexedFreeList[size].return_chunk_at_tail(fc);
} else {
_indexedFreeList[size].returnChunkAtHead(fc);
_indexedFreeList[size].return_chunk_at_head(fc);
}
#ifndef PRODUCT
if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
@ -1758,7 +1758,7 @@ CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
FreeChunk* ec;
{
MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
ec = dictionary()->findLargestDict(); // get largest block
ec = dictionary()->find_largest_dict(); // get largest block
if (ec != NULL && ec->end() == chunk) {
// It's a coterminal block - we can coalesce.
size_t old_size = ec->size();
@ -1769,7 +1769,7 @@ CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
ec = (FreeChunk*)chunk;
}
}
ec->setSize(size);
ec->set_size(size);
debug_only(ec->mangleFreed(size));
if (size < SmallForDictionary) {
lock = _indexedFreeListParLocks[size];
@ -1792,7 +1792,7 @@ CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
_bt.verify_single_block(chunk, size);
FreeChunk* fc = (FreeChunk*) chunk;
fc->setSize(size);
fc->set_size(size);
debug_only(fc->mangleFreed(size));
if (size < SmallForDictionary) {
returnChunkToFreeList(fc);
@ -1835,7 +1835,7 @@ CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
assert_locked();
assert(fc != NULL, "null chunk");
_bt.verify_single_block((HeapWord*)fc, size);
_dictionary->removeChunk(fc);
_dictionary->remove_chunk(fc);
// adjust _unallocated_block upward, as necessary
_bt.allocated((HeapWord*)fc, size);
}
@ -1850,7 +1850,7 @@ CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
verifyIndexedFreeList(size);
}
)
_indexedFreeList[size].removeChunk(fc);
_indexedFreeList[size].remove_chunk(fc);
NOT_PRODUCT(
if (FLSVerifyIndexTable) {
verifyIndexedFreeList(size);
@ -1874,7 +1874,7 @@ FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
// and split out a free chunk which is returned.
_indexedFreeList[start].set_hint(hint);
FreeChunk* res = getFromListGreater(fl, numWords);
assert(res == NULL || res->isFree(),
assert(res == NULL || res->is_free(),
"Should be returning a free chunk");
return res;
}
@ -1896,13 +1896,13 @@ FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList<FreeChunk>* fl,
assert(oldNumWords >= numWords + MinChunkSize,
"Size of chunks in the list is too small");
fl->removeChunk(curr);
fl->remove_chunk(curr);
// recorded indirectly by splitChunkAndReturnRemainder -
// smallSplit(oldNumWords, numWords);
FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
// Does anything have to be done for the remainder in terms of
// fixing the card table?
assert(new_chunk == NULL || new_chunk->isFree(),
assert(new_chunk == NULL || new_chunk->is_free(),
"Should be returning a free chunk");
return new_chunk;
}
@ -1920,13 +1920,13 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
assert(is_aligned(ffc), "alignment problem");
ffc->setSize(rem_size);
ffc->linkNext(NULL);
ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
ffc->set_size(rem_size);
ffc->link_next(NULL);
ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
// Above must occur before BOT is updated below.
// adjust block offset table
OrderAccess::storestore();
assert(chunk->isFree() && ffc->isFree(), "Error");
assert(chunk->is_free() && ffc->is_free(), "Error");
_bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
if (rem_size < SmallForDictionary) {
bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
@ -1941,7 +1941,7 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
returnChunkToDictionary(ffc);
split(size ,rem_size);
}
chunk->setSize(new_size);
chunk->set_size(new_size);
return chunk;
}
@ -2048,10 +2048,10 @@ void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
"Minimum block size requirement");
FreeChunk* fc = (FreeChunk*)(blk->_ptr);
fc->setSize(blk->_word_size);
fc->linkPrev(NULL); // mark as free
fc->set_size(blk->_word_size);
fc->link_prev(NULL); // mark as free
fc->dontCoalesce();
assert(fc->isFree(), "just marked it free");
assert(fc->is_free(), "just marked it free");
assert(fc->cantCoalesce(), "just marked it uncoalescable");
}
}
@ -2151,7 +2151,7 @@ double CompactibleFreeListSpace::flsFrag() const {
}
double totFree = itabFree +
_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
if (totFree > 0) {
frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
(totFree * totFree));
@ -2174,11 +2174,11 @@ void CompactibleFreeListSpace::beginSweepFLCensus(
gclog_or_tty->print("size[%d] : ", i);
}
fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
fl->set_beforeSweep(fl->count());
fl->set_bfrSurp(fl->surplus());
fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
fl->set_before_sweep(fl->count());
fl->set_bfr_surp(fl->surplus());
}
_dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
_dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
inter_sweep_current,
inter_sweep_estimate,
intra_sweep_estimate);
@ -2212,17 +2212,17 @@ void CompactibleFreeListSpace::clearFLCensus() {
size_t i;
for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
FreeList<FreeChunk> *fl = &_indexedFreeList[i];
fl->set_prevSweep(fl->count());
fl->set_coalBirths(0);
fl->set_coalDeaths(0);
fl->set_splitBirths(0);
fl->set_splitDeaths(0);
fl->set_prev_sweep(fl->count());
fl->set_coal_births(0);
fl->set_coal_deaths(0);
fl->set_split_births(0);
fl->set_split_deaths(0);
}
}
void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
if (PrintFLSStatistics > 0) {
HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
largestAddr);
}
@ -2233,30 +2233,30 @@ void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
}
clearFLCensus();
assert_locked();
_dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
_dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
}
bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
if (size < SmallForDictionary) {
FreeList<FreeChunk> *fl = &_indexedFreeList[size];
return (fl->coalDesired() < 0) ||
((int)fl->count() > fl->coalDesired());
return (fl->coal_desired() < 0) ||
((int)fl->count() > fl->coal_desired());
} else {
return dictionary()->coalDictOverPopulated(size);
return dictionary()->coal_dict_over_populated(size);
}
}
void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
assert(size < SmallForDictionary, "Size too large for indexed list");
FreeList<FreeChunk> *fl = &_indexedFreeList[size];
fl->increment_coalBirths();
fl->increment_coal_births();
fl->increment_surplus();
}
void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
assert(size < SmallForDictionary, "Size too large for indexed list");
FreeList<FreeChunk> *fl = &_indexedFreeList[size];
fl->increment_coalDeaths();
fl->increment_coal_deaths();
fl->decrement_surplus();
}
@ -2264,7 +2264,7 @@ void CompactibleFreeListSpace::coalBirth(size_t size) {
if (size < SmallForDictionary) {
smallCoalBirth(size);
} else {
dictionary()->dictCensusUpdate(size,
dictionary()->dict_census_udpate(size,
false /* split */,
true /* birth */);
}
@ -2274,7 +2274,7 @@ void CompactibleFreeListSpace::coalDeath(size_t size) {
if(size < SmallForDictionary) {
smallCoalDeath(size);
} else {
dictionary()->dictCensusUpdate(size,
dictionary()->dict_census_udpate(size,
false /* split */,
false /* birth */);
}
@ -2283,22 +2283,22 @@ void CompactibleFreeListSpace::coalDeath(size_t size) {
void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
assert(size < SmallForDictionary, "Size too large for indexed list");
FreeList<FreeChunk> *fl = &_indexedFreeList[size];
fl->increment_splitBirths();
fl->increment_split_births();
fl->increment_surplus();
}
void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
assert(size < SmallForDictionary, "Size too large for indexed list");
FreeList<FreeChunk> *fl = &_indexedFreeList[size];
fl->increment_splitDeaths();
fl->increment_split_deaths();
fl->decrement_surplus();
}
void CompactibleFreeListSpace::splitBirth(size_t size) {
void CompactibleFreeListSpace::split_birth(size_t size) {
if (size < SmallForDictionary) {
smallSplitBirth(size);
} else {
dictionary()->dictCensusUpdate(size,
dictionary()->dict_census_udpate(size,
true /* split */,
true /* birth */);
}
@ -2308,7 +2308,7 @@ void CompactibleFreeListSpace::splitDeath(size_t size) {
if (size < SmallForDictionary) {
smallSplitDeath(size);
} else {
dictionary()->dictCensusUpdate(size,
dictionary()->dict_census_udpate(size,
true /* split */,
false /* birth */);
}
@ -2317,8 +2317,8 @@ void CompactibleFreeListSpace::splitDeath(size_t size) {
void CompactibleFreeListSpace::split(size_t from, size_t to1) {
size_t to2 = from - to1;
splitDeath(from);
splitBirth(to1);
splitBirth(to2);
split_birth(to1);
split_birth(to2);
}
void CompactibleFreeListSpace::print() const {
@ -2364,7 +2364,7 @@ class VerifyAllBlksClosure: public BlkClosure {
FreeChunk* fc = (FreeChunk*)addr;
res = fc->size();
if (FLSVerifyLists && !fc->cantCoalesce()) {
guarantee(_sp->verifyChunkInFreeLists(fc),
guarantee(_sp->verify_chunk_in_free_list(fc),
"Chunk should be on a free list");
}
}
@ -2520,7 +2520,7 @@ void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
"Slot should have been empty");
for (; fc != NULL; fc = fc->next(), n++) {
guarantee(fc->size() == size, "Size inconsistency");
guarantee(fc->isFree(), "!free?");
guarantee(fc->is_free(), "!free?");
guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
}
@ -2529,7 +2529,7 @@ void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
#ifndef PRODUCT
void CompactibleFreeListSpace::check_free_list_consistency() const {
assert(_dictionary->minSize() <= IndexSetSize,
assert(_dictionary->min_size() <= IndexSetSize,
"Some sizes can't be allocated without recourse to"
" linear allocation buffers");
assert(BinaryTreeDictionary<FreeChunk>::min_tree_chunk_size*HeapWordSize == sizeof(TreeChunk<FreeChunk>),
@ -2548,33 +2548,33 @@ void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
FreeList<FreeChunk> total;
gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
size_t totalFree = 0;
size_t total_free = 0;
for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
const FreeList<FreeChunk> *fl = &_indexedFreeList[i];
totalFree += fl->count() * fl->size();
total_free += fl->count() * fl->size();
if (i % (40*IndexSetStride) == 0) {
FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
}
fl->print_on(gclog_or_tty);
total.set_bfrSurp( total.bfrSurp() + fl->bfrSurp() );
total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
total.set_surplus( total.surplus() + fl->surplus() );
total.set_desired( total.desired() + fl->desired() );
total.set_prevSweep( total.prevSweep() + fl->prevSweep() );
total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
total.set_prev_sweep( total.prev_sweep() + fl->prev_sweep() );
total.set_before_sweep(total.before_sweep() + fl->before_sweep());
total.set_count( total.count() + fl->count() );
total.set_coalBirths( total.coalBirths() + fl->coalBirths() );
total.set_coalDeaths( total.coalDeaths() + fl->coalDeaths() );
total.set_splitBirths(total.splitBirths() + fl->splitBirths());
total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
total.set_coal_births( total.coal_births() + fl->coal_births() );
total.set_coal_deaths( total.coal_deaths() + fl->coal_deaths() );
total.set_split_births(total.split_births() + fl->split_births());
total.set_split_deaths(total.split_deaths() + fl->split_deaths());
}
total.print_on(gclog_or_tty, "TOTAL");
gclog_or_tty->print_cr("Total free in indexed lists "
SIZE_FORMAT " words", totalFree);
SIZE_FORMAT " words", total_free);
gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n",
(double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
(total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
(double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
(total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
(double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
_dictionary->printDictCensus();
_dictionary->print_dict_census();
}
///////////////////////////////////////////////////////////////////////////
@ -2643,11 +2643,11 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
// If it didn't work, give up.
if (fl->count() == 0) return NULL;
}
res = fl->getChunkAtHead();
res = fl->get_chunk_at_head();
assert(res != NULL, "Why was count non-zero?");
}
res->markNotFree();
assert(!res->isFree(), "shouldn't be marked free");
assert(!res->is_free(), "shouldn't be marked free");
assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
// mangle a just allocated object with a distinct pattern.
debug_only(res->mangleAllocated(word_sz));
@ -2786,9 +2786,9 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
// we increment the split death count by the number of blocks
// we just took from the cur_sz-size blocks list and which
// we will be splitting below.
ssize_t deaths = gfl->splitDeaths() +
ssize_t deaths = gfl->split_deaths() +
fl_for_cur_sz.count();
gfl->set_splitDeaths(deaths);
gfl->set_split_deaths(deaths);
}
}
}
@ -2799,21 +2799,21 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
} else {
// Divide each block on fl_for_cur_sz up k ways.
FreeChunk* fc;
while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
// Must do this in reverse order, so that anybody attempting to
// access the main chunk sees it as a single free block until we
// change it.
size_t fc_size = fc->size();
assert(fc->isFree(), "Error");
assert(fc->is_free(), "Error");
for (int i = k-1; i >= 0; i--) {
FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
assert((i != 0) ||
((fc == ffc) && ffc->isFree() &&
((fc == ffc) && ffc->is_free() &&
(ffc->size() == k*word_sz) && (fc_size == word_sz)),
"Counting error");
ffc->setSize(word_sz);
ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
ffc->linkNext(NULL);
ffc->set_size(word_sz);
ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
ffc->link_next(NULL);
// Above must occur before BOT is updated below.
OrderAccess::storestore();
// splitting from the right, fc_size == i * word_sz
@ -2824,7 +2824,7 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
_bt.verify_single_block((HeapWord*)fc, fc_size);
_bt.verify_single_block((HeapWord*)ffc, word_sz);
// Push this on "fl".
fl->returnChunkAtHead(ffc);
fl->return_chunk_at_head(ffc);
}
// TRAP
assert(fl->tail()->next() == NULL, "List invariant.");
@ -2834,8 +2834,8 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
size_t num = fl->count();
MutexLockerEx x(_indexedFreeListParLocks[word_sz],
Mutex::_no_safepoint_check_flag);
ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
_indexedFreeList[word_sz].set_splitBirths(births);
ssize_t births = _indexedFreeList[word_sz].split_births() + num;
_indexedFreeList[word_sz].set_split_births(births);
return;
}
}
@ -2848,12 +2848,12 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
MutexLockerEx x(parDictionaryAllocLock(),
Mutex::_no_safepoint_check_flag);
while (n > 0) {
fc = dictionary()->getChunk(MAX2(n * word_sz,
_dictionary->minSize()),
fc = dictionary()->get_chunk(MAX2(n * word_sz,
_dictionary->min_size()),
FreeBlockDictionary<FreeChunk>::atLeast);
if (fc != NULL) {
_bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
dictionary()->dictCensusUpdate(fc->size(),
dictionary()->dict_census_udpate(fc->size(),
true /*split*/,
false /*birth*/);
break;
@ -2864,7 +2864,7 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
if (fc == NULL) return;
// Otherwise, split up that block.
assert((ssize_t)n >= 1, "Control point invariant");
assert(fc->isFree(), "Error: should be a free block");
assert(fc->is_free(), "Error: should be a free block");
_bt.verify_single_block((HeapWord*)fc, fc->size());
const size_t nn = fc->size() / word_sz;
n = MIN2(nn, n);
@ -2895,18 +2895,18 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
if (rem > 0) {
size_t prefix_size = n * word_sz;
rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
rem_fc->setSize(rem);
rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
rem_fc->linkNext(NULL);
rem_fc->set_size(rem);
rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
rem_fc->link_next(NULL);
// Above must occur before BOT is updated below.
assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
OrderAccess::storestore();
_bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
assert(fc->isFree(), "Error");
fc->setSize(prefix_size);
assert(fc->is_free(), "Error");
fc->set_size(prefix_size);
if (rem >= IndexSetSize) {
returnChunkToDictionary(rem_fc);
dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
dictionary()->dict_census_udpate(rem, true /*split*/, true /*birth*/);
rem_fc = NULL;
}
// Otherwise, return it to the small list below.
@ -2916,7 +2916,7 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
MutexLockerEx x(_indexedFreeListParLocks[rem],
Mutex::_no_safepoint_check_flag);
_bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
_indexedFreeList[rem].returnChunkAtHead(rem_fc);
_indexedFreeList[rem].return_chunk_at_head(rem_fc);
smallSplitBirth(rem);
}
assert((ssize_t)n > 0 && fc != NULL, "Consistency");
@ -2928,9 +2928,9 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
// All but first chunk in this loop
for (ssize_t i = n-1; i > 0; i--) {
FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
ffc->setSize(word_sz);
ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
ffc->linkNext(NULL);
ffc->set_size(word_sz);
ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
ffc->link_next(NULL);
// Above must occur before BOT is updated below.
OrderAccess::storestore();
// splitting from the right, fc_size == (n - i + 1) * wordsize
@ -2940,25 +2940,25 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
_bt.verify_single_block((HeapWord*)ffc, ffc->size());
_bt.verify_single_block((HeapWord*)fc, fc_size);
// Push this on "fl".
fl->returnChunkAtHead(ffc);
fl->return_chunk_at_head(ffc);
}
// First chunk
assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
// The blocks above should show their new sizes before the first block below
fc->setSize(word_sz);
fc->linkPrev(NULL); // idempotent wrt free-ness, see assert above
fc->linkNext(NULL);
fc->set_size(word_sz);
fc->link_prev(NULL); // idempotent wrt free-ness, see assert above
fc->link_next(NULL);
_bt.verify_not_unallocated((HeapWord*)fc, fc->size());
_bt.verify_single_block((HeapWord*)fc, fc->size());
fl->returnChunkAtHead(fc);
fl->return_chunk_at_head(fc);
assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
{
// Update the stats for this block size.
MutexLockerEx x(_indexedFreeListParLocks[word_sz],
Mutex::_no_safepoint_check_flag);
const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
_indexedFreeList[word_sz].set_splitBirths(births);
const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
_indexedFreeList[word_sz].set_split_births(births);
// ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
// _indexedFreeList[word_sz].set_surplus(new_surplus);
}

@ -499,7 +499,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Verify that the given chunk is in the free lists:
// i.e. either the binary tree dictionary, the indexed free lists
// or the linear allocation block.
bool verifyChunkInFreeLists(FreeChunk* fc) const;
bool verify_chunk_in_free_list(FreeChunk* fc) const;
// Verify that the given chunk is the linear allocation block
bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
// Do some basic checks on the the free lists.
@ -608,7 +608,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
void coalDeath(size_t size);
void smallSplitBirth(size_t size);
void smallSplitDeath(size_t size);
void splitBirth(size_t size);
void split_birth(size_t size);
void splitDeath(size_t size);
void split(size_t from, size_t to1);

@ -1026,7 +1026,7 @@ HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
// its mark-bit or P-bits not yet set. Such objects need
// to be safely navigable by block_start().
assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)res)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
collector()->direct_allocated(res, adjustedSize);
_direct_allocated_words += adjustedSize;
// allocation counters
@ -1391,7 +1391,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
oop obj = oop(obj_ptr);
OrderAccess::storestore();
assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
// IMPORTANT: See note on object initialization for CMS above.
// Otherwise, copy the object. Here we must be careful to insert the
// klass pointer last, since this marks the block as an allocated object.
@ -1400,7 +1400,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
// Restore the mark word copied above.
obj->set_mark(m);
assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
OrderAccess::storestore();
if (UseCompressedOops) {
@ -1421,7 +1421,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
promoInfo->track((PromotedObject*)obj, old->klass());
}
assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
assert(old->is_oop(), "Will use and dereference old klass ptr below");
// Finally, install the klass pointer (this should be volatile).
@ -2034,7 +2034,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
pointer_delta(cms_space->end(), cms_space->compaction_top())
* HeapWordSize,
"All the free space should be compacted into one chunk at top");
assert(cms_space->dictionary()->totalChunkSize(
assert(cms_space->dictionary()->total_chunk_size(
debug_only(cms_space->freelistLock())) == 0 ||
cms_space->totalSizeInIndexedFreeLists() == 0,
"All the free space should be in a single chunk");
@ -6131,7 +6131,7 @@ void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
double nearLargestPercent = FLSLargestBlockCoalesceProximity;
HeapWord* minAddr = _cmsSpace->bottom();
HeapWord* largestAddr =
(HeapWord*) _cmsSpace->dictionary()->findLargestDict();
(HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
if (largestAddr == NULL) {
// The dictionary appears to be empty. In this case
// try to coalesce at the end of the heap.
@ -7906,7 +7906,7 @@ SweepClosure::SweepClosure(CMSCollector* collector,
_last_fc = NULL;
_sp->initializeIndexedFreeListArrayReturnedBytes();
_sp->dictionary()->initializeDictReturnedBytes();
_sp->dictionary()->initialize_dict_returned_bytes();
)
assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
"sweep _limit out of bounds");
@ -7954,13 +7954,13 @@ SweepClosure::~SweepClosure() {
if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
indexListReturnedBytes);
gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
dictReturnedBytes);
dict_returned_bytes);
}
}
if (CMSTraceSweeper) {
@ -7985,9 +7985,9 @@ void SweepClosure::initialize_free_range(HeapWord* freeFinger,
if (CMSTestInFreeList) {
if (freeRangeInFreeLists) {
FreeChunk* fc = (FreeChunk*) freeFinger;
assert(fc->isFree(), "A chunk on the free list should be free.");
assert(fc->is_free(), "A chunk on the free list should be free.");
assert(fc->size() > 0, "Free range should have a size");
assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
}
}
}
@ -8057,7 +8057,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
assert(addr < _limit, "sweep invariant");
// check if we should yield
do_yield_check(addr);
if (fc->isFree()) {
if (fc->is_free()) {
// Chunk that is already free
res = fc->size();
do_already_free_chunk(fc);
@ -8145,7 +8145,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
// Chunks that cannot be coalesced are not in the
// free lists.
if (CMSTestInFreeList && !fc->cantCoalesce()) {
assert(_sp->verifyChunkInFreeLists(fc),
assert(_sp->verify_chunk_in_free_list(fc),
"free chunk should be in free lists");
}
// a chunk that is already free, should not have been
@ -8171,7 +8171,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
FreeChunk* nextChunk = (FreeChunk*)(addr + size);
assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
if ((HeapWord*)nextChunk < _sp->end() && // There is another free chunk to the right ...
nextChunk->isFree() && // ... which is free...
nextChunk->is_free() && // ... which is free...
nextChunk->cantCoalesce()) { // ... but can't be coalesced
// nothing to do
} else {
@ -8203,7 +8203,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
assert(ffc->size() == pointer_delta(addr, freeFinger()),
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
assert(_sp->verifyChunkInFreeLists(ffc),
assert(_sp->verify_chunk_in_free_list(ffc),
"free range is not in free lists");
}
_sp->removeFreeChunkFromFreeLists(ffc);
@ -8262,7 +8262,7 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
assert(ffc->size() == pointer_delta(addr, freeFinger()),
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
assert(_sp->verifyChunkInFreeLists(ffc),
assert(_sp->verify_chunk_in_free_list(ffc),
"free range is not in free lists");
}
_sp->removeFreeChunkFromFreeLists(ffc);
@ -8351,11 +8351,11 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
size_t chunkSize) {
// do_post_free_or_garbage_chunk() should only be called in the case
// of the adaptive free list allocator.
const bool fcInFreeLists = fc->isFree();
const bool fcInFreeLists = fc->is_free();
assert(_sp->adaptive_freelists(), "Should only be used in this case.");
assert((HeapWord*)fc <= _limit, "sweep invariant");
if (CMSTestInFreeList && fcInFreeLists) {
assert(_sp->verifyChunkInFreeLists(fc), "free chunk is not in free lists");
assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
}
if (CMSTraceSweeper) {
@ -8410,7 +8410,7 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
assert(_sp->verifyChunkInFreeLists(ffc),
assert(_sp->verify_chunk_in_free_list(ffc),
"Chunk is not in free lists");
}
_sp->coalDeath(ffc->size());
@ -8459,7 +8459,7 @@ void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
" when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
_limit, _sp->bottom(), _sp->end(), fc, chunk_size));
if (eob >= _limit) {
assert(eob == _limit || fc->isFree(), "Only a free chunk should allow us to cross over the limit");
assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
if (CMSTraceSweeper) {
gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
"[" PTR_FORMAT "," PTR_FORMAT ") in space "
@ -8482,8 +8482,8 @@ void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
if (!freeRangeInFreeLists()) {
if (CMSTestInFreeList) {
FreeChunk* fc = (FreeChunk*) chunk;
fc->setSize(size);
assert(!_sp->verifyChunkInFreeLists(fc),
fc->set_size(size);
assert(!_sp->verify_chunk_in_free_list(fc),
"chunk should not be in free lists yet");
}
if (CMSTraceSweeper) {
@ -8557,8 +8557,8 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
// This is actually very useful in a product build if it can
// be called from the debugger. Compile it into the product
// as needed.
bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
return debug_cms_space->verifyChunkInFreeLists(fc);
bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
return debug_cms_space->verify_chunk_in_free_list(fc);
}
#endif
@ -9255,7 +9255,7 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
size_t chunk_at_end_old_size = chunk_at_end->size();
assert(chunk_at_end_old_size >= word_size_change,
"Shrink is too large");
chunk_at_end->setSize(chunk_at_end_old_size -
chunk_at_end->set_size(chunk_at_end_old_size -
word_size_change);
_cmsSpace->freed((HeapWord*) chunk_at_end->end(),
word_size_change);

@ -75,20 +75,20 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
// calls. We really want the read of _mark and _prev from this pointer
// to be volatile but making the fields volatile causes all sorts of
// compilation errors.
return ((volatile FreeChunk*)addr)->isFree();
return ((volatile FreeChunk*)addr)->is_free();
}
bool isFree() const volatile {
bool is_free() const volatile {
LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
return (((intptr_t)_prev) & 0x1) == 0x1;
}
bool cantCoalesce() const {
assert(isFree(), "can't get coalesce bit on not free");
assert(is_free(), "can't get coalesce bit on not free");
return (((intptr_t)_prev) & 0x2) == 0x2;
}
void dontCoalesce() {
// the block should be free
assert(isFree(), "Should look like a free block");
assert(is_free(), "Should look like a free block");
_prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
}
FreeChunk* prev() const {
@ -103,23 +103,23 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
return _size;
}
void setSize(size_t sz) {
void set_size(size_t sz) {
LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
_size = sz;
}
FreeChunk* next() const { return _next; }
void linkAfter(FreeChunk* ptr) {
linkNext(ptr);
if (ptr != NULL) ptr->linkPrev(this);
void link_after(FreeChunk* ptr) {
link_next(ptr);
if (ptr != NULL) ptr->link_prev(this);
}
void linkNext(FreeChunk* ptr) { _next = ptr; }
void linkPrev(FreeChunk* ptr) {
void link_next(FreeChunk* ptr) { _next = ptr; }
void link_prev(FreeChunk* ptr) {
LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
_prev = (FreeChunk*)((intptr_t)ptr | 0x1);
}
void clearNext() { _next = NULL; }
void clear_next() { _next = NULL; }
void markNotFree() {
// Set _prev (klass) to null before (if) clearing the mark word below
_prev = NULL;
@ -129,7 +129,7 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
set_mark(markOopDesc::prototype());
}
#endif
assert(!isFree(), "Error");
assert(!is_free(), "Error");
}
// Return the address past the end of this chunk

@ -121,7 +121,7 @@ void PromotionInfo::track(PromotedObject* trackOop) {
void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
// make a copy of header as it may need to be spooled
markOop mark = oop(trackOop)->mark();
trackOop->clearNext();
trackOop->clear_next();
if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
// save non-prototypical header, and mark oop
saveDisplacedHeader(mark);

@ -43,7 +43,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
// whose position will depend on endian-ness of the platform.
// This is so that there is no interference with the
// cms_free_bit occupying bit position 7 (lsb == 0)
// when we are using compressed oops; see FreeChunk::isFree().
// when we are using compressed oops; see FreeChunk::is_free().
// We cannot move the cms_free_bit down because currently
// biased locking code assumes that age bits are contiguous
// with the lock bits. Even if that assumption were relaxed,
@ -65,7 +65,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
};
public:
inline PromotedObject* next() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
PromotedObject* res;
if (UseCompressedOops) {
// The next pointer is a compressed oop stored in the top 32 bits
@ -85,27 +85,27 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
} else {
_next |= (intptr_t)x;
}
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
inline void setPromotedMark() {
_next |= promoted_mask;
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
inline bool hasPromotedMark() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
return (_next & promoted_mask) == promoted_mask;
}
inline void setDisplacedMark() {
_next |= displaced_mark;
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
inline bool hasDisplacedMark() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
return (_next & displaced_mark) != 0;
}
inline void clearNext() {
inline void clear_next() {
_next = 0;
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
debug_only(void *next_addr() { return (void *) &_next; })
};

@ -46,7 +46,7 @@
nonstatic_field(LinearAllocBlock, _word_size, size_t) \
nonstatic_field(FreeList<FreeChunk>, _size, size_t) \
nonstatic_field(FreeList<FreeChunk>, _count, ssize_t) \
nonstatic_field(BinaryTreeDictionary<FreeChunk>,_totalSize, size_t) \
nonstatic_field(BinaryTreeDictionary<FreeChunk>,_total_size, size_t) \
nonstatic_field(CompactibleFreeListSpace, _dictionary, FreeBlockDictionary<FreeChunk>*) \
nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], FreeList<FreeChunk>) \
nonstatic_field(CompactibleFreeListSpace, _smallLinearAllocBlock, LinearAllocBlock)