Mirror of https://github.com/openjdk/jdk.git
8025856: Fix typos in the GC code
Fix about 440 typos in comments in the VM code
Reviewed-by: mgerdin, tschatzl, coleenp, kmo, jcoomes
parent 4c7c3f0613
commit 81ba2e32c0
150 changed files with 524 additions and 545 deletions
@@ -466,7 +466,7 @@ void CMSAdaptiveSizePolicy::checkpoint_roots_initial_end(
 void CMSAdaptiveSizePolicy::checkpoint_roots_final_begin() {
 _STW_timer.stop();
 _latest_cms_initial_mark_end_to_remark_start_secs = _STW_timer.seconds();
-// Start accumumlating time for the remark in the STW timer.
+// Start accumulating time for the remark in the STW timer.
 _STW_timer.reset();
 _STW_timer.start();
 }

@@ -537,8 +537,8 @@ void CMSAdaptiveSizePolicy::msc_collection_end(GCCause::Cause gc_cause) {
 avg_msc_pause()->sample(msc_pause_in_seconds);
 double mutator_time_in_seconds = 0.0;
 if (_latest_cms_collection_end_to_collection_start_secs == 0.0) {
-// This assertion may fail because of time stamp gradularity.
-// Comment it out and investiage it at a later time. The large
+// This assertion may fail because of time stamp granularity.
+// Comment it out and investigate it at a later time. The large
 // time stamp granularity occurs on some older linux systems.
 #ifndef CLOCK_GRANULARITY_TOO_LARGE
 assert((_latest_cms_concurrent_marking_time_secs == 0.0) &&

@@ -836,7 +836,7 @@ double CMSAdaptiveSizePolicy::cms_gc_cost() const {
 
 void CMSAdaptiveSizePolicy::ms_collection_marking_begin() {
 _STW_timer.stop();
-// Start accumumlating time for the marking in the STW timer.
+// Start accumulating time for the marking in the STW timer.
 _STW_timer.reset();
 _STW_timer.start();
 }

@@ -1227,7 +1227,7 @@ uint CMSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
 // We use the tenuring threshold to equalize the cost of major
 // and minor collections.
 // ThresholdTolerance is used to indicate how sensitive the
-// tenuring threshold is to differences in cost betweent the
+// tenuring threshold is to differences in cost between the
 // collection types.
 
 // Get the times of interest. This involves a little work, so
@@ -356,7 +356,7 @@ class CMSAdaptiveSizePolicy : public AdaptiveSizePolicy {
 void concurrent_sweeping_begin();
 void concurrent_sweeping_end();
 // Similar to the above (e.g., concurrent_marking_end()) and
-// is used for both the precleaning an abortable precleaing
+// is used for both the precleaning an abortable precleaning
 // phases.
 void concurrent_precleaning_begin();
 void concurrent_precleaning_end();
@@ -88,8 +88,7 @@ class CMSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
 // of the tenured generation.
 PerfVariable* _avg_msc_pause_counter;
 // Average for the time between the most recent end of a
-// MSC collection and the beginning of the next
-// MSC collection.
+// MSC collection and the beginning of the next MSC collection.
 PerfVariable* _avg_msc_interval_counter;
 // Average for the GC cost of a MSC collection based on
 // _avg_msc_pause_counter and _avg_msc_interval_counter.

@@ -99,8 +98,7 @@ class CMSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
 // of the tenured generation.
 PerfVariable* _avg_ms_pause_counter;
 // Average for the time between the most recent end of a
-// MS collection and the beginning of the next
-// MS collection.
+// MS collection and the beginning of the next MS collection.
 PerfVariable* _avg_ms_interval_counter;
 // Average for the GC cost of a MS collection based on
 // _avg_ms_pause_counter and _avg_ms_interval_counter.

@@ -108,9 +106,9 @@ class CMSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
 
 // Average of the bytes promoted per minor collection.
 PerfVariable* _promoted_avg_counter;
-// Average of the deviation of the promoted average
+// Average of the deviation of the promoted average.
 PerfVariable* _promoted_avg_dev_counter;
-// Padded average of the bytes promoted per minor colleciton
+// Padded average of the bytes promoted per minor collection.
 PerfVariable* _promoted_padded_avg_counter;
 
 // See description of the _change_young_gen_for_maj_pauses
@@ -258,10 +258,10 @@ class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
 bool take_from_overflow_list();
 };
 
-// Tn this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
+// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
 // stack and the bitMap are shared, so access needs to be suitably
-// sycnhronized. An OopTaskQueue structure, supporting efficient
-// workstealing, replaces a CMSMarkStack for storing grey objects.
+// synchronized. An OopTaskQueue structure, supporting efficient
+// work stealing, replaces a CMSMarkStack for storing grey objects.
 class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
 private:
 MemRegion _span;
@@ -407,8 +407,8 @@ size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 (size_t) SmallForLinearAlloc - 1));
 // XXX the following could potentially be pretty slow;
-// should one, pesimally for the rare cases when res
-// caclulated above is less than IndexSetSize,
+// should one, pessimistically for the rare cases when res
+// calculated above is less than IndexSetSize,
 // just return res calculated above? My reasoning was that
 // those cases will be so rare that the extra time spent doesn't
 // really matter....

@@ -759,7 +759,7 @@ CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 // Note on locking for the space iteration functions:
 // since the collector's iteration activities are concurrent with
 // allocation activities by mutators, absent a suitable mutual exclusion
-// mechanism the iterators may go awry. For instace a block being iterated
+// mechanism the iterators may go awry. For instance a block being iterated
 // may suddenly be allocated or divided up and part of it allocated and
 // so on.
 

@@ -2090,7 +2090,7 @@ CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
 
 // Support for concurrent collection policy decisions.
 bool CompactibleFreeListSpace::should_concurrent_collect() const {
-// In the future we might want to add in frgamentation stats --
+// In the future we might want to add in fragmentation stats --
 // including erosion of the "mountain" into this decision as well.
 return !adaptive_freelists() && linearAllocationWouldFail();
 }

@@ -2099,7 +2099,7 @@ bool CompactibleFreeListSpace::should_concurrent_collect() const {
 
 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
 SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
-// prepare_for_compaction() uses the space between live objects
+// Prepare_for_compaction() uses the space between live objects
 // so that later phase can skip dead space quickly. So verification
 // of the free lists doesn't work after.
 }

@@ -2122,7 +2122,7 @@ void CompactibleFreeListSpace::compact() {
 SCAN_AND_COMPACT(obj_size);
 }
 
-// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
+// Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
 // where fbs is free block sizes
 double CompactibleFreeListSpace::flsFrag() const {
 size_t itabFree = totalSizeInIndexedFreeLists();

@@ -2651,7 +2651,7 @@ void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>*
 // changes on-the-fly during a scavenge and avoid such a phase-change
 // pothole. The following code is a heuristic attempt to do that.
 // It is protected by a product flag until we have gained
-// enough experience with this heuristic and fine-tuned its behaviour.
+// enough experience with this heuristic and fine-tuned its behavior.
 // WARNING: This might increase fragmentation if we overreact to
 // small spikes, so some kind of historical smoothing based on
 // previous experience with the greater reactivity might be useful.
@@ -58,7 +58,7 @@ class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 HeapWord* _ptr;
 size_t _word_size;
 size_t _refillSize;
-size_t _allocation_size_limit; // largest size that will be allocated
+size_t _allocation_size_limit; // Largest size that will be allocated
 
 void print_on(outputStream* st) const;
 };

@@ -116,14 +116,14 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 
 PromotionInfo _promoInfo;
 
-// helps to impose a global total order on freelistLock ranks;
+// Helps to impose a global total order on freelistLock ranks;
 // assumes that CFLSpace's are allocated in global total order
 static int _lockRank;
 
-// a lock protecting the free lists and free blocks;
+// A lock protecting the free lists and free blocks;
 // mutable because of ubiquity of locking even for otherwise const methods
 mutable Mutex _freelistLock;
-// locking verifier convenience function
+// Locking verifier convenience function
 void assert_locked() const PRODUCT_RETURN;
 void assert_locked(const Mutex* lock) const PRODUCT_RETURN;
 

@@ -131,12 +131,13 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 LinearAllocBlock _smallLinearAllocBlock;
 
 FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
-AFLBinaryTreeDictionary* _dictionary; // ptr to dictionary for large size blocks
+AFLBinaryTreeDictionary* _dictionary; // Pointer to dictionary for large size blocks
 
+// Indexed array for small size blocks
 AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
-// indexed array for small size blocks
-// allocation stategy
-bool _fitStrategy; // Use best fit strategy.
+
+// Allocation strategy
+bool _fitStrategy; // Use best fit strategy
 bool _adaptive_freelists; // Use adaptive freelists
 
 // This is an address close to the largest free chunk in the heap.

@@ -157,7 +158,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 
 // Extra stuff to manage promotion parallelism.
 
-// a lock protecting the dictionary during par promotion allocation.
+// A lock protecting the dictionary during par promotion allocation.
 mutable Mutex _parDictionaryAllocLock;
 Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }
 

@@ -275,26 +276,26 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 }
 
 protected:
-// reset the indexed free list to its initial empty condition.
+// Reset the indexed free list to its initial empty condition.
 void resetIndexedFreeListArray();
-// reset to an initial state with a single free block described
+// Reset to an initial state with a single free block described
 // by the MemRegion parameter.
 void reset(MemRegion mr);
 // Return the total number of words in the indexed free lists.
 size_t totalSizeInIndexedFreeLists() const;
 
 public:
-// Constructor...
+// Constructor
 CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
 bool use_adaptive_freelists,
 FreeBlockDictionary<FreeChunk>::DictionaryChoice);
-// accessors
+// Accessors
 bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
 FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
 HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
 void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
 
-// Set CMS global values
+// Set CMS global values.
 static void set_cms_values();
 
 // Return the free chunk at the end of the space. If no such

@@ -305,7 +306,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 
 void set_collector(CMSCollector* collector) { _collector = collector; }
 
-// Support for parallelization of rescan and marking
+// Support for parallelization of rescan and marking.
 const size_t rescan_task_size() const { return _rescan_task_size; }
 const size_t marking_task_size() const { return _marking_task_size; }
 SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }

@@ -346,7 +347,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 // Resizing support
 void set_end(HeapWord* value); // override
 
-// mutual exclusion support
+// Mutual exclusion support
 Mutex* freelistLock() const { return &_freelistLock; }
 
 // Iteration support

@@ -370,7 +371,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 // If the iteration encounters an unparseable portion of the region,
 // terminate the iteration and return the address of the start of the
 // subregion that isn't done. Return of "NULL" indicates that the
-// interation completed.
+// iteration completed.
 virtual HeapWord*
 object_iterate_careful_m(MemRegion mr,
 ObjectClosureCareful* cl);

@@ -393,11 +394,11 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 size_t block_size_nopar(const HeapWord* p) const;
 bool block_is_obj_nopar(const HeapWord* p) const;
 
-// iteration support for promotion
+// Iteration support for promotion
 void save_marks();
 bool no_allocs_since_save_marks();
 
-// iteration support for sweeping
+// Iteration support for sweeping
 void save_sweep_limit() {
 _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
 unallocated_block() : end();

@@ -457,7 +458,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 
 FreeChunk* allocateScratch(size_t size);
 
-// returns true if either the small or large linear allocation buffer is empty.
+// Returns true if either the small or large linear allocation buffer is empty.
 bool linearAllocationWouldFail() const;
 
 // Adjust the chunk for the minimum size. This version is called in

@@ -477,18 +478,18 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
 bool coalesced);
 
-// Support for decisions regarding concurrent collection policy
+// Support for decisions regarding concurrent collection policy.
 bool should_concurrent_collect() const;
 
-// Support for compaction
+// Support for compaction.
 void prepare_for_compaction(CompactPoint* cp);
 void adjust_pointers();
 void compact();
-// reset the space to reflect the fact that a compaction of the
+// Reset the space to reflect the fact that a compaction of the
 // space has been done.
 virtual void reset_after_compaction();
 
-// Debugging support
+// Debugging support.
 void print() const;
 void print_on(outputStream* st) const;
 void prepare_for_verify();

@@ -500,7 +501,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 // i.e. either the binary tree dictionary, the indexed free lists
 // or the linear allocation block.
 bool verify_chunk_in_free_list(FreeChunk* fc) const;
-// Verify that the given chunk is the linear allocation block
+// Verify that the given chunk is the linear allocation block.
 bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
 // Do some basic checks on the the free lists.
 void check_free_list_consistency() const PRODUCT_RETURN;

@@ -516,7 +517,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
 size_t sumIndexedFreeListArrayReturnedBytes();
 // Return the total number of chunks in the indexed free lists.
 size_t totalCountInIndexedFreeLists() const;
-// Return the total numberof chunks in the space.
+// Return the total number of chunks in the space.
 size_t totalCount();
 )
 
@@ -117,10 +117,10 @@ GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
 // hide the naked CGC_lock manipulation in the baton-passing code
 // further below. That's something we should try to do. Also, the proof
 // of correctness of this 2-level locking scheme is far from obvious,
-// and potentially quite slippery. We have an uneasy supsicion, for instance,
+// and potentially quite slippery. We have an uneasy suspicion, for instance,
 // that there may be a theoretical possibility of delay/starvation in the
 // low-level lock/wait/notify scheme used for the baton-passing because of
-// potential intereference with the priority scheme embodied in the
+// potential interference with the priority scheme embodied in the
 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
 // invocation further below and marked with "XXX 20011219YSR".
 // Indeed, as we note elsewhere, this may become yet more slippery

@@ -259,7 +259,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 // Ideally, in the calculation below, we'd compute the dilatation
 // factor as: MinChunkSize/(promoting_gen's min object size)
 // Since we do not have such a general query interface for the
-// promoting generation, we'll instead just use the mimimum
+// promoting generation, we'll instead just use the minimum
 // object size (which today is a header's worth of space);
 // note that all arithmetic is in units of HeapWords.
 assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");

@@ -274,7 +274,7 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 //
 // Let "f" be MinHeapFreeRatio in
 //
-// _intiating_occupancy = 100-f +
+// _initiating_occupancy = 100-f +
 // f * (CMSTriggerRatio/100)
 // where CMSTriggerRatio is the argument "tr" below.
 //

@@ -2671,7 +2671,7 @@ bool CMSCollector::waitForForegroundGC() {
 // that it's responsible for collecting, while itself doing any
 // work common to all generations it's responsible for. A similar
 // comment applies to the gc_epilogue()'s.
-// The role of the varaible _between_prologue_and_epilogue is to
+// The role of the variable _between_prologue_and_epilogue is to
 // enforce the invocation protocol.
 void CMSCollector::gc_prologue(bool full) {
 // Call gc_prologue_work() for the CMSGen

@@ -2878,10 +2878,10 @@ bool CMSCollector::have_cms_token() {
 // Check reachability of the given heap address in CMS generation,
 // treating all other generations as roots.
 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
-// We could "guarantee" below, rather than assert, but i'll
+// We could "guarantee" below, rather than assert, but I'll
 // leave these as "asserts" so that an adventurous debugger
 // could try this in the product build provided some subset of
-// the conditions were met, provided they were intersted in the
+// the conditions were met, provided they were interested in the
 // results and knew that the computation below wouldn't interfere
 // with other concurrent computations mutating the structures
 // being read or written.

@@ -2982,7 +2982,7 @@ bool CMSCollector::verify_after_remark(bool silent) {
 // This is as intended, because by this time
 // GC must already have cleared any refs that need to be cleared,
 // and traced those that need to be marked; moreover,
-// the marking done here is not going to intefere in any
+// the marking done here is not going to interfere in any
 // way with the marking information used by GC.
 NoRefDiscovery no_discovery(ref_processor());
 

@@ -3000,7 +3000,7 @@ bool CMSCollector::verify_after_remark(bool silent) {
 
 if (CMSRemarkVerifyVariant == 1) {
 // In this first variant of verification, we complete
-// all marking, then check if the new marks-verctor is
+// all marking, then check if the new marks-vector is
 // a subset of the CMS marks-vector.
 verify_after_remark_work_1();
 } else if (CMSRemarkVerifyVariant == 2) {

@@ -3399,7 +3399,7 @@ HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThr
 CMSExpansionCause::_allocate_par_lab);
 // Now go around the loop and try alloc again;
 // A competing par_promote might beat us to the expansion space,
-// so we may go around the loop again if promotion fails agaion.
+// so we may go around the loop again if promotion fails again.
 if (GCExpandToAllocateDelayMillis > 0) {
 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
 }

@@ -4370,7 +4370,7 @@ void CMSConcMarkingTask::coordinator_yield() {
 // should really use wait/notify, which is the recommended
 // way of doing this type of interaction. Additionally, we should
 // consolidate the eight methods that do the yield operation and they
-// are almost identical into one for better maintenability and
+// are almost identical into one for better maintainability and
 // readability. See 6445193.
 //
 // Tony 2006.06.29

@@ -4538,7 +4538,7 @@ void CMSCollector::abortable_preclean() {
 // If Eden's current occupancy is below this threshold,
 // immediately schedule the remark; else preclean
 // past the next scavenge in an effort to
-// schedule the pause as described avove. By choosing
+// schedule the pause as described above. By choosing
 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
 // we will never do an actual abortable preclean cycle.
 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {

@@ -5532,8 +5532,8 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
 // CAUTION: This closure has state that persists across calls to
 // the work method dirty_range_iterate_clear() in that it has
-// imbedded in it a (subtype of) UpwardsObjectClosure. The
-// use of that state in the imbedded UpwardsObjectClosure instance
+// embedded in it a (subtype of) UpwardsObjectClosure. The
+// use of that state in the embedded UpwardsObjectClosure instance
 // assumes that the cards are always iterated (even if in parallel
 // by several threads) in monotonically increasing order per each
 // thread. This is true of the implementation below which picks

@@ -5548,7 +5548,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
 // sure that the changes there do not run counter to the
 // assumptions made here and necessary for correctness and
 // efficiency. Note also that this code might yield inefficient
-// behaviour in the case of very large objects that span one or
+// behavior in the case of very large objects that span one or
 // more work chunks. Such objects would potentially be scanned
 // several times redundantly. Work on 4756801 should try and
 // address that performance anomaly if at all possible. XXX

@@ -5574,7 +5574,7 @@ CMSParRemarkTask::do_dirty_card_rescan_tasks(
 
 while (!pst->is_task_claimed(/* reference */ nth_task)) {
 // Having claimed the nth_task, compute corresponding mem-region,
-// which is a-fortiori aligned correctly (i.e. at a MUT bopundary).
+// which is a-fortiori aligned correctly (i.e. at a MUT boundary).
 // The alignment restriction ensures that we do not need any
 // synchronization with other gang-workers while setting or
 // clearing bits in thus chunk of the MUT.

@@ -6365,7 +6365,7 @@ void CMSCollector::sweep(bool asynch) {
 _inter_sweep_timer.reset();
 _inter_sweep_timer.start();
 
-// We need to use a monotonically non-deccreasing time in ms
+// We need to use a monotonically non-decreasing time in ms
 // or we will see time-warp warnings and os::javaTimeMillis()
 // does not guarantee monotonicity.
 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

@@ -6726,7 +6726,7 @@ bool CMSBitMap::allocate(MemRegion mr) {
 warning("CMS bit map allocation failure");
 return false;
 }
-// For now we'll just commit all of the bit map up fromt.
+// For now we'll just commit all of the bit map up front.
 // Later on we'll try to be more parsimonious with swap.
 if (!_virtual_space.initialize(brs, brs.size())) {
 warning("CMS bit map backing store failure");

@@ -6833,8 +6833,8 @@ bool CMSMarkStack::allocate(size_t size) {
 
 // XXX FIX ME !!! In the MT case we come in here holding a
 // leaf lock. For printing we need to take a further lock
-// which has lower rank. We need to recallibrate the two
-// lock-ranks involved in order to be able to rpint the
+// which has lower rank. We need to recalibrate the two
+// lock-ranks involved in order to be able to print the
 // messages below. (Or defer the printing to the caller.
 // For now we take the expedient path of just disabling the
 // messages for the problematic case.)

@@ -7174,7 +7174,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
 }
 #endif // ASSERT
 } else {
-// an unitialized object
+// An uninitialized object.
 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
 size = pointer_delta(nextOneAddr + 1, addr);

@@ -7182,7 +7182,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
 "alignment problem");
 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
 // will dirty the card when the klass pointer is installed in the
-// object (signalling the completion of initialization).
+// object (signaling the completion of initialization).
 }
 } else {
 // Either a not yet marked object or an uninitialized object

@@ -7993,7 +7993,7 @@ void PushAndMarkClosure::do_oop(oop obj) {
 // we need to dirty all of the cards that the object spans,
 // since the rescan of object arrays will be limited to the
 // dirty cards.
-// Note that no one can be intefering with us in this action
+// Note that no one can be interfering with us in this action
 // of dirtying the mod union table, so no locking or atomics
 // are required.
 if (obj->is_objArray()) {

@@ -9019,7 +9019,7 @@ void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
 
 // It's OK to call this multi-threaded; the worst thing
 // that can happen is that we'll get a bunch of closely
-// spaced simulated oveflows, but that's OK, in fact
+// spaced simulated overflows, but that's OK, in fact
 // probably good as it would exercise the overflow code
 // under contention.
 bool CMSCollector::simulate_overflow() {

@@ -9139,7 +9139,7 @@ bool CMSCollector::par_take_from_overflow_list(size_t num,
 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
 }
 } else {
-// Chop off the suffix and rerturn it to the global list.
+// Chop off the suffix and return it to the global list.
 assert(cur->mark() != BUSY, "Error");
 oop suffix_head = cur->mark(); // suffix will be put back on global list
 cur->set_mark(NULL); // break off suffix
@@ -171,19 +171,19 @@ class CMSBitMap VALUE_OBJ_CLASS_SPEC {
 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
 class CMSMarkStack: public CHeapObj<mtGC> {
 //
-friend class CMSCollector; // to get at expasion stats further below
+friend class CMSCollector; // To get at expansion stats further below.
 //
 
-VirtualSpace _virtual_space; // space for the stack
-oop* _base; // bottom of stack
-size_t _index; // one more than last occupied index
-size_t _capacity; // max #elements
-Mutex _par_lock; // an advisory lock used in case of parallel access
-NOT_PRODUCT(size_t _max_depth;) // max depth plumbed during run
+VirtualSpace _virtual_space; // Space for the stack
+oop* _base; // Bottom of stack
+size_t _index; // One more than last occupied index
+size_t _capacity; // Max #elements
+Mutex _par_lock; // An advisory lock used in case of parallel access
+NOT_PRODUCT(size_t _max_depth;) // Max depth plumbed during run
 
 protected:
-size_t _hit_limit; // we hit max stack size limit
-size_t _failed_double; // we failed expansion before hitting limit
+size_t _hit_limit; // We hit max stack size limit
+size_t _failed_double; // We failed expansion before hitting limit
 
 public:
 CMSMarkStack():

@@ -238,7 +238,7 @@ class CMSMarkStack: public CHeapObj<mtGC> {
 _index = 0;
 }
 
-// Expand the stack, typically in response to an overflow condition
+// Expand the stack, typically in response to an overflow condition.
 void expand();
 
 // Compute the least valued stack element.

@@ -250,7 +250,7 @@ class CMSMarkStack: public CHeapObj<mtGC> {
 return least;
 }
 
-// Exposed here to allow stack expansion in || case
+// Exposed here to allow stack expansion in || case.
 Mutex* par_lock() { return &_par_lock; }
 };
 

@@ -557,7 +557,7 @@ class CMSCollector: public CHeapObj<mtGC> {
 // Manipulated with CAS in the parallel/multi-threaded case.
 oop _overflow_list;
 // The following array-pair keeps track of mark words
-// displaced for accomodating overflow list above.
+// displaced for accommodating overflow list above.
 // This code will likely be revisited under RFE#4922830.
 Stack<oop, mtGC> _preserved_oop_stack;
 Stack<markOop, mtGC> _preserved_mark_stack;

@@ -599,7 +599,7 @@ class CMSCollector: public CHeapObj<mtGC> {
 void verify_after_remark_work_1();
 void verify_after_remark_work_2();
 
-// true if any verification flag is on.
+// True if any verification flag is on.
 bool _verifying;
 bool verifying() const { return _verifying; }
 void set_verifying(bool v) { _verifying = v; }

@@ -611,9 +611,9 @@ class CMSCollector: public CHeapObj<mtGC> {
 void set_did_compact(bool v);
 
 // XXX Move these to CMSStats ??? FIX ME !!!
-elapsedTimer _inter_sweep_timer; // time between sweeps
-elapsedTimer _intra_sweep_timer; // time _in_ sweeps
-// padded decaying average estimates of the above
+elapsedTimer _inter_sweep_timer; // Time between sweeps
+elapsedTimer _intra_sweep_timer; // Time _in_ sweeps
+// Padded decaying average estimates of the above
 AdaptivePaddedAverage _inter_sweep_estimate;
 AdaptivePaddedAverage _intra_sweep_estimate;
 

@@ -632,16 +632,16 @@ class CMSCollector: public CHeapObj<mtGC> {
 void report_heap_summary(GCWhen::Type when);
 
 protected:
-ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS)
-MemRegion _span; // span covering above two
-CardTableRS* _ct; // card table
+ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS)
+MemRegion _span; // Span covering above two
+CardTableRS* _ct; // Card table
 
 // CMS marking support structures
 CMSBitMap _markBitMap;
 CMSBitMap _modUnionTable;
 CMSMarkStack _markStack;
 
-HeapWord* _restart_addr; // in support of marking stack overflow
+HeapWord* _restart_addr; // In support of marking stack overflow
 void lower_restart_addr(HeapWord* low);
 
 // Counters in support of marking stack / work queue overflow handling:

@@ -656,12 +656,12 @@ class CMSCollector: public CHeapObj<mtGC> {
 size_t _par_kac_ovflw;
 NOT_PRODUCT(ssize_t _num_par_pushes;)
 
-// ("Weak") Reference processing support
+// ("Weak") Reference processing support.
 ReferenceProcessor* _ref_processor;
 CMSIsAliveClosure _is_alive_closure;
-// keep this textually after _markBitMap and _span; c'tor dependency
+// Keep this textually after _markBitMap and _span; c'tor dependency.
 
-ConcurrentMarkSweepThread* _cmsThread; // the thread doing the work
+ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work
 ModUnionClosure _modUnionClosure;
 ModUnionClosurePar _modUnionClosurePar;
 

@@ -697,7 +697,7 @@ class CMSCollector: public CHeapObj<mtGC> {
 // State related to prologue/epilogue invocation for my generations
 bool _between_prologue_and_epilogue;
 
-// Signalling/State related to coordination between fore- and backgroud GC
+// Signaling/State related to coordination between fore- and background GC
 // Note: When the baton has been passed from background GC to foreground GC,
 // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
 static bool _foregroundGCIsActive; // true iff foreground collector is active or

@@ -712,13 +712,13 @@ class CMSCollector: public CHeapObj<mtGC> {
 int _numYields;
 size_t _numDirtyCards;
 size_t _sweep_count;
-// number of full gc's since the last concurrent gc.
+// Number of full gc's since the last concurrent gc.
 uint _full_gcs_since_conc_gc;
 
-// occupancy used for bootstrapping stats
+// Occupancy used for bootstrapping stats
 double _bootstrap_occupancy;
 
-// timer
+// Timer
 elapsedTimer _timer;
 
 // Timing, allocation and promotion statistics, used for scheduling.

@@ -770,7 +770,7 @@ class CMSCollector: public CHeapObj<mtGC> {
 int no_of_gc_threads);
 void push_on_overflow_list(oop p);
 void par_push_on_overflow_list(oop p);
-// the following is, obviously, not, in general, "MT-stable"
+// The following is, obviously, not, in general, "MT-stable"
 bool overflow_list_is_empty() const;
 
 void preserve_mark_if_necessary(oop p);

@@ -778,24 +778,24 @@ class CMSCollector: public CHeapObj<mtGC> {
 void preserve_mark_work(oop p, markOop m);
 void restore_preserved_marks_if_any();
 NOT_PRODUCT(bool no_preserved_marks() const;)
-// in support of testing overflow code
+// In support of testing overflow code
 NOT_PRODUCT(int _overflow_counter;)
-NOT_PRODUCT(bool simulate_overflow();) // sequential
+NOT_PRODUCT(bool simulate_overflow();) // Sequential
 NOT_PRODUCT(bool par_simulate_overflow();) // MT version
 
 // CMS work methods
-void checkpointRootsInitialWork(bool asynch); // initial checkpoint work
+void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work
 
-// a return value of false indicates failure due to stack overflow
-bool markFromRootsWork(bool asynch); // concurrent marking work
+// A return value of false indicates failure due to stack overflow
+bool markFromRootsWork(bool asynch); // Concurrent marking work
 
 public: // FIX ME!!! only for testing
-bool do_marking_st(bool asynch); // single-threaded marking
-bool do_marking_mt(bool asynch); // multi-threaded marking
+bool do_marking_st(bool asynch); // Single-threaded marking
+bool do_marking_mt(bool asynch); // Multi-threaded marking
 
 private:
 
-// concurrent precleaning work
+// Concurrent precleaning work
 size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
 ScanMarkedObjectsAgainCarefullyClosure* cl);
 size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,

@@ -811,26 +811,26 @@ class CMSCollector: public CHeapObj<mtGC> {
 // Resets (i.e. clears) the per-thread plab sample vectors
 void reset_survivor_plab_arrays();
 
-// final (second) checkpoint work
+// Final (second) checkpoint work
 void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
 bool init_mark_was_synchronous);
-// work routine for parallel version of remark
+// Work routine for parallel version of remark
 void do_remark_parallel();
-// work routine for non-parallel version of remark
+// Work routine for non-parallel version of remark
 void do_remark_non_parallel();
-// reference processing work routine (during second checkpoint)
+// Reference processing work routine (during second checkpoint)
 void refProcessingWork(bool asynch, bool clear_all_soft_refs);
 
-// concurrent sweeping work
+// Concurrent sweeping work
 void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
 
-// (concurrent) resetting of support data structures
+// (Concurrent) resetting of support data structures
 void reset(bool asynch);
 
 // Clear _expansion_cause fields of constituent generations
 void clear_expansion_cause();
 
-// An auxilliary method used to record the ends of
+// An auxiliary method used to record the ends of
 // used regions of each generation to limit the extent of sweep
 void save_sweep_limits();
 

@@ -854,7 +854,7 @@ class CMSCollector: public CHeapObj<mtGC> {
 bool is_external_interruption();
 void report_concurrent_mode_interruption();
 
-// If the backgrould GC is active, acquire control from the background
+// If the background GC is active, acquire control from the background
 // GC and do the collection.
 void acquire_control_and_collect(bool full, bool clear_all_soft_refs);
 

@@ -893,7 +893,7 @@ class CMSCollector: public CHeapObj<mtGC> {
 
 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
 
-// locking checks
+// Locking checks
 NOT_PRODUCT(static bool have_cms_token();)
 
 // XXXPERM bool should_collect(bool full, size_t size, bool tlab);

@@ -958,7 +958,7 @@ class CMSCollector: public CHeapObj<mtGC> {
 CMSBitMap* markBitMap() { return &_markBitMap; }
 void directAllocated(HeapWord* start, size_t size);
 
-// main CMS steps and related support
+// Main CMS steps and related support
 void checkpointRootsInitial(bool asynch);
 bool markFromRoots(bool asynch); // a return value of false indicates failure
 // due to stack overflow

@@ -977,7 +977,7 @@ class CMSCollector: public CHeapObj<mtGC> {
 // Performance Counter Support
 CollectorCounters* counters() { return _gc_counters; }
 
-// timer stuff
+// Timer stuff
 void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
 void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); }
 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }

@@ -1014,18 +1014,18 @@ class CMSCollector: public CHeapObj<mtGC> {
 
 static void print_on_error(outputStream* st);
 
-// debugging
+// Debugging
 void verify();
 bool verify_after_remark(bool silent = VerifySilently);
 void verify_ok_to_terminate() const PRODUCT_RETURN;
 void verify_work_stacks_empty() const PRODUCT_RETURN;
 void verify_overflow_empty() const PRODUCT_RETURN;
 
-// convenience methods in support of debugging
+// Convenience methods in support of debugging
 static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
 HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
 
-// accessors
+// Accessors
 CMSMarkStack* verification_mark_stack() { return &_markStack; }
 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }
 

@@ -1109,7 +1109,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
 
 CollectionTypes _debug_collection_type;
 
-// True if a compactiing collection was done.
+// True if a compacting collection was done.
 bool _did_compact;
 bool did_compact() { return _did_compact; }
 

@@ -1203,7 +1203,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
 
 // Support for compaction
 CompactibleSpace* first_compaction_space() const;
-// Adjust quantites in the generation affected by
+// Adjust quantities in the generation affected by
 // the compaction.
 void reset_after_compaction();
 

@@ -1301,7 +1301,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
 void setNearLargestChunk();
 bool isNearLargestChunk(HeapWord* addr);
 
-// Get the chunk at the end of the space. Delagates to
+// Get the chunk at the end of the space. Delegates to
 // the space.
 FreeChunk* find_chunk_at_end();
 

@@ -1422,7 +1422,6 @@ class MarkFromRootsClosure: public BitMapClosure {
 // marking from the roots following the first checkpoint.
 // XXX This should really be a subclass of The serial version
 // above, but i have not had the time to refactor things cleanly.
-// That willbe done for Dolphin.
 class Par_MarkFromRootsClosure: public BitMapClosure {
 CMSCollector* _collector;
 MemRegion _whole_span;

@@ -1780,7 +1779,7 @@ class SweepClosure: public BlkClosureCareful {
 void do_already_free_chunk(FreeChunk *fc);
 // Work method called when processing an already free or a
 // freshly garbage chunk to do a lookahead and possibly a
-// premptive flush if crossing over _limit.
+// preemptive flush if crossing over _limit.
 void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
 // Process a garbage chunk during sweeping.
 size_t do_garbage_chunk(FreeChunk *fc);

@@ -1879,7 +1878,7 @@ class CMSParDrainMarkingStackClosure: public VoidClosure {
 };
 
 // Allow yielding or short-circuiting of reference list
-// prelceaning work.
+// precleaning work.
 class CMSPrecleanRefsYieldClosure: public YieldClosure {
 CMSCollector* _collector;
 void do_yield_work();
@@ -197,13 +197,13 @@ inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
 }
 
 
-// Return the HeapWord address corrsponding to the next "0" bit
+// Return the HeapWord address corresponding to the next "0" bit
 // (inclusive).
 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
 return getNextUnmarkedWordAddress(addr, endWord());
 }
 
-// Return the HeapWord address corrsponding to the next "0" bit
+// Return the HeapWord address corresponding to the next "0" bit
 // (inclusive).
 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
 HeapWord* start_addr, HeapWord* end_addr) const {

@@ -164,7 +164,7 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
 // _pending_yields that holds the sum (of both sync and async requests), and
 // a second counter _pending_decrements that only holds the async requests,
 // for greater efficiency, since in a typical CMS run, there are many more
-// pontential (i.e. static) yield points than there are actual
+// potential (i.e. static) yield points than there are actual
 // (i.e. dynamic) yields because of requests, which are few and far between.
 //
 // Note that, while "_pending_yields >= _pending_decrements" is an invariant,

@@ -279,7 +279,7 @@ void PromotionInfo::print_statistics(uint worker_id) const {
 // When _spoolTail is NULL, then the set of slots with displaced headers
 // is all those starting at the slot <_spoolHead, _firstIndex> and
 // going up to the last slot of last block in the linked list.
-// In this lartter case, _splice_point points to the tail block of
+// In this latter case, _splice_point points to the tail block of
 // this linked list of blocks holding displaced headers.
 void PromotionInfo::verify() const {
 // Verify the following: