8080112: Replace and remove the last usages of CollectedHeap::n_par_threads()

Reviewed-by: jmasa, kbarrett
Stefan Karlsson 2015-05-21 09:35:38 +02:00
parent b77b3ec014
commit 8d0f1a6528
20 changed files with 72 additions and 101 deletions
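The change replaces the heap-global query CollectedHeap::n_par_threads() (set via set_par_threads()) with explicit arguments: new_dcto_cl() and ClearNoncleanCardWrapper now take a bool parallel, and younger_refs_iterate() threads a uint n_threads down to non_clean_card_iterate_possibly_parallel(). The standalone sketch below is not HotSpot code; all names in it are invented purely to contrast the two styles, under the assumption that the reader wants to see the pattern in isolation.

// Minimal standalone sketch (not HotSpot code): pass the degree of
// parallelism explicitly instead of consulting a mutable global counter.
#include <cstdio>

// Old style: implicit global state, set before a phase and queried deep
// inside helper code (analogous to CollectedHeap::n_par_threads()).
static unsigned g_n_par_threads = 0;   // hypothetical stand-in

static void iterate_cards_old() {
  bool is_par = g_n_par_threads > 0;   // hidden dependency on global state
  std::printf("old: %s\n", is_par ? "parallel" : "serial");
}

// New style: the caller states the parallelism explicitly, so helpers do
// not need to know how the phase was set up.
static void iterate_cards_new(unsigned n_threads) {
  if (n_threads > 0) {
    std::printf("new: parallel with %u threads\n", n_threads);
  } else {
    std::printf("new: serial\n");
  }
}

int main() {
  g_n_par_threads = 4;     // old: state must be set (and later reset) globally
  iterate_cards_old();
  g_n_par_threads = 0;

  iterate_cards_new(4);    // new: intent is visible at the call site
  iterate_cards_new(0);
  return 0;
}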


@@ -641,6 +641,7 @@ void CompactibleFreeListSpace::set_end(HeapWord* value) {
 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   CompactibleFreeListSpace* _cfls;
   CMSCollector* _collector;
+  bool _parallel;
 protected:
   // Override.
 #define walk_mem_region_with_cl_DECL(ClosureType) \

@@ -661,9 +662,10 @@ public:
                      CMSCollector* collector,
                      ExtendedOopClosure* cl,
                      CardTableModRefBS::PrecisionStyle precision,
-                     HeapWord* boundary) :
+                     HeapWord* boundary,
+                     bool parallel) :
     Filtering_DCTOC(sp, cl, precision, boundary),
-    _cfls(sp), _collector(collector) {}
+    _cfls(sp), _collector(collector), _parallel(parallel) {}
 };

 // We de-virtualize the block-related calls below, since we know that our

@@ -674,10 +676,7 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,
                                                   HeapWord* bottom,  \
                                                   HeapWord* top,     \
                                                   ClosureType* cl) { \
-  bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0;       \
-  if (is_par) {                                                      \
-    assert(GenCollectedHeap::heap()->n_par_threads() ==              \
-           GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \
+  if (_parallel) {                                                   \
     walk_mem_region_with_cl_par(mr, bottom, top, cl);                \
   } else {                                                           \
     walk_mem_region_with_cl_nopar(mr, bottom, top, cl);              \

@@ -747,8 +746,9 @@ FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 DirtyCardToOopClosure*
 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
                                       CardTableModRefBS::PrecisionStyle precision,
-                                      HeapWord* boundary) {
-  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
+                                      HeapWord* boundary,
+                                      bool parallel) {
+  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary, parallel);
 }

@@ -1897,11 +1897,9 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
   assert(chunk->is_free() && ffc->is_free(), "Error");
   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
   if (rem_sz < SmallForDictionary) {
-    bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
+    // The freeList lock is held, but multiple GC task threads might be executing in parallel.
+    bool is_par = Thread::current()->is_GC_task_thread();
     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
-    assert(!is_par ||
-           (GenCollectedHeap::heap()->n_par_threads() ==
-            GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
     returnChunkToFreeList(ffc);
     split(size, rem_sz);
     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();

@@ -1972,8 +1970,6 @@ void CompactibleFreeListSpace::save_marks() {
 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
   assert(_promoInfo.tracking(), "No preceding save_marks?");
-  assert(GenCollectedHeap::heap()->n_par_threads() == 0,
-         "Shouldn't be called if using parallel gc.");
   return _promoInfo.noPromotions();
 }

@@ -1981,8 +1977,6 @@ bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
                                                                  \
 void CompactibleFreeListSpace::                                  \
 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {   \
-  assert(GenCollectedHeap::heap()->n_par_threads() == 0,         \
-         "Shouldn't be called (yet) during parallel part of gc."); \
   _promoInfo.promoted_oops_iterate##nv_suffix(blk);              \
   /*                                                             \
    * This also restores any displaced headers and removes the elements from \
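The splitChunkAndReturnRemainder() hunk above now derives the locking decision from the identity of the current thread (Thread::current()->is_GC_task_thread()) rather than from a global parallel-thread counter. The sketch below is not HotSpot code; the thread_local flag plays the role of is_GC_task_thread(), and every other name is hypothetical.

// Standalone sketch, assuming a shared free list that parallel workers must
// lock while a serial caller may skip the extra locking.
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

static thread_local bool t_is_worker = false;  // stand-in for is_GC_task_thread()
static std::mutex free_list_lock;
static std::vector<int> free_list;

static void return_chunk(int chunk) {
  // Take the per-list lock only when running on a parallel worker thread;
  // the serial path here is single-threaded by construction.
  bool is_par = t_is_worker;
  if (is_par) free_list_lock.lock();
  free_list.push_back(chunk);
  if (is_par) free_list_lock.unlock();
}

int main() {
  return_chunk(1);                 // serial caller, no extra locking
  std::thread worker([] {
    t_is_worker = true;            // mark this thread as a parallel worker
    return_chunk(2);
  });
  worker.join();
  std::printf("chunks on free list: %zu\n", free_list.size());
  return 0;
}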


@@ -438,7 +438,8 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // Override: provides a DCTO_CL specific to this kind of space.
   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                      CardTableModRefBS::PrecisionStyle precision,
-                                     HeapWord* boundary);
+                                     HeapWord* boundary,
+                                     bool parallel);

   void blk_iterate(BlkClosure* cl);
   void blk_iterate_careful(BlkClosureCareful* cl);


@@ -39,16 +39,11 @@
 void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                                              OopsInGenClosure* cl,
                                                              CardTableRS* ct,
-                                                             int n_threads) {
-  assert(n_threads > 0, "Error: expected n_threads > 0");
-  assert((n_threads == 1 && ParallelGCThreads == 0) ||
-         n_threads <= (int)ParallelGCThreads,
-         "# worker threads != # requested!");
-  assert(!Thread::current()->is_VM_thread() || (n_threads == 1), "There is only 1 VM thread");
-  assert(UseDynamicNumberOfGCThreads ||
-         !FLAG_IS_DEFAULT(ParallelGCThreads) ||
-         n_threads == (int)ParallelGCThreads,
-         "# worker threads != # requested!");
+                                                             uint n_threads) {
+  assert(n_threads > 0, "expected n_threads > 0");
+  assert(n_threads <= ParallelGCThreads,
+         err_msg("n_threads: %u > ParallelGCThreads: " UINTX_FORMAT, n_threads, ParallelGCThreads));

   // Make sure the LNC array is valid for the space.
   jbyte** lowest_non_clean;
   uintptr_t lowest_non_clean_base_chunk_index;

@@ -66,7 +61,8 @@ void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegio
   uint stride = 0;
   while (!pst->is_task_claimed(/* reference */ stride)) {
-    process_stride(sp, mr, stride, n_strides, cl, ct,
+    process_stride(sp, mr, stride, n_strides,
+                   cl, ct,
                    lowest_non_clean,
                    lowest_non_clean_base_chunk_index,
                    lowest_non_clean_chunk_size);

@@ -132,9 +128,13 @@ process_stride(Space* sp,
   assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
   assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

+  // This function is used by the parallel card table iteration.
+  const bool parallel = true;
   DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
-                                                   cl->gen_boundary());
-  ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
+                                                   cl->gen_boundary(),
+                                                   parallel);
+  ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);

   // Process the chunk.


@@ -1938,15 +1938,11 @@ void ConcurrentMark::cleanup() {
   HeapRegionRemSet::reset_for_cleanup_tasks();

-  uint n_workers;
-
   // Do counting once more with the world stopped for good measure.
   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);

   g1h->set_par_threads();
-  n_workers = g1h->n_par_threads();
-  assert(g1h->n_par_threads() == n_workers,
-         "Should not have been reset");
+  uint n_workers = _g1h->workers()->active_workers();
   g1h->workers()->run_task(&g1_par_count_task);
   // Done with the parallel phase so reset to 0.
   g1h->set_par_threads(0);


@@ -455,7 +455,7 @@ void DefNewGeneration::compute_new_size() {
   }
 }

-void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
+void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
   assert(false, "NYI -- are you sure you want to call this?");
 }


@@ -255,7 +255,7 @@ protected:
   // Iteration
   void object_iterate(ObjectClosure* blk);

-  void younger_refs_iterate(OopsInGenClosure* cl);
+  void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads);

   void space_iterate(SpaceClosure* blk, bool usedOnly = false);


@@ -353,8 +353,8 @@ void CardGeneration::space_iterate(SpaceClosure* blk,
   blk->do_space(space());
 }

-void CardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
+void CardGeneration::younger_refs_iterate(OopsInGenClosure* blk, uint n_threads) {
   blk->set_generation(this);
-  younger_refs_in_space_iterate(space(), blk);
+  younger_refs_in_space_iterate(space(), blk, n_threads);
   blk->reset_generation();
 }


@@ -89,7 +89,7 @@ class CardGeneration: public Generation {
   void space_iterate(SpaceClosure* blk, bool usedOnly = false);

-  void younger_refs_iterate(OopsInGenClosure* blk);
+  void younger_refs_iterate(OopsInGenClosure* blk, uint n_threads);

   bool is_in(const void* p) const;


@@ -440,31 +440,11 @@ void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool relea
 void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                  MemRegion mr,
                                                                  OopsInGenClosure* cl,
-                                                                 CardTableRS* ct) {
+                                                                 CardTableRS* ct,
+                                                                 uint n_threads) {
   if (!mr.is_empty()) {
-    // Caller (process_roots()) claims that all GC threads
-    // execute this call. With UseDynamicNumberOfGCThreads now all
-    // active GC threads execute this call. The number of active GC
-    // threads needs to be passed to par_non_clean_card_iterate_work()
-    // to get proper partitioning and termination.
-    //
-    // This is an example of where n_par_threads() is used instead
-    // of workers()->active_workers(). n_par_threads can be set to 0 to
-    // turn off parallelism. For example when this code is called as
-    // part of verification during root processing then n_par_threads()
-    // may have been set to 0. active_workers is not overloaded with
-    // the meaning that it is a switch to disable parallelism and so keeps
-    // the meaning of the number of active gc workers. If parallelism has
-    // not been shut off by setting n_par_threads to 0, then n_par_threads
-    // should be equal to active_workers. When a different mechanism for
-    // shutting off parallelism is used, then active_workers can be used in
-    // place of n_par_threads.
-    int n_threads = GenCollectedHeap::heap()->n_par_threads();
-    bool is_par = n_threads > 0;
-    if (is_par) {
+    if (n_threads > 0) {
 #if INCLUDE_ALL_GCS
-      assert(GenCollectedHeap::heap()->n_par_threads() ==
-             GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch");
       non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
 #else // INCLUDE_ALL_GCS
       fatal("Parallel gc not supported here.");

@@ -472,8 +452,11 @@ void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
     } else {
       // clear_cl finds contiguous dirty ranges of cards to process and clear.
-      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary());
-      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
+
+      // This is the single-threaded version.
+      const bool parallel = false;
+      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
+      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
       clear_cl.do_MemRegion(mr);
     }
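After this change the caller's n_threads does double duty in non_clean_card_iterate_possibly_parallel(): it selects the parallel path, and it is the only source of the parallel flag stored in the helper objects (the DCTO closure and ClearNoncleanCardWrapper), so downstream code no longer re-derives parallelism from the heap. The sketch below is not HotSpot code; class and function names are invented to show the shape of the dispatch.

// Standalone sketch, assuming a closure-like object that records the
// parallel flag at construction (compare FreeListSpace_DCTOC::_parallel and
// ClearNoncleanCardWrapper::_is_par).
#include <cstdio>

class RegionClosure {
  bool _parallel;   // stored once at construction, branched on later
public:
  explicit RegionClosure(bool parallel) : _parallel(parallel) {}
  void do_region() {
    if (_parallel) {
      std::printf("closure: parallel walk\n");
    } else {
      std::printf("closure: serial walk\n");
    }
  }
};

static void iterate_possibly_parallel(unsigned n_threads) {
  if (n_threads > 0) {
    std::printf("dispatching to %u workers\n", n_threads);
    RegionClosure cl(/*parallel=*/ true);
    cl.do_region();
  } else {
    const bool parallel = false;   // single-threaded version
    RegionClosure cl(parallel);
    cl.do_region();
  }
}

int main() {
  iterate_possibly_parallel(8);    // e.g. the active worker count
  iterate_possibly_parallel(0);    // e.g. a serial verification pass
  return 0;
}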


@@ -178,14 +178,15 @@ class CardTableModRefBS: public ModRefBarrierSet {
   // region mr in the given space and apply cl to any dirty sub-regions
   // of mr. Clears the dirty cards as they are processed.
   void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
-                                                OopsInGenClosure* cl, CardTableRS* ct);
+                                                OopsInGenClosure* cl, CardTableRS* ct,
+                                                uint n_threads);

 private:
   // Work method used to implement non_clean_card_iterate_possibly_parallel()
   // above in the parallel case.
   void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                             OopsInGenClosure* cl, CardTableRS* ct,
-                                            int n_threads);
+                                            uint n_threads);

 protected:
   // Dirty the bytes corresponding to "mr" (not all of which must be


@@ -102,9 +102,10 @@ void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
 }

 void CardTableRS::younger_refs_iterate(Generation* g,
-                                       OopsInGenClosure* blk) {
+                                       OopsInGenClosure* blk,
+                                       uint n_threads) {
   _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
-  g->younger_refs_iterate(blk);
+  g->younger_refs_iterate(blk, n_threads);
 }

 inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {

@@ -164,15 +165,8 @@ inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
 }

 ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
-  DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) :
-    _dirty_card_closure(dirty_card_closure), _ct(ct) {
-  // Cannot yet substitute active_workers for n_par_threads
-  // in the case where parallelism is being turned off by
-  // setting n_par_threads to 0.
-  _is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
-  assert(!_is_par ||
-         (GenCollectedHeap::heap()->n_par_threads() ==
-          GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
+  DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) :
+    _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
 }

 bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {

@@ -272,7 +266,8 @@ void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
 }

 void CardTableRS::younger_refs_in_space_iterate(Space* sp,
-                                                OopsInGenClosure* cl) {
+                                                OopsInGenClosure* cl,
+                                                uint n_threads) {
   const MemRegion urasm = sp->used_region_at_save_marks();
 #ifdef ASSERT
   // Convert the assertion check to a warning if we are running

@@ -301,7 +296,7 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
     ShouldNotReachHere();
   }
 #endif
-  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
+  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
 }

 void CardTableRS::clear_into_younger(Generation* old_gen) {


@@ -56,7 +56,7 @@ class CardTableRS: public GenRemSet {
   CardTableModRefBSForCTRS* _ct_bs;

-  virtual void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
+  virtual void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);

   void verify_space(Space* s, HeapWord* gen_start);

@@ -116,7 +116,7 @@ public:
   // Card table entries are cleared before application; "blk" is
   // responsible for dirtying if the oop is still older-to-younger after
   // closure application.
-  void younger_refs_iterate(Generation* g, OopsInGenClosure* blk);
+  void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads);

   void inline_write_ref_field_gc(void* field, oop new_val) {
     jbyte* byte = _ct_bs->byte_for(field);

@@ -183,7 +183,7 @@ private:
   bool is_word_aligned(jbyte* entry);

 public:
-  ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct);
+  ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par);

   void do_MemRegion(MemRegion mr);
 };


@@ -160,8 +160,7 @@ void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
 // Memory state functions.

-CollectedHeap::CollectedHeap() : _n_par_threads(0)
-{
+CollectedHeap::CollectedHeap() {
   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   const size_t elements_per_word = HeapWordSize / sizeof(jint);
   _filler_array_max_size = align_object_size(filler_array_hdr_size() +


@@ -101,7 +101,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
 protected:
   BarrierSet* _barrier_set;
   bool _is_gc_active;
-  uint _n_par_threads;

   unsigned int _total_collections;      // ... started
   unsigned int _total_full_collections; // ... started

@@ -291,11 +290,8 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   }

   GCCause::Cause gc_cause() { return _gc_cause; }

-  // Number of threads currently working on GC tasks.
-  uint n_par_threads() { return _n_par_threads; }
-
   // May be overridden to set additional parallelism.
-  virtual void set_par_threads(uint t) { _n_par_threads = t; };
+  virtual void set_par_threads(uint t) { (void)t; };

   // General obj/array allocation facilities.
   inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);


@@ -700,7 +700,7 @@ void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
     // older-gen scanning.
     if (level == 0) {
       older_gens->set_generation(_old_gen);
-      rem_set()->younger_refs_iterate(_old_gen, older_gens);
+      rem_set()->younger_refs_iterate(_old_gen, older_gens, scope->n_threads());
       older_gens->reset_generation();
     }


@@ -77,10 +77,11 @@ public:
   // 1) that are in objects allocated in "g" at the time of the last call
   //    to "save_Marks", and
   // 2) that point to objects in younger generations.
-  virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk) = 0;
+  virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads) = 0;

   virtual void younger_refs_in_space_iterate(Space* sp,
-                                             OopsInGenClosure* cl) = 0;
+                                             OopsInGenClosure* cl,
+                                             uint n_threads) = 0;

   // This method is used to notify the remembered set that "new_val" has
   // been written into "field" by the garbage collector.


@@ -293,9 +293,10 @@ void Generation::oop_iterate(ExtendedOopClosure* cl) {
 }

 void Generation::younger_refs_in_space_iterate(Space* sp,
-                                               OopsInGenClosure* cl) {
+                                               OopsInGenClosure* cl,
+                                               uint n_threads) {
   GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
-  rs->younger_refs_in_space_iterate(sp, cl);
+  rs->younger_refs_in_space_iterate(sp, cl, n_threads);
 }

 class GenerationObjIterateClosure : public SpaceClosure {


@@ -122,7 +122,7 @@ class Generation: public CHeapObj<mtGC> {
   // The iteration is only over objects allocated at the start of the
   // iterations; objects allocated as a result of applying the closure are
   // not included.
-  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
+  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);

 public:
   // The set of possible generation kinds.

@@ -526,7 +526,7 @@ class Generation: public CHeapObj<mtGC> {
   // in the current generation that contain pointers to objects in younger
   // generations. Objects allocated since the last "save_marks" call are
   // excluded.
-  virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;
+  virtual void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) = 0;

   // Inform a generation that it longer contains references to objects
   // in any younger generation. [e.g. Because younger gens are empty,


@@ -181,7 +181,8 @@ void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

 DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                           CardTableModRefBS::PrecisionStyle precision,
-                                          HeapWord* boundary) {
+                                          HeapWord* boundary,
+                                          bool parallel) {
   return new DirtyCardToOopClosure(this, cl, precision, boundary);
 }

@@ -260,7 +261,8 @@ ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 DirtyCardToOopClosure*
 ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                              CardTableModRefBS::PrecisionStyle precision,
-                             HeapWord* boundary) {
+                             HeapWord* boundary,
+                             bool parallel) {
   return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
 }


@@ -183,7 +183,8 @@ class Space: public CHeapObj<mtGC> {
   // operate. ResourceArea allocated.
   virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                              CardTableModRefBS::PrecisionStyle precision,
-                                             HeapWord* boundary = NULL);
+                                             HeapWord* boundary,
+                                             bool parallel);

   // If "p" is in the space, returns the address of the start of the
   // "block" that contains "p". We say "block" instead of "object" since

@@ -629,7 +630,8 @@ class ContiguousSpace: public CompactibleSpace {
   // Override.
   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                      CardTableModRefBS::PrecisionStyle precision,
-                                     HeapWord* boundary = NULL);
+                                     HeapWord* boundary,
+                                     bool parallel);

   // Apply "blk->do_oop" to the addresses of all reference fields in objects
   // starting with the _saved_mark_word, which was noted during a generation's