8080112: Replace and remove the last usages of CollectedHeap::n_par_threads()

Reviewed-by: jmasa, kbarrett
Author: Stefan Karlsson
Date:   2015-05-21 09:35:38 +02:00
parent b77b3ec014
commit 8d0f1a6528
20 changed files with 72 additions and 101 deletions


@@ -641,6 +641,7 @@ void CompactibleFreeListSpace::set_end(HeapWord* value) {
 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   CompactibleFreeListSpace* _cfls;
   CMSCollector* _collector;
+  bool _parallel;
 protected:
   // Override.
 #define walk_mem_region_with_cl_DECL(ClosureType) \
@@ -661,9 +662,10 @@ public:
                     CMSCollector* collector,
                     ExtendedOopClosure* cl,
                     CardTableModRefBS::PrecisionStyle precision,
-                    HeapWord* boundary) :
+                    HeapWord* boundary,
+                    bool parallel) :
     Filtering_DCTOC(sp, cl, precision, boundary),
-    _cfls(sp), _collector(collector) {}
+    _cfls(sp), _collector(collector), _parallel(parallel) {}
 };

 // We de-virtualize the block-related calls below, since we know that our
@@ -674,10 +676,7 @@ void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,
                                                   HeapWord* bottom,            \
                                                   HeapWord* top,               \
                                                   ClosureType* cl) {           \
-  bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0;                 \
-  if (is_par) {                                                                \
-    assert(GenCollectedHeap::heap()->n_par_threads() ==                        \
-           GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \
+  if (_parallel) {                                                             \
     walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   } else {                                                                     \
     walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
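
The macro above is the heart of the change: instead of asking the heap how many parallel threads are registered (and asserting that this matches the work gang), the closure consults a _parallel flag it was handed at construction time. A minimal standalone sketch of this injected-flag pattern follows; the names are illustrative, not HotSpot code.

#include <iostream>

// A closure that can walk a memory region serially or in parallel.
// Whether it runs in parallel is decided once, by whoever constructs it,
// instead of being re-derived from global heap state at every call.
class WalkClosure {
  bool _parallel;  // plays the role of FreeListSpace_DCTOC::_parallel
public:
  explicit WalkClosure(bool parallel) : _parallel(parallel) {}
  void walk_mem_region() {
    if (_parallel) {
      std::cout << "parallel walk\n";  // each worker would claim sub-ranges
    } else {
      std::cout << "serial walk\n";    // plain single-threaded scan
    }
  }
};

int main() {
  WalkClosure(true).walk_mem_region();   // built by a parallel caller
  WalkClosure(false).walk_mem_region();  // built by a serial caller
  return 0;
}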
@@ -747,8 +746,9 @@ FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 DirtyCardToOopClosure*
 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
                                       CardTableModRefBS::PrecisionStyle precision,
-                                      HeapWord* boundary) {
-  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
+                                      HeapWord* boundary,
+                                      bool parallel) {
+  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary, parallel);
 }
@@ -1897,11 +1897,9 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
   assert(chunk->is_free() && ffc->is_free(), "Error");
   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
   if (rem_sz < SmallForDictionary) {
-    bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
+    // The freeList lock is held, but multiple GC task threads might be executing in parallel.
+    bool is_par = Thread::current()->is_GC_task_thread();
     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
-    assert(!is_par ||
-           (GenCollectedHeap::heap()->n_par_threads() ==
-            GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
     returnChunkToFreeList(ffc);
     split(size, rem_sz);
     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
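
Here the parallel decision cannot be injected, because splitChunkAndReturnRemainder() is reached from both serial and parallel paths; the new code instead asks whether the current thread is a GC worker and takes the per-size lock only in that case. A standalone sketch of that conditional-locking idiom (all names are illustrative stand-ins, not HotSpot's):

#include <mutex>

static std::mutex free_list_lock;                 // stands in for _indexedFreeListParLocks[rem_sz]
static thread_local bool gc_task_thread = false;  // a worker would set this at start-up

// Return a remainder chunk to a free list, locking only when we might race
// with other workers (cf. Thread::current()->is_GC_task_thread()).
void return_chunk_to_free_list() {
  const bool is_par = gc_task_thread;
  if (is_par) free_list_lock.lock();
  // ... splice the chunk back onto the indexed free list ...
  if (is_par) free_list_lock.unlock();
}

int main() {
  return_chunk_to_free_list();  // serial path: no locking needed
  return 0;
}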
@@ -1972,8 +1970,6 @@ void CompactibleFreeListSpace::save_marks() {
 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
   assert(_promoInfo.tracking(), "No preceding save_marks?");
-  assert(GenCollectedHeap::heap()->n_par_threads() == 0,
-         "Shouldn't be called if using parallel gc.");
   return _promoInfo.noPromotions();
 }
@@ -1981,8 +1977,6 @@ bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
                                                                             \
 void CompactibleFreeListSpace::                                             \
 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
-  assert(GenCollectedHeap::heap()->n_par_threads() == 0,                    \
-         "Shouldn't be called (yet) during parallel part of gc.");          \
   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
   /*                                                                        \
    * This also restores any displaced headers and removes the elements from \


@@ -438,7 +438,8 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // Override: provides a DCTO_CL specific to this kind of space.
   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                      CardTableModRefBS::PrecisionStyle precision,
-                                     HeapWord* boundary);
+                                     HeapWord* boundary,
+                                     bool parallel);

   void blk_iterate(BlkClosure* cl);
   void blk_iterate_careful(BlkClosureCareful* cl);


@@ -39,16 +39,11 @@
 void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                                              OopsInGenClosure* cl,
                                                              CardTableRS* ct,
-                                                             int n_threads) {
-  assert(n_threads > 0, "Error: expected n_threads > 0");
-  assert((n_threads == 1 && ParallelGCThreads == 0) ||
-         n_threads <= (int)ParallelGCThreads,
-         "# worker threads != # requested!");
-  assert(!Thread::current()->is_VM_thread() || (n_threads == 1), "There is only 1 VM thread");
-  assert(UseDynamicNumberOfGCThreads ||
-         !FLAG_IS_DEFAULT(ParallelGCThreads) ||
-         n_threads == (int)ParallelGCThreads,
-         "# worker threads != # requested!");
+                                                             uint n_threads) {
+  assert(n_threads > 0, "expected n_threads > 0");
+  assert(n_threads <= ParallelGCThreads,
+         err_msg("n_threads: %u > ParallelGCThreads: " UINTX_FORMAT, n_threads, ParallelGCThreads));

   // Make sure the LNC array is valid for the space.
   jbyte** lowest_non_clean;
   uintptr_t lowest_non_clean_base_chunk_index;
@@ -66,7 +61,8 @@ void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
   uint stride = 0;
   while (!pst->is_task_claimed(/* reference */ stride)) {
-    process_stride(sp, mr, stride, n_strides, cl, ct,
+    process_stride(sp, mr, stride, n_strides,
+                   cl, ct,
                    lowest_non_clean,
                    lowest_non_clean_base_chunk_index,
                    lowest_non_clean_chunk_size);
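
The surrounding loop shows how the parallel iteration partitions work: the card table is cut into n_strides strides and each worker keeps claiming strides until none remain. A self-contained sketch of that claim-based partitioning using an atomic counter (a simplification of what the SubTasksDone-style claim above does; note the sense is inverted relative to is_task_claimed(), which returns true when a task is already taken):

#include <atomic>
#include <cstdio>

static std::atomic<unsigned> next_stride{0};

// Hand out the next unclaimed stride, if any; returns true on a successful claim.
bool claim_stride(unsigned n_strides, unsigned* stride) {
  unsigned s = next_stride.fetch_add(1, std::memory_order_relaxed);
  if (s >= n_strides) return false;  // all strides already claimed
  *stride = s;
  return true;
}

void worker_loop(unsigned n_strides) {
  unsigned stride;
  while (claim_stride(n_strides, &stride)) {
    std::printf("processing stride %u\n", stride);  // process_stride(...) would run here
  }
}

int main() {
  worker_loop(4);  // a single worker claims and processes every stride
  return 0;
}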
@@ -132,9 +128,13 @@ process_stride(Space* sp,
   assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
   assert(used.contains(chunk_mr), "chunk_mr should be subset of used");

+  // This function is used by the parallel card table iteration.
+  const bool parallel = true;
   DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
-                                                   cl->gen_boundary());
-  ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
+                                                   cl->gen_boundary(),
+                                                   parallel);
+  ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);

   // Process the chunk.

View file

@@ -1938,15 +1938,11 @@ void ConcurrentMark::cleanup() {
   HeapRegionRemSet::reset_for_cleanup_tasks();

-  uint n_workers;
-
   // Do counting once more with the world stopped for good measure.
   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);

-  g1h->set_par_threads();
-  n_workers = g1h->n_par_threads();
-  assert(g1h->n_par_threads() == n_workers,
-         "Should not have been reset");
+  uint n_workers = _g1h->workers()->active_workers();
   g1h->workers()->run_task(&g1_par_count_task);
-  // Done with the parallel phase so reset to 0.
-  g1h->set_par_threads(0);
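
In the G1 cleanup pause the set_par_threads()/n_par_threads() round-trip is replaced by reading the worker count straight from the work gang. A standalone sketch of that shape, where the gang itself owns its active-worker count; WorkGang, run_task and active_workers here are illustrative stand-ins, not HotSpot's classes:

#include <cstdio>
#include <thread>
#include <vector>

class WorkGang {
  unsigned _active_workers;
public:
  explicit WorkGang(unsigned n) : _active_workers(n) {}
  unsigned active_workers() const { return _active_workers; }

  // Run task(worker_id) once on each active worker and wait for all of them.
  template <typename Task>
  void run_task(Task task) {
    std::vector<std::thread> threads;
    for (unsigned i = 0; i < _active_workers; i++) {
      threads.emplace_back(task, i);
    }
    for (auto& t : threads) t.join();
  }
};

int main() {
  WorkGang workers(4);
  unsigned n_workers = workers.active_workers();  // no global side channel
  std::printf("running on %u workers\n", n_workers);
  workers.run_task([](unsigned id) { std::printf("worker %u counting\n", id); });
  return 0;
}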


@@ -455,7 +455,7 @@ void DefNewGeneration::compute_new_size() {
   }
 }

-void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
+void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
   assert(false, "NYI -- are you sure you want to call this?");
 }

View file

@@ -255,7 +255,7 @@ protected:
   // Iteration
   void object_iterate(ObjectClosure* blk);

-  void younger_refs_iterate(OopsInGenClosure* cl);
+  void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads);

   void space_iterate(SpaceClosure* blk, bool usedOnly = false);


@@ -353,8 +353,8 @@ void CardGeneration::space_iterate(SpaceClosure* blk,
   blk->do_space(space());
 }

-void CardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
+void CardGeneration::younger_refs_iterate(OopsInGenClosure* blk, uint n_threads) {
   blk->set_generation(this);
-  younger_refs_in_space_iterate(space(), blk);
+  younger_refs_in_space_iterate(space(), blk, n_threads);
   blk->reset_generation();
 }


@@ -89,7 +89,7 @@ class CardGeneration: public Generation {
   void space_iterate(SpaceClosure* blk, bool usedOnly = false);

-  void younger_refs_iterate(OopsInGenClosure* blk);
+  void younger_refs_iterate(OopsInGenClosure* blk, uint n_threads);

   bool is_in(const void* p) const;


@@ -440,31 +440,11 @@ void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
 void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                  MemRegion mr,
                                                                  OopsInGenClosure* cl,
-                                                                 CardTableRS* ct) {
+                                                                 CardTableRS* ct,
+                                                                 uint n_threads) {
   if (!mr.is_empty()) {
-    // Caller (process_roots()) claims that all GC threads
-    // execute this call. With UseDynamicNumberOfGCThreads now all
-    // active GC threads execute this call. The number of active GC
-    // threads needs to be passed to par_non_clean_card_iterate_work()
-    // to get proper partitioning and termination.
-    //
-    // This is an example of where n_par_threads() is used instead
-    // of workers()->active_workers(). n_par_threads can be set to 0 to
-    // turn off parallelism. For example when this code is called as
-    // part of verification during root processing then n_par_threads()
-    // may have been set to 0. active_workers is not overloaded with
-    // the meaning that it is a switch to disable parallelism and so keeps
-    // the meaning of the number of active gc workers. If parallelism has
-    // not been shut off by setting n_par_threads to 0, then n_par_threads
-    // should be equal to active_workers. When a different mechanism for
-    // shutting off parallelism is used, then active_workers can be used in
-    // place of n_par_threads.
-    int n_threads = GenCollectedHeap::heap()->n_par_threads();
-    bool is_par = n_threads > 0;
-    if (is_par) {
+    if (n_threads > 0) {
 #if INCLUDE_ALL_GCS
-      assert(GenCollectedHeap::heap()->n_par_threads() ==
-             GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch");
       non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
 #else  // INCLUDE_ALL_GCS
       fatal("Parallel gc not supported here.");
@@ -472,8 +452,11 @@ void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
   } else {
     // clear_cl finds contiguous dirty ranges of cards to process and clear.

-    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary());
-    ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
+    // This is the single-threaded version.
+    const bool parallel = false;
+
+    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
+    ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);

     clear_cl.do_MemRegion(mr);
   }
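
The long comment deleted above described the old contract: n_par_threads() doubled as both a thread count and an on/off switch for parallelism, and callers had to keep it consistent with active_workers(). After this change the count travels the whole call chain as an explicit argument, with 0 meaning serial, so the switch is visible at every call site. A compilable sketch of the new shape of the chain; the function names mirror the HotSpot ones, the bodies are illustrative:

#include <cstdio>

// Innermost dispatch: the caller's thread count decides the path.
static void non_clean_card_iterate_possibly_parallel(unsigned n_threads) {
  if (n_threads > 0) {
    std::printf("parallel card scan with %u workers\n", n_threads);
  } else {
    std::printf("serial card scan\n");  // e.g. verification during root processing
  }
}

static void younger_refs_in_space_iterate(unsigned n_threads) {
  non_clean_card_iterate_possibly_parallel(n_threads);  // just forwards the count
}

static void younger_refs_iterate(unsigned n_threads) {
  younger_refs_in_space_iterate(n_threads);
}

int main() {
  younger_refs_iterate(0);  // a serial caller says so explicitly
  younger_refs_iterate(4);  // a parallel caller passes its active worker count
  return 0;
}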


@@ -178,14 +178,15 @@ class CardTableModRefBS: public ModRefBarrierSet {
   // region mr in the given space and apply cl to any dirty sub-regions
   // of mr. Clears the dirty cards as they are processed.
   void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
-                                                OopsInGenClosure* cl, CardTableRS* ct);
+                                                OopsInGenClosure* cl, CardTableRS* ct,
+                                                uint n_threads);

 private:
   // Work method used to implement non_clean_card_iterate_possibly_parallel()
   // above in the parallel case.
   void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                             OopsInGenClosure* cl, CardTableRS* ct,
-                                            int n_threads);
+                                            uint n_threads);

 protected:
   // Dirty the bytes corresponding to "mr" (not all of which must be


@@ -102,9 +102,10 @@ void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
 }

 void CardTableRS::younger_refs_iterate(Generation* g,
-                                       OopsInGenClosure* blk) {
+                                       OopsInGenClosure* blk,
+                                       uint n_threads) {
   _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
-  g->younger_refs_iterate(blk);
+  g->younger_refs_iterate(blk, n_threads);
 }

 inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
@@ -164,15 +165,8 @@ inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
 }

 ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
-  DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) :
-    _dirty_card_closure(dirty_card_closure), _ct(ct) {
-  // Cannot yet substitute active_workers for n_par_threads
-  // in the case where parallelism is being turned off by
-  // setting n_par_threads to 0.
-  _is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
-  assert(!_is_par ||
-         (GenCollectedHeap::heap()->n_par_threads() ==
-          GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
+  DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) :
+    _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
 }

 bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
@@ -272,7 +266,8 @@ void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
 }

 void CardTableRS::younger_refs_in_space_iterate(Space* sp,
-                                                OopsInGenClosure* cl) {
+                                                OopsInGenClosure* cl,
+                                                uint n_threads) {
   const MemRegion urasm = sp->used_region_at_save_marks();
 #ifdef ASSERT
   // Convert the assertion check to a warning if we are running
@@ -301,7 +296,7 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
     ShouldNotReachHere();
   }
 #endif
-  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
+  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
 }

 void CardTableRS::clear_into_younger(Generation* old_gen) {
void CardTableRS::clear_into_younger(Generation* old_gen) {


@@ -56,7 +56,7 @@ class CardTableRS: public GenRemSet {
   CardTableModRefBSForCTRS* _ct_bs;

-  virtual void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
+  virtual void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);

   void verify_space(Space* s, HeapWord* gen_start);
@@ -116,7 +116,7 @@ public:
   // Card table entries are cleared before application; "blk" is
   // responsible for dirtying if the oop is still older-to-younger after
   // closure application.
-  void younger_refs_iterate(Generation* g, OopsInGenClosure* blk);
+  void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads);

   void inline_write_ref_field_gc(void* field, oop new_val) {
     jbyte* byte = _ct_bs->byte_for(field);
@@ -183,7 +183,7 @@ private:
   bool is_word_aligned(jbyte* entry);

 public:
-  ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct);
+  ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par);
   void do_MemRegion(MemRegion mr);
 };


@@ -160,8 +160,7 @@ void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
 // Memory state functions.

-CollectedHeap::CollectedHeap() : _n_par_threads(0)
-{
+CollectedHeap::CollectedHeap() {
   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   const size_t elements_per_word = HeapWordSize / sizeof(jint);
   _filler_array_max_size = align_object_size(filler_array_hdr_size() +


@@ -101,7 +101,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
 protected:
   BarrierSet* _barrier_set;
   bool _is_gc_active;
-  uint _n_par_threads;

   unsigned int _total_collections;      // ... started
   unsigned int _total_full_collections; // ... started
@@ -291,11 +290,8 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   }
   GCCause::Cause gc_cause() { return _gc_cause; }

-  // Number of threads currently working on GC tasks.
-  uint n_par_threads() { return _n_par_threads; }
-
   // May be overridden to set additional parallelism.
-  virtual void set_par_threads(uint t) { _n_par_threads = t; };
+  virtual void set_par_threads(uint t) { (void)t; };

   // General obj/array allocation facilities.
   inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
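
With the _n_par_threads field gone, the base-class hook keeps only an empty default; the (void)t cast is the usual way to mark a parameter as deliberately unused and silence compiler warnings in such a stub. A small sketch of the idiom, with illustrative class names:

class Heap {
public:
  virtual ~Heap() {}
  // Default does nothing: this base class no longer records a thread count.
  // "(void)t" tells the compiler the parameter is intentionally unused.
  virtual void set_par_threads(unsigned t) { (void)t; }
};

class ParallelHeap : public Heap {
  unsigned _workers;
public:
  ParallelHeap() : _workers(0) {}
  // A subclass that still needs the value can override the hook.
  virtual void set_par_threads(unsigned t) { _workers = t; }
};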


@@ -700,7 +700,7 @@ void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
     // older-gen scanning.
     if (level == 0) {
       older_gens->set_generation(_old_gen);
-      rem_set()->younger_refs_iterate(_old_gen, older_gens);
+      rem_set()->younger_refs_iterate(_old_gen, older_gens, scope->n_threads());
       older_gens->reset_generation();
     }


@@ -77,10 +77,11 @@ public:
   // 1) that are in objects allocated in "g" at the time of the last call
   //    to "save_Marks", and
   // 2) that point to objects in younger generations.
-  virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk) = 0;
+  virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads) = 0;

   virtual void younger_refs_in_space_iterate(Space* sp,
-                                             OopsInGenClosure* cl) = 0;
+                                             OopsInGenClosure* cl,
+                                             uint n_threads) = 0;

   // This method is used to notify the remembered set that "new_val" has
   // been written into "field" by the garbage collector.


@@ -293,9 +293,10 @@ void Generation::oop_iterate(ExtendedOopClosure* cl) {
 }

 void Generation::younger_refs_in_space_iterate(Space* sp,
-                                               OopsInGenClosure* cl) {
+                                               OopsInGenClosure* cl,
+                                               uint n_threads) {
   GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
-  rs->younger_refs_in_space_iterate(sp, cl);
+  rs->younger_refs_in_space_iterate(sp, cl, n_threads);
 }

 class GenerationObjIterateClosure : public SpaceClosure {


@@ -122,7 +122,7 @@ class Generation: public CHeapObj<mtGC> {
   // The iteration is only over objects allocated at the start of the
   // iterations; objects allocated as a result of applying the closure are
   // not included.
-  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);
+  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);

 public:
   // The set of possible generation kinds.
@@ -526,7 +526,7 @@ class Generation: public CHeapObj<mtGC> {
   // in the current generation that contain pointers to objects in younger
   // generations. Objects allocated since the last "save_marks" call are
   // excluded.
-  virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;
+  virtual void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) = 0;

   // Inform a generation that it longer contains references to objects
   // in any younger generation. [e.g. Because younger gens are empty,


@@ -181,7 +181,8 @@ void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
 DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                           CardTableModRefBS::PrecisionStyle precision,
-                                          HeapWord* boundary) {
+                                          HeapWord* boundary,
+                                          bool parallel) {
   return new DirtyCardToOopClosure(this, cl, precision, boundary);
 }
@@ -260,7 +261,8 @@ ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 DirtyCardToOopClosure*
 ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                              CardTableModRefBS::PrecisionStyle precision,
-                             HeapWord* boundary) {
+                             HeapWord* boundary,
+                             bool parallel) {
   return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
 }
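
Note that Space::new_dcto_cl() and ContiguousSpace::new_dcto_cl() accept the new parallel flag but do not forward it: DirtyCardToOopClosure and ContiguousSpaceDCTOC are serial closures, and only CompactibleFreeListSpace's override (earlier in this commit) actually consumes the flag. A compact sketch of a virtual factory where the base implementation ignores a hint that one override needs; all names below are illustrative:

struct Closure {
  virtual ~Closure() {}
};
struct SerialDCTOC : Closure {};  // never looks at the flag
struct ParallelAwareDCTOC : Closure {
  explicit ParallelAwareDCTOC(bool parallel) : _parallel(parallel) {}
  bool _parallel;                 // cf. FreeListSpace_DCTOC::_parallel
};

struct Space {
  virtual ~Space() {}
  virtual Closure* new_dcto_cl(bool parallel) {
    (void)parallel;               // serial spaces ignore the hint
    return new SerialDCTOC();
  }
};

struct FreeListSpace : Space {
  virtual Closure* new_dcto_cl(bool parallel) {
    return new ParallelAwareDCTOC(parallel);  // forwards it to a parallel-aware closure
  }
};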


@@ -183,7 +183,8 @@ class Space: public CHeapObj<mtGC> {
   // operate. ResourceArea allocated.
   virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                              CardTableModRefBS::PrecisionStyle precision,
-                                             HeapWord* boundary = NULL);
+                                             HeapWord* boundary,
+                                             bool parallel);

   // If "p" is in the space, returns the address of the start of the
   // "block" that contains "p". We say "block" instead of "object" since
@@ -629,7 +630,8 @@ class ContiguousSpace: public CompactibleSpace {
   // Override.
   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                      CardTableModRefBS::PrecisionStyle precision,
-                                     HeapWord* boundary = NULL);
+                                     HeapWord* boundary,
+                                     bool parallel);

   // Apply "blk->do_oop" to the addresses of all reference fields in objects
   // starting with the _saved_mark_word, which was noted during a generation's