8079275: Remove CollectedHeap::use_parallel_gc_threads

Reviewed-by: sjohanss, kbarrett
This commit is contained in:
Stefan Karlsson 2015-05-04 15:02:37 +02:00
parent 00f9d96a8b
commit 7ede39f590
5 changed files with 9 additions and 36 deletions

View file

@@ -1082,17 +1082,6 @@ size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const { bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
FreeChunk* fc = (FreeChunk*)p; FreeChunk* fc = (FreeChunk*)p;
assert(is_in_reserved(p), "Should be in space"); assert(is_in_reserved(p), "Should be in space");
// When doing a mark-sweep-compact of the CMS generation, this
// assertion may fail because prepare_for_compaction() uses
// space that is garbage to maintain information on ranges of
// live objects so that these live ranges can be moved as a whole.
// Comment out this assertion until that problem can be solved
// (i.e., that the block start calculation may look at objects
// at address below "p" in finding the object that contains "p"
// and those objects (if garbage) may have been modified to hold
// live range information.
// assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
// "Should be a block boundary");
if (FreeChunk::indicatesFreeChunk(p)) return false; if (FreeChunk::indicatesFreeChunk(p)) return false;
Klass* k = oop(p)->klass_or_null(); Klass* k = oop(p)->klass_or_null();
if (k != NULL) { if (k != NULL) {

View file

@@ -225,16 +225,12 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
"Offset of FreeChunk::_prev within FreeChunk must match" "Offset of FreeChunk::_prev within FreeChunk must match"
" that of OopDesc::_klass within OopDesc"); " that of OopDesc::_klass within OopDesc");
) )
if (CollectedHeap::use_parallel_gc_threads()) {
typedef CMSParGCThreadState* CMSParGCThreadStatePtr; _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
_par_gc_thread_states = for (uint i = 0; i < ParallelGCThreads; i++) {
NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC); _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
for (uint i = 0; i < ParallelGCThreads; i++) {
_par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
}
} else {
_par_gc_thread_states = NULL;
} }
_incremental_collection_failed = false; _incremental_collection_failed = false;
// The "dilatation_factor" is the expansion that can occur on // The "dilatation_factor" is the expansion that can occur on
// account of the fact that the minimum object size in the CMS // account of the fact that the minimum object size in the CMS
@@ -460,7 +456,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"), _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
_modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize), _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
-1 /* lock-free */, "No_lock" /* dummy */), -1 /* lock-free */, "No_lock" /* dummy */),
_modUnionClosure(&_modUnionTable),
_modUnionClosurePar(&_modUnionTable), _modUnionClosurePar(&_modUnionTable),
// Adjust my span to cover old (cms) gen // Adjust my span to cover old (cms) gen
_span(cmsGen->reserved()), _span(cmsGen->reserved()),
@@ -2130,10 +2125,7 @@ void CMSCollector::gc_prologue(bool full) {
bool registerClosure = duringMarking; bool registerClosure = duringMarking;
ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ? _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
&_modUnionClosurePar
: &_modUnionClosure;
_cmsGen->gc_prologue_work(full, registerClosure, muc);
if (!full) { if (!full) {
stats().record_gc0_begin(); stats().record_gc0_begin();
@@ -3006,7 +2998,7 @@ void CMSCollector::checkpointRootsInitialWork() {
{ {
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) { if (CMSParallelInitialMarkEnabled) {
// The parallel version. // The parallel version.
FlexibleWorkGang* workers = gch->workers(); FlexibleWorkGang* workers = gch->workers();
assert(workers != NULL, "Need parallel worker threads."); assert(workers != NULL, "Need parallel worker threads.");
@@ -4348,7 +4340,7 @@ void CMSCollector::checkpointRootsFinalWork() {
// dirtied since the first checkpoint in this GC cycle and prior to // dirtied since the first checkpoint in this GC cycle and prior to
// the most recent young generation GC, minus those cleaned up by the // the most recent young generation GC, minus those cleaned up by the
// concurrent precleaning. // concurrent precleaning.
if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) { if (CMSParallelRemarkEnabled) {
GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
do_remark_parallel(); do_remark_parallel();
} else { } else {

View file

@@ -647,7 +647,6 @@ class CMSCollector: public CHeapObj<mtGC> {
// Keep this textually after _markBitMap and _span; c'tor dependency. // Keep this textually after _markBitMap and _span; c'tor dependency.
ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work
ModUnionClosure _modUnionClosure;
ModUnionClosurePar _modUnionClosurePar; ModUnionClosurePar _modUnionClosurePar;
// CMS abstract state machine // CMS abstract state machine

View file

@@ -598,13 +598,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
#endif #endif
public: public:
// This is a convenience method that is used in cases where
// the actual number of GC worker threads is not pertinent but
// only whether there more than 0. Use of this method helps
// reduce the occurrence of ParallelGCThreads to uses where the
// actual number may be germane.
static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; }
// Copy the current allocation context statistics for the specified contexts. // Copy the current allocation context statistics for the specified contexts.
// For each context in contexts, set the corresponding entries in the totals // For each context in contexts, set the corresponding entries in the totals
// and accuracy arrays to the current values held by the statistics. Each // and accuracy arrays to the current values held by the statistics. Each

View file

@@ -640,7 +640,7 @@ void GenCollectedHeap::process_roots(bool activate_scope,
// All threads execute the following. A specific chunk of buckets // All threads execute the following. A specific chunk of buckets
// from the StringTable are the individual tasks. // from the StringTable are the individual tasks.
if (weak_roots != NULL) { if (weak_roots != NULL) {
if (CollectedHeap::use_parallel_gc_threads()) { if (is_par) {
StringTable::possibly_parallel_oops_do(weak_roots); StringTable::possibly_parallel_oops_do(weak_roots);
} else { } else {
StringTable::oops_do(weak_roots); StringTable::oops_do(weak_roots);