John Coomes 2011-03-18 09:03:43 -07:00
commit 0317c7c485
39 changed files with 675 additions and 711 deletions

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,6 +51,7 @@ define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(intx, OptoLoopAlignment, 16); // = 4*wordSize
define_pd_global(intx, InlineFrequencyCount, 50); // we can use more inlining on the SPARC
define_pd_global(intx, InlineSmallCode, 1500);
#ifdef _LP64
// Stack slots are 2X larger in LP64 than in the 32 bit VM.
define_pd_global(intx, ThreadStackSize, 1024);
@ -71,4 +72,6 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
#endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,4 +72,6 @@ define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);
// GC Ergo Flags
define_pd_global(intx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
#endif // CPU_X86_VM_GLOBALS_X86_HPP

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -292,13 +292,15 @@ void ConcurrentMarkSweepGeneration::ref_processor_init() {
void CMSCollector::ref_processor_init() {
if (_ref_processor == NULL) {
// Allocate and initialize a reference processor
_ref_processor = ReferenceProcessor::create_ref_processor(
_span, // span
_cmsGen->refs_discovery_is_atomic(), // atomic_discovery
_cmsGen->refs_discovery_is_mt(), // mt_discovery
&_is_alive_closure,
ParallelGCThreads,
ParallelRefProcEnabled);
_ref_processor =
new ReferenceProcessor(_span, // span
(ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
(int) ParallelGCThreads, // mt processing degree
_cmsGen->refs_discovery_is_mt(), // mt discovery
(int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
_cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
&_is_alive_closure, // closure for liveness info
false); // next field updates do not need write barrier
// Initialize the _ref_processor field of CMSGen
_cmsGen->set_ref_processor(_ref_processor);
@ -641,7 +643,7 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
}
// Support for multi-threaded concurrent phases
if (CollectedHeap::use_parallel_gc_threads() && CMSConcurrentMTEnabled) {
if (CMSConcurrentMTEnabled) {
if (FLAG_IS_DEFAULT(ConcGCThreads)) {
// just for now
FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
@ -1689,6 +1691,8 @@ void CMSCollector::request_full_gc(unsigned int full_gc_count) {
MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
_full_gc_requested = true;
CGC_lock->notify(); // nudge CMS thread
} else {
assert(gc_count > full_gc_count, "Error: causal loop");
}
}
@ -1988,17 +1992,16 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
// Temporarily widen the span of the weak reference processing to
// the entire heap.
MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
ReferenceProcessorSpanMutator x(ref_processor(), new_span);
ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
// Temporarily, clear the "is_alive_non_header" field of the
// reference processor.
ReferenceProcessorIsAliveMutator y(ref_processor(), NULL);
ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
// Temporarily make reference _processing_ single threaded (non-MT).
ReferenceProcessorMTProcMutator z(ref_processor(), false);
ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
// Temporarily make refs discovery atomic
ReferenceProcessorAtomicMutator w(ref_processor(), true);
ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
// Temporarily make reference _discovery_ single threaded (non-MT)
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
ref_processor()->set_enqueuing_is_done(false);
ref_processor()->enable_discovery();
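The ReferenceProcessor*Mutator objects above are scoped (RAII) helpers: each constructor records the processor's current setting and installs a temporary value, and the destructor restores the saved setting when the enclosing block exits, so the overrides cannot leak past do_compaction_work(). A minimal standalone sketch of that idiom, using a made-up RefProcessorLike struct and MTDiscoveryMutator class rather than the real HotSpot types:

#include <cstdio>

// Stand-in for a processor with one tweakable setting (not the HotSpot class).
struct RefProcessorLike {
  bool mt_discovery;
};

// Scoped mutator: save the old value, install the new one, restore on exit.
class MTDiscoveryMutator {
  RefProcessorLike* _rp;
  bool              _saved;
 public:
  MTDiscoveryMutator(RefProcessorLike* rp, bool value)
      : _rp(rp), _saved(rp->mt_discovery) {
    _rp->mt_discovery = value;
  }
  ~MTDiscoveryMutator() {
    _rp->mt_discovery = _saved;  // restored no matter how the scope is left
  }
};

int main() {
  RefProcessorLike rp = { true };
  {
    MTDiscoveryMutator m(&rp, false);                         // temporarily non-MT
    std::printf("inside scope: %d\n", (int) rp.mt_discovery); // prints 0
  }
  std::printf("after scope:  %d\n", (int) rp.mt_discovery);   // prints 1, restored
  return 0;
}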
@ -4263,9 +4266,7 @@ bool CMSCollector::do_marking_mt(bool asynch) {
// Refs discovery is already non-atomic.
assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
// Mutate the Refs discovery so it is MT during the
// multi-threaded marking phase.
ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
conc_workers()->start_task(&tsk);
while (tsk.yielded()) {
@ -4318,6 +4319,8 @@ bool CMSCollector::do_marking_st(bool asynch) {
ResourceMark rm;
HandleMark hm;
// Temporarily make refs discovery single threaded (non-MT)
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
&_markStack, &_revisitStack, CMSYield && asynch);
// the last argument to iterate indicates whether the iteration
@ -4356,10 +4359,6 @@ void CMSCollector::preclean() {
verify_overflow_empty();
_abort_preclean = false;
if (CMSPrecleaningEnabled) {
// Precleaning is currently not MT but the reference processor
// may be set for MT. Disable it temporarily here.
ReferenceProcessor* rp = ref_processor();
ReferenceProcessorMTProcMutator z(rp, false);
_eden_chunk_index = 0;
size_t used = get_eden_used();
size_t capacity = get_eden_capacity();
@ -4502,11 +4501,16 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
_collectorState == AbortablePreclean, "incorrect state");
ResourceMark rm;
HandleMark hm;
// Precleaning is currently not MT but the reference processor
// may be set for MT. Disable it temporarily here.
ReferenceProcessor* rp = ref_processor();
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
// Do one pass of scrubbing the discovered reference lists
// to remove any reference objects with strongly-reachable
// referents.
if (clean_refs) {
ReferenceProcessor* rp = ref_processor();
CMSPrecleanRefsYieldClosure yield_cl(this);
assert(rp->span().equals(_span), "Spans should be equal");
CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
@ -5576,8 +5580,10 @@ void CMSCollector::do_remark_parallel() {
// in the multi-threaded case, but we special-case n=1 here to get
// repeatable measurements of the 1-thread overhead of the parallel code.
if (n_workers > 1) {
// Make refs discovery MT-safe
ReferenceProcessorMTMutator mt(ref_processor(), true);
// Make refs discovery MT-safe, if it isn't already: it may not
// necessarily be so, since it's possible that we are doing
// ST marking.
ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
GenCollectedHeap::StrongRootsScope srs(gch);
workers->run_task(&tsk);
} else {
@ -5703,6 +5709,10 @@ public:
CMSBitMap* mark_bit_map,
AbstractWorkGang* workers,
OopTaskQueueSet* task_queues):
// XXX Should superclass AGTWOQ also know about AWG since it knows
// about the task_queues used by the AWG? Then it could initialize
// the terminator() object. See 6984287. The set_for_termination()
// below is a temporary band-aid for the regression in 6984287.
AbstractGangTaskWOopQueues("Process referents by policy in parallel",
task_queues),
_task(task),
@ -5710,6 +5720,7 @@ public:
{
assert(_collector->_span.equals(_span) && !_span.is_empty(),
"Inconsistency in _span");
set_for_termination(workers->active_workers());
}
OopTaskQueueSet* task_queues() { return queues(); }
@ -5872,8 +5883,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
// That is OK as long as the Reference lists are balanced (see
// balance_all_queues() and balance_queues()).
rp->set_mt_degree(ParallelGCThreads);
rp->set_active_mt_degree(ParallelGCThreads);
CMSRefProcTaskExecutor task_executor(*this);
rp->process_discovered_references(&_is_alive_closure,
&cmsKeepAliveClosure,

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1133,7 +1133,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// rare that the cost of the CAS's involved is in the
// noise. That's a measurement that should be done, and
// the code simplified if that turns out to be the case.
return false;
return ConcGCThreads > 1;
}
// Override

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,7 +51,7 @@ int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
volatile jint ConcurrentMarkSweepThread::_pending_decrements = 0;
volatile bool ConcurrentMarkSweepThread::_icms_enabled = false;
volatile jint ConcurrentMarkSweepThread::_icms_disabled = 0;
volatile bool ConcurrentMarkSweepThread::_should_run = false;
// When icms is enabled, the icms thread is stopped until explicitly
// started.
@ -84,7 +84,7 @@ ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
}
}
_sltMonitor = SLT_lock;
set_icms_enabled(CMSIncrementalMode);
assert(!CMSIncrementalMode || icms_is_enabled(), "Error");
}
void ConcurrentMarkSweepThread::run() {
@ -341,11 +341,11 @@ void ConcurrentMarkSweepThread::stop_icms() {
void ConcurrentMarkSweepThread::icms_wait() {
assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
if (_should_stop && icms_enabled()) {
if (_should_stop && icms_is_enabled()) {
MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
trace_state("pause_icms");
_collector->stats().stop_cms_timer();
while(!_should_run && icms_enabled()) {
while(!_should_run && icms_is_enabled()) {
iCMS_lock->wait(Mutex::_no_safepoint_check_flag);
}
_collector->stats().start_cms_timer();

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@
class ConcurrentMarkSweepGeneration;
class CMSCollector;
// The Concurrent Mark Sweep GC Thread (could be several in the future).
// The Concurrent Mark Sweep GC Thread
class ConcurrentMarkSweepThread: public ConcurrentGCThread {
friend class VMStructs;
friend class ConcurrentMarkSweepGeneration; // XXX should remove friendship
@ -55,8 +55,6 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
static SurrogateLockerThread::SLT_msg_type _sltBuffer;
static Monitor* _sltMonitor;
ConcurrentMarkSweepThread* _next;
static bool _should_terminate;
enum CMS_flag_type {
@ -84,7 +82,7 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
// Tracing messages, enabled by CMSTraceThreadState.
static inline void trace_state(const char* desc);
static volatile bool _icms_enabled; // iCMS enabled?
static volatile int _icms_disabled; // a counter to track #iCMS disable & enable
static volatile bool _should_run; // iCMS may run
static volatile bool _should_stop; // iCMS should stop
@ -214,10 +212,25 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
// Incremental mode is enabled globally by the flag CMSIncrementalMode. It
// must also be enabled/disabled dynamically to allow foreground collections.
static inline void enable_icms() { _icms_enabled = true; }
static inline void disable_icms() { _icms_enabled = false; }
static inline void set_icms_enabled(bool val) { _icms_enabled = val; }
static inline bool icms_enabled() { return _icms_enabled; }
#define ICMS_ENABLING_ASSERT \
assert((CMSIncrementalMode && _icms_disabled >= 0) || \
(!CMSIncrementalMode && _icms_disabled <= 0), "Error")
static inline void enable_icms() {
ICMS_ENABLING_ASSERT;
Atomic::dec(&_icms_disabled);
}
static inline void disable_icms() {
ICMS_ENABLING_ASSERT;
Atomic::inc(&_icms_disabled);
}
static inline bool icms_is_disabled() {
ICMS_ENABLING_ASSERT;
return _icms_disabled > 0;
}
static inline bool icms_is_enabled() {
return !icms_is_disabled();
}
};
inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
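The hunk above replaces the single _icms_enabled boolean with the _icms_disabled counter, so that disable_icms()/enable_icms() calls can nest: iCMS stays off while any caller still holds a disable, which is what VM_GenCollectFullConcurrent relies on further down. A small self-contained sketch of the same counting idiom, written with std::atomic instead of HotSpot's Atomic class and without the CMSIncrementalMode assertions:

#include <atomic>
#include <cassert>
#include <cstdio>

// Nestable disable counter: a value > 0 means "disabled by at least one caller".
static std::atomic<int> g_icms_disabled(0);

void disable_icms()    { g_icms_disabled.fetch_add(1); }
void enable_icms()     { g_icms_disabled.fetch_sub(1); }
bool icms_is_enabled() { return g_icms_disabled.load() <= 0; }

int main() {
  disable_icms();               // e.g. a foreground full collection starts
  disable_icms();               // a second, overlapping reason to pause iCMS
  enable_icms();                // the first reason goes away...
  assert(!icms_is_enabled());   // ...but iCMS must stay disabled
  enable_icms();                // the last reason goes away
  assert(icms_is_enabled());
  std::printf("icms enabled: %d\n", (int) icms_is_enabled());
  return 0;
}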

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -192,14 +192,18 @@ void VM_GenCollectFullConcurrent::doit() {
"total_collections() should be monotonically increasing");
MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
if (gch->total_full_collections() == _full_gc_count_before) {
// Disable iCMS until the full collection is done.
// Disable iCMS until the full collection is done, and
// remember that we did so.
CMSCollector::disable_icms();
_disabled_icms = true;
// In case CMS thread was in icms_wait(), wake it up.
CMSCollector::start_icms();
// Nudge the CMS thread to start a concurrent collection.
CMSCollector::request_full_gc(_full_gc_count_before);
} else {
assert(_full_gc_count_before < gch->total_full_collections(), "Error");
FullGCCount_lock->notify_all(); // Inform the Java thread its work is done
}
}
@ -259,6 +263,8 @@ void VM_GenCollectFullConcurrent::doit_epilogue() {
FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
}
}
// Enable iCMS back.
// Enable iCMS back if we disabled it earlier.
if (_disabled_icms) {
CMSCollector::enable_icms();
}
}

View file

@ -128,11 +128,14 @@ class VM_CMS_Final_Remark: public VM_CMS_Operation {
// VM operation to invoke a concurrent collection of the heap as a
// GenCollectedHeap heap.
class VM_GenCollectFullConcurrent: public VM_GC_Operation {
bool _disabled_icms;
public:
VM_GenCollectFullConcurrent(unsigned int gc_count_before,
unsigned int full_gc_count_before,
GCCause::Cause gc_cause)
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */) {
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
_disabled_icms(false)
{
assert(FullGCCount_lock != NULL, "Error");
assert(UseAsyncConcMarkSweepGC, "Else will hang caller");
}

View file

@ -373,7 +373,7 @@ void ConcurrentG1Refine::clean_up_cache(int worker_i,
// RSet updating while within an evacuation pause.
// In this case worker_i should be the id of a GC worker thread
assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "incorrect worker id");
assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id");
into_cset_dcq->enqueue(entry);
}
}

View file

@ -1828,7 +1828,7 @@ void ConcurrentMark::completeCleanup() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
_cleanup_list.verify_optional();
FreeRegionList local_free_list("Local Cleanup List");
FreeRegionList tmp_free_list("Tmp Free List");
if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
@ -1842,7 +1842,7 @@ void ConcurrentMark::completeCleanup() {
HeapRegion* hr = _cleanup_list.remove_head();
assert(hr != NULL, "the list was not empty");
hr->rem_set()->clear();
local_free_list.add_as_tail(hr);
tmp_free_list.add_as_tail(hr);
// Instead of adding one region at a time to the secondary_free_list,
// we accumulate them in the local list and move them a few at a
@ -1850,20 +1850,20 @@ void ConcurrentMark::completeCleanup() {
// we do during this process. We'll also append the local list when
// _cleanup_list is empty (which means we just removed the last
// region from the _cleanup_list).
if ((local_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
_cleanup_list.is_empty()) {
if (G1ConcRegionFreeingVerbose) {
gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
"appending "SIZE_FORMAT" entries to the "
"secondary_free_list, clean list still has "
SIZE_FORMAT" entries",
local_free_list.length(),
tmp_free_list.length(),
_cleanup_list.length());
}
{
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
g1h->secondary_free_list_add_as_tail(&local_free_list);
g1h->secondary_free_list_add_as_tail(&tmp_free_list);
SecondaryFreeList_lock->notify_all();
}
@ -1874,7 +1874,7 @@ void ConcurrentMark::completeCleanup() {
}
}
}
assert(local_free_list.is_empty(), "post-condition");
assert(tmp_free_list.is_empty(), "post-condition");
}
// Support closures for reference processing in G1
@ -2141,21 +2141,22 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
G1CMDrainMarkingStackClosure
g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
// We use the work gang from the G1CollectedHeap and we utilize all
// the worker threads.
int active_workers = MAX2(MIN2(g1h->workers()->total_workers(), (int)_max_task_num), 1);
int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
g1h->workers(), active_workers);
if (rp->processing_is_mt()) {
// Set the degree of MT here. If the discovery is done MT, there
// may have been a different number of threads doing the discovery
// and a different number of discovered lists may have Ref objects.
// That is OK as long as the Reference lists are balanced (see
// balance_all_queues() and balance_queues()).
rp->set_mt_degree(active_workers);
rp->set_active_mt_degree(active_workers);
rp->process_discovered_references(&g1_is_alive,
&g1_keep_alive,
@ -3182,7 +3183,7 @@ public:
template <class T> void do_oop_work(T* p) {
assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
assert(!_g1h->is_on_free_list(
assert(!_g1h->is_on_master_free_list(
_g1h->heap_region_containing((HeapWord*) p)), "invariant");
oop obj = oopDesc::load_decode_heap_oop(p);
@ -3403,7 +3404,7 @@ void CMTask::deal_with_reference(oop obj) {
void CMTask::push(oop obj) {
HeapWord* objAddr = (HeapWord*) obj;
assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
assert(!_g1h->is_on_free_list(
assert(!_g1h->is_on_master_free_list(
_g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
assert(!_g1h->is_obj_ill(obj), "invariant");
assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
@ -3649,7 +3650,7 @@ void CMTask::drain_local_queue(bool partially) {
(void*) obj);
assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
assert(!_g1h->is_on_free_list(
assert(!_g1h->is_on_master_free_list(
_g1h->heap_region_containing((HeapWord*) obj)), "invariant");
scan_object(obj);

View file

@ -237,9 +237,9 @@ void ConcurrentMarkThread::run() {
// The following will finish freeing up any regions that we
// found to be empty during cleanup. We'll do this part
// without joining the suspendible set. If an evacuation pause
// takes places, then we would carry on freeing regions in
// takes place, then we would carry on freeing regions in
// case they are needed by the pause. If a Full GC takes
// places, it would wait for us to process the regions
// place, it would wait for us to process the regions
// reclaimed by cleanup.
double cleanup_start_sec = os::elapsedTime();

View file

@ -479,7 +479,7 @@ G1CollectedHeap* G1CollectedHeap::_g1h;
// Private methods.
HeapRegion*
G1CollectedHeap::new_region_try_secondary_free_list(size_t word_size) {
G1CollectedHeap::new_region_try_secondary_free_list() {
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
while (!_secondary_free_list.is_empty() || free_regions_coming()) {
if (!_secondary_free_list.is_empty()) {
@ -531,7 +531,7 @@ HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
"forced to look at the secondary_free_list");
}
res = new_region_try_secondary_free_list(word_size);
res = new_region_try_secondary_free_list();
if (res != NULL) {
return res;
}
@ -543,7 +543,7 @@ HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
"res == NULL, trying the secondary_free_list");
}
res = new_region_try_secondary_free_list(word_size);
res = new_region_try_secondary_free_list();
}
if (res == NULL && do_expand) {
if (expand(word_size * HeapWordSize)) {
@ -579,6 +579,9 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
size_t word_size) {
assert(isHumongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
int first = -1;
if (num_regions == 1) {
// Only one region to allocate, no need to go through the slower
@ -600,7 +603,7 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
// request. If we are only allocating one region we use the common
// region allocation code (see above).
wait_while_free_regions_coming();
append_secondary_free_list_if_not_empty();
append_secondary_free_list_if_not_empty_with_lock();
if (free_regions() >= num_regions) {
first = _hrs->find_contiguous(num_regions);
@ -608,7 +611,7 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
for (int i = first; i < first + (int) num_regions; ++i) {
HeapRegion* hr = _hrs->at(i);
assert(hr->is_empty(), "sanity");
assert(is_on_free_list(hr), "sanity");
assert(is_on_master_free_list(hr), "sanity");
hr->set_pending_removal(true);
}
_free_list.remove_all_pending(num_regions);
@ -618,42 +621,14 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
return first;
}
// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
size_t num_regions,
size_t word_size) {
assert(first != -1, "pre-condition");
assert(isHumongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
verify_region_sets_optional();
size_t num_regions =
round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
size_t x_size = expansion_regions();
size_t fs = _hrs->free_suffix();
int first = humongous_obj_allocate_find_first(num_regions, word_size);
if (first == -1) {
// The only thing we can do now is attempt expansion.
if (fs + x_size >= num_regions) {
// If the number of regions we're trying to allocate for this
// object is at most the number of regions in the free suffix,
// then the call to humongous_obj_allocate_find_first() above
// should have succeeded and we wouldn't be here.
//
// We should only be trying to expand when the free suffix is
// not sufficient for the object _and_ we have some expansion
// room available.
assert(num_regions > fs, "earlier allocation should have succeeded");
if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
first = humongous_obj_allocate_find_first(num_regions, word_size);
// If the expansion was successful then the allocation
// should have been successful.
assert(first != -1, "this should have worked");
}
}
}
if (first != -1) {
// Index of last region in the series + 1.
int last = first + (int) num_regions;
@ -766,8 +741,51 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
return new_obj;
}
// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
verify_region_sets_optional();
return NULL;
size_t num_regions =
round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
size_t x_size = expansion_regions();
size_t fs = _hrs->free_suffix();
int first = humongous_obj_allocate_find_first(num_regions, word_size);
if (first == -1) {
// The only thing we can do now is attempt expansion.
if (fs + x_size >= num_regions) {
// If the number of regions we're trying to allocate for this
// object is at most the number of regions in the free suffix,
// then the call to humongous_obj_allocate_find_first() above
// should have succeeded and we wouldn't be here.
//
// We should only be trying to expand when the free suffix is
// not sufficient for the object _and_ we have some expansion
// room available.
assert(num_regions > fs, "earlier allocation should have succeeded");
if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
first = humongous_obj_allocate_find_first(num_regions, word_size);
// If the expansion was successful then the allocation
// should have been successful.
assert(first != -1, "this should have worked");
}
}
}
HeapWord* result = NULL;
if (first != -1) {
result =
humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
assert(result != NULL, "it should always return a valid result");
}
verify_region_sets_optional();
return result;
}
void
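Taken together, humongous_obj_allocate_find_first() and humongous_obj_allocate_initialize_regions() split a humongous allocation into a search step and an initialization step: round the request up to whole regions, look for a contiguous run of free regions, expand the heap by the shortfall if a free suffix plus expansion room can cover it, and only then turn the run into a humongous object. A simplified standalone sketch of the search-plus-expand control flow; the helper names, stub results, and region size here are illustrative, not the G1 code:

#include <cstddef>
#include <cstdio>

// Illustrative constants and stubs, not the real G1 values or lookups.
static const size_t GrainWords = 512;   // words per region (made up)

size_t regions_needed(size_t word_size) {
  return (word_size + GrainWords - 1) / GrainWords;   // round up to whole regions
}

long find_contiguous(size_t /*num*/)              { return -1; }  // stub: no run found
long find_contiguous_after_expand(size_t /*num*/) { return  7; }  // stub: run found at 7
bool expand_by_regions(size_t n) { std::printf("expanding by %zu regions\n", n); return true; }

// Mirror of the control flow above: search, expand by the shortfall, search again.
long humongous_first_region(size_t word_size, size_t free_suffix, size_t expansion_regions) {
  size_t num   = regions_needed(word_size);
  long   first = find_contiguous(num);
  if (first == -1 && free_suffix + expansion_regions >= num) {
    // A free suffix plus expansion room can cover the request.
    if (expand_by_regions(num - free_suffix)) {
      first = find_contiguous_after_expand(num);
    }
  }
  return first;   // caller initializes regions [first, first + num) when != -1
}

int main() {
  long first = humongous_first_region(2000 /* words */, 2, 8);
  std::printf("first region index: %ld\n", first);
  return 0;
}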
@ -1389,7 +1407,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
g1_policy()->record_full_collection_start();
wait_while_free_regions_coming();
append_secondary_free_list_if_not_empty();
append_secondary_free_list_if_not_empty_with_lock();
gc_prologue(true);
increment_total_collections(true /* full gc */);
@ -1444,7 +1462,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
// how reference processing currently works in G1.
// Temporarily make reference _discovery_ single threaded (non-MT).
ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
// Temporarily make refs discovery atomic
ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
@ -2201,14 +2219,14 @@ void G1CollectedHeap::ref_processing_init() {
SharedHeap::ref_processing_init();
MemRegion mr = reserved_region();
_ref_processor = ReferenceProcessor::create_ref_processor(
mr, // span
_ref_processor =
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(int) ParallelGCThreads, // degree of mt processing
ParallelGCThreads > 1 || ConcGCThreads > 1, // mt discovery
(int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
false, // Reference discovery is not atomic
true, // mt_discovery
&_is_alive_closure, // is alive closure
// for efficiency
ParallelGCThreads,
ParallelRefProcEnabled,
&_is_alive_closure, // is alive closure for efficiency
true); // Setting next fields of discovered
// lists requires a barrier.
}
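The constructor call above separates reference processing from reference discovery: processing is MT only when ParallelRefProcEnabled is set and more than one parallel GC thread is in use, while discovery is MT whenever either the parallel or the concurrent worker count exceeds one, with the discovery degree sized for the larger of the two pools. A tiny illustrative program that just evaluates those four expressions with made-up flag values:

#include <algorithm>
#include <cstdio>

int main() {
  // Made-up flag values standing in for the real -XX options.
  unsigned ParallelGCThreads      = 4;    // STW parallel GC workers
  unsigned ConcGCThreads          = 1;    // concurrent marking threads
  bool     ParallelRefProcEnabled = true;

  bool mt_processing     = ParallelRefProcEnabled && (ParallelGCThreads > 1);
  int  processing_degree = (int) ParallelGCThreads;
  bool mt_discovery      = ParallelGCThreads > 1 || ConcGCThreads > 1;
  int  discovery_degree  = (int) std::max(ParallelGCThreads, ConcGCThreads);

  std::printf("mt processing: %d, degree %d\n", (int) mt_processing, processing_degree);
  std::printf("mt discovery:  %d, degree %d\n", (int) mt_discovery,  discovery_degree);
  return 0;
}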
@ -3377,15 +3395,14 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
TraceMemoryManagerStats tms(false /* fullGC */);
// If there are any free regions available on the secondary_free_list
// make sure we append them to the free_list. However, we don't
// have to wait for the rest of the cleanup operation to
// finish. If it's still going on that's OK. If we run out of
// regions, the region allocation code will check the
// secondary_free_list and potentially wait if more free regions
// are coming (see new_region_try_secondary_free_list()).
// If the secondary_free_list is not empty, append it to the
// free_list. No need to wait for the cleanup operation to finish;
// the region allocation code will check the secondary_free_list
// and wait if necessary. If the G1StressConcRegionFreeing flag is
// set, skip this step so that the region allocation code has to
// get entries from the secondary_free_list.
if (!G1StressConcRegionFreeing) {
append_secondary_free_list_if_not_empty();
append_secondary_free_list_if_not_empty_with_lock();
}
increment_gc_time_stamp();
@ -5199,7 +5216,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
size_t rs_lengths = 0;
while (cur != NULL) {
assert(!is_on_free_list(cur), "sanity");
assert(!is_on_master_free_list(cur), "sanity");
if (non_young) {
if (cur->is_young()) {
@ -5543,13 +5560,10 @@ void G1CollectedHeap::verify_region_sets() {
return;
}
{
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
// Make sure we append the secondary_free_list on the free_list so
// that all free regions we will come across can be safely
// attributed to the free_list.
append_secondary_free_list();
}
append_secondary_free_list_if_not_empty_with_lock();
// Finally, make sure that the region accounting in the lists is
// consistent with what we see in the heap.

View file

@ -56,7 +56,6 @@ class HeapRegionRemSetIterator;
class ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class ConcurrentZFThread;
typedef OverflowTaskQueue<StarTask> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
@ -64,12 +63,6 @@ typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
enum G1GCThreadGroups {
G1CRGroup = 0,
G1ZFGroup = 1,
G1CMGroup = 2
};
enum GCAllocPurpose {
GCAllocForTenured,
GCAllocForSurvived,
@ -294,9 +287,9 @@ private:
// These are macros so that, if the assert fires, we get the correct
// line number, file, etc.
#define heap_locking_asserts_err_msg(__extra_message) \
#define heap_locking_asserts_err_msg(_extra_message_) \
err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
(__extra_message), \
(_extra_message_), \
BOOL_TO_STR(Heap_lock->owned_by_self()), \
BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
BOOL_TO_STR(Thread::current()->is_VM_thread()))
@ -307,11 +300,11 @@ private:
heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
} while (0)
#define assert_heap_locked_or_at_safepoint(__should_be_vm_thread) \
#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \
do { \
assert(Heap_lock->owned_by_self() || \
(SafepointSynchronize::is_at_safepoint() && \
((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \
((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
"should be at a safepoint")); \
} while (0)
@ -338,10 +331,10 @@ private:
"should not be at a safepoint")); \
} while (0)
#define assert_at_safepoint(__should_be_vm_thread) \
#define assert_at_safepoint(_should_be_vm_thread_) \
do { \
assert(SafepointSynchronize::is_at_safepoint() && \
((__should_be_vm_thread) == Thread::current()->is_VM_thread()), \
((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
heap_locking_asserts_err_msg("should be at a safepoint")); \
} while (0)
@ -371,35 +364,40 @@ protected:
// will check whether there's anything available in the
// secondary_free_list and/or wait for more regions to appear in that
// list, if _free_regions_coming is set.
HeapRegion* new_region_try_secondary_free_list(size_t word_size);
HeapRegion* new_region_try_secondary_free_list();
// It will try to allocate a single non-humongous HeapRegion
// sufficient for an allocation of the given word_size. If
// do_expand is true, it will attempt to expand the heap if
// necessary to satisfy the allocation request. Note that word_size
// is only used to make sure that we expand sufficiently but, given
// that the allocation request is assumed not to be humongous,
// having word_size is not strictly necessary (expanding by a single
// region will always be sufficient). But let's keep that parameter
// in case we need it in the future.
// Try to allocate a single non-humongous HeapRegion sufficient for
// an allocation of the given word_size. If do_expand is true,
// attempt to expand the heap if necessary to satisfy the allocation
// request.
HeapRegion* new_region_work(size_t word_size, bool do_expand);
// It will try to allocate a new region to be used for allocation by
// mutator threads. It will not try to expand the heap if not region
// is available.
// Try to allocate a new region to be used for allocation by a
// mutator thread. Attempt to expand the heap if no region is
// available.
HeapRegion* new_alloc_region(size_t word_size) {
return new_region_work(word_size, false /* do_expand */);
}
// It will try to allocate a new region to be used for allocation by
// a GC thread. It will try to expand the heap if no region is
// available.
// Try to allocate a new region to be used for allocation by a GC
// thread. Attempt to expand the heap if no region is available.
HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
// Attempt to satisfy a humongous allocation request of the given
// size by finding a contiguous set of free regions of num_regions
// length and remove them from the master free list. Return the
// index of the first region or -1 if the search was unsuccessful.
int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);
// Attempt to allocate an object of the given (very large) "word_size".
// Returns "NULL" on failure.
// Initialize a contiguous set of free regions of length num_regions
// and starting at index first so that they appear as a single
// humongous region.
HeapWord* humongous_obj_allocate_initialize_regions(int first,
size_t num_regions,
size_t word_size);
// Attempt to allocate a humongous object of the given size. Return
// NULL if unsuccessful.
HeapWord* humongous_obj_allocate(size_t word_size);
// The following two methods, allocate_new_tlab() and
@ -776,7 +774,7 @@ protected:
// Invoke "save_marks" on all heap regions.
void save_marks();
// It frees a non-humongous region by initializing its contents and
// Frees a non-humongous region by initializing its contents and
// adding it to the free list that's passed as a parameter (this is
// usually a local list which will be appended to the master free
// list later). The used bytes of freed regions are accumulated in
@ -787,13 +785,13 @@ protected:
FreeRegionList* free_list,
bool par);
// It frees a humongous region by collapsing it into individual
// regions and calling free_region() for each of them. The freed
// regions will be added to the free list that's passed as a parameter
// (this is usually a local list which will be appended to the
// master free list later). The used bytes of freed regions are
// accumulated in pre_used. If par is true, the region's RSet will
// not be freed up. The assumption is that this will be done later.
// Frees a humongous region by collapsing it into individual regions
// and calling free_region() for each of them. The freed regions
// will be added to the free list that's passed as a parameter (this
// is usually a local list which will be appended to the master free
// list later). The used bytes of freed regions are accumulated in
// pre_used. If par is true, the region's RSet will not be freed
// up. The assumption is that this will be done later.
void free_humongous_region(HeapRegion* hr,
size_t* pre_used,
FreeRegionList* free_list,
@ -1046,11 +1044,11 @@ public:
#endif // HEAP_REGION_SET_FORCE_VERIFY
#ifdef ASSERT
bool is_on_free_list(HeapRegion* hr) {
bool is_on_master_free_list(HeapRegion* hr) {
return hr->containing_set() == &_free_list;
}
bool is_on_humongous_set(HeapRegion* hr) {
bool is_in_humongous_set(HeapRegion* hr) {
return hr->containing_set() == &_humongous_set;
}
#endif // ASSERT
@ -1066,7 +1064,9 @@ public:
_free_list.add_as_tail(&_secondary_free_list);
}
void append_secondary_free_list_if_not_empty() {
void append_secondary_free_list_if_not_empty_with_lock() {
// If the secondary free list looks empty there's no reason to
// take the lock and then try to append it.
if (!_secondary_free_list.is_empty()) {
MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
append_secondary_free_list();
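append_secondary_free_list_if_not_empty_with_lock() peeks at the secondary free list without holding SecondaryFreeList_lock and only acquires the lock when there is something to move. A minimal sketch of that cheap-check-then-lock pattern using std::mutex and std::list stand-ins (not the HotSpot types); the unlocked peek is a benign race in the real code, since a region missed here is simply appended by a later call:

#include <cstdio>
#include <list>
#include <mutex>

static std::list<int> secondary_free_list;   // filled by the cleanup path elsewhere
static std::list<int> free_list;             // the master free list
static std::mutex     secondary_free_list_lock;

void append_secondary_free_list_if_not_empty_with_lock() {
  // Cheap unlocked peek: if the list looks empty, skip the lock entirely.
  // (The real code accepts this benign race; this demo is single-threaded,
  // so the read is safe as written.)
  if (secondary_free_list.empty()) return;

  std::lock_guard<std::mutex> guard(secondary_free_list_lock);
  free_list.splice(free_list.end(), secondary_free_list);   // append as tail
}

int main() {
  secondary_free_list.push_back(1);
  secondary_free_list.push_back(2);
  append_secondary_free_list_if_not_empty_with_lock();
  std::printf("free_list length: %zu\n", free_list.size());  // prints 2
  return 0;
}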

View file

@ -81,6 +81,57 @@ static double non_young_other_cost_per_region_ms_defaults[] = {
// </NEW PREDICTION>
// Help class for avoiding interleaved logging
class LineBuffer: public StackObj {
private:
static const int BUFFER_LEN = 1024;
static const int INDENT_CHARS = 3;
char _buffer[BUFFER_LEN];
int _indent_level;
int _cur;
void vappend(const char* format, va_list ap) {
int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
if (res != -1) {
_cur += res;
} else {
DEBUG_ONLY(warning("buffer too small in LineBuffer");)
_buffer[BUFFER_LEN -1] = 0;
_cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
}
}
public:
explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
_buffer[_cur] = ' ';
}
}
#ifndef PRODUCT
~LineBuffer() {
assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
}
#endif
void append(const char* format, ...) {
va_list ap;
va_start(ap, format);
vappend(format, ap);
va_end(ap);
}
void append_and_print_cr(const char* format, ...) {
va_list ap;
va_start(ap, format);
vappend(format, ap);
va_end(ap);
gclog_or_tty->print_cr("%s", _buffer);
_cur = _indent_level * INDENT_CHARS;
}
};
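The per-thread timing printers later in this file switch to this class: instead of writing fragments straight to gclog_or_tty, where they can interleave with output from other threads, they accumulate one indented line in a LineBuffer and emit it in a single call to append_and_print_cr(). A trimmed, self-contained stand-in that keeps the same interface but prints to stdout and skips the overflow handling, followed by a usage mirroring print_par_stats() below:

#include <cstdarg>
#include <cstdio>

class MiniLineBuffer {
  static const int BUFFER_LEN   = 256;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int  _indent_level;
  int  _cur;
 public:
  explicit MiniLineBuffer(int indent_level) : _indent_level(indent_level), _cur(0) {
    for (; _cur < _indent_level * INDENT_CHARS && _cur < BUFFER_LEN; _cur++) {
      _buffer[_cur] = ' ';                 // pre-fill the indentation
    }
  }
  void append(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    _cur += vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    va_end(ap);
  }
  void append_and_print_cr(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    _cur += vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    va_end(ap);
    std::printf("%s\n", _buffer);          // the whole line goes out at once
    _cur = _indent_level * INDENT_CHARS;   // reset for the next line
  }
};

int main() {
  // Build a per-phase line the way print_par_stats() does below: everything
  // is buffered, then emitted in one call, so lines cannot interleave.
  MiniLineBuffer buf(2);                   // indent two levels
  buf.append("[%s (ms):", "GC Worker Times");
  for (int i = 0; i < 4; i++) {
    buf.append(" %3.1lf", 1.5 * i);
  }
  buf.append_and_print_cr("]");
  return 0;
}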
G1CollectorPolicy::G1CollectorPolicy() :
_parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
? ParallelGCThreads : 1),
@ -1016,10 +1067,8 @@ void G1CollectorPolicy::print_par_stats(int level,
bool summary) {
double min = data[0], max = data[0];
double total = 0.0;
int j;
for (j = 0; j < level; ++j)
gclog_or_tty->print(" ");
gclog_or_tty->print("[%s (ms):", str);
LineBuffer buf(level);
buf.append("[%s (ms):", str);
for (uint i = 0; i < ParallelGCThreads; ++i) {
double val = data[i];
if (val < min)
@ -1027,18 +1076,16 @@ void G1CollectorPolicy::print_par_stats(int level,
if (val > max)
max = val;
total += val;
gclog_or_tty->print(" %3.1lf", val);
buf.append(" %3.1lf", val);
}
if (summary) {
gclog_or_tty->print_cr("");
buf.append_and_print_cr("");
double avg = total / (double) ParallelGCThreads;
gclog_or_tty->print(" ");
for (j = 0; j < level; ++j)
gclog_or_tty->print(" ");
gclog_or_tty->print("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf",
buf.append(" ");
buf.append("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf",
avg, min, max);
}
gclog_or_tty->print_cr("]");
buf.append_and_print_cr("]");
}
void G1CollectorPolicy::print_par_sizes(int level,
@ -1047,10 +1094,8 @@ void G1CollectorPolicy::print_par_sizes(int level,
bool summary) {
double min = data[0], max = data[0];
double total = 0.0;
int j;
for (j = 0; j < level; ++j)
gclog_or_tty->print(" ");
gclog_or_tty->print("[%s :", str);
LineBuffer buf(level);
buf.append("[%s :", str);
for (uint i = 0; i < ParallelGCThreads; ++i) {
double val = data[i];
if (val < min)
@ -1058,34 +1103,28 @@ void G1CollectorPolicy::print_par_sizes(int level,
if (val > max)
max = val;
total += val;
gclog_or_tty->print(" %d", (int) val);
buf.append(" %d", (int) val);
}
if (summary) {
gclog_or_tty->print_cr("");
buf.append_and_print_cr("");
double avg = total / (double) ParallelGCThreads;
gclog_or_tty->print(" ");
for (j = 0; j < level; ++j)
gclog_or_tty->print(" ");
gclog_or_tty->print("Sum: %d, Avg: %d, Min: %d, Max: %d",
buf.append(" ");
buf.append("Sum: %d, Avg: %d, Min: %d, Max: %d",
(int)total, (int)avg, (int)min, (int)max);
}
gclog_or_tty->print_cr("]");
buf.append_and_print_cr("]");
}
void G1CollectorPolicy::print_stats (int level,
const char* str,
double value) {
for (int j = 0; j < level; ++j)
gclog_or_tty->print(" ");
gclog_or_tty->print_cr("[%s: %5.1lf ms]", str, value);
LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
}
void G1CollectorPolicy::print_stats (int level,
const char* str,
int value) {
for (int j = 0; j < level; ++j)
gclog_or_tty->print(" ");
gclog_or_tty->print_cr("[%s: %d]", str, value);
LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}
double G1CollectorPolicy::avg_value (double* data) {
@ -2060,17 +2099,11 @@ void G1CollectorPolicy::count_CS_bytes_used() {
_g1->collection_set_iterate(&cs_closure);
}
static void print_indent(int level) {
for (int j = 0; j < level+1; ++j)
gclog_or_tty->print(" ");
}
void G1CollectorPolicy::print_summary (int level,
const char* str,
NumberSeq* seq) const {
double sum = seq->sum();
print_indent(level);
gclog_or_tty->print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
str, sum / 1000.0, seq->avg());
}
@ -2078,8 +2111,7 @@ void G1CollectorPolicy::print_summary_sd (int level,
const char* str,
NumberSeq* seq) const {
print_summary(level, str, seq);
print_indent(level + 5);
gclog_or_tty->print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
seq->num(), seq->sd(), seq->maximum());
}
@ -2087,6 +2119,7 @@ void G1CollectorPolicy::check_other_times(int level,
NumberSeq* other_times_ms,
NumberSeq* calc_other_times_ms) const {
bool should_print = false;
LineBuffer buf(level + 2);
double max_sum = MAX2(fabs(other_times_ms->sum()),
fabs(calc_other_times_ms->sum()));
@ -2095,8 +2128,7 @@ void G1CollectorPolicy::check_other_times(int level,
double sum_ratio = max_sum / min_sum;
if (sum_ratio > 1.1) {
should_print = true;
print_indent(level + 1);
gclog_or_tty->print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
}
double max_avg = MAX2(fabs(other_times_ms->avg()),
@ -2106,30 +2138,25 @@ void G1CollectorPolicy::check_other_times(int level,
double avg_ratio = max_avg / min_avg;
if (avg_ratio > 1.1) {
should_print = true;
print_indent(level + 1);
gclog_or_tty->print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
}
if (other_times_ms->sum() < -0.01) {
print_indent(level + 1);
gclog_or_tty->print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
}
if (other_times_ms->avg() < -0.01) {
print_indent(level + 1);
gclog_or_tty->print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
}
if (calc_other_times_ms->sum() < -0.01) {
should_print = true;
print_indent(level + 1);
gclog_or_tty->print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
}
if (calc_other_times_ms->avg() < -0.01) {
should_print = true;
print_indent(level + 1);
gclog_or_tty->print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
}
if (should_print)
@ -2210,10 +2237,9 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
}
}
} else {
print_indent(0);
gclog_or_tty->print_cr("none");
LineBuffer(1).append_and_print_cr("none");
}
gclog_or_tty->print_cr("");
LineBuffer(0).append_and_print_cr("");
}
void G1CollectorPolicy::print_tracing_info() const {
@ -2532,7 +2558,7 @@ public:
jint regions_added = parKnownGarbageCl.marked_regions_added();
_hrSorted->incNumMarkedHeapRegions(regions_added);
if (G1PrintParCleanupStats) {
gclog_or_tty->print(" Thread %d called %d times, added %d regions to list.\n",
gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
i, parKnownGarbageCl.invokes(), regions_added);
}
}

View file

@ -185,22 +185,22 @@ class G1PrepareCompactClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
ModRefBarrierSet* _mrbs;
CompactPoint _cp;
size_t _pre_used;
FreeRegionList _free_list;
HumongousRegionSet _humongous_proxy_set;
void free_humongous_region(HeapRegion* hr) {
HeapWord* end = hr->end();
size_t dummy_pre_used;
FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
assert(hr->startsHumongous(),
"Only the start of a humongous region should be freed.");
_g1h->free_humongous_region(hr, &_pre_used, &_free_list,
_g1h->free_humongous_region(hr, &dummy_pre_used, &dummy_free_list,
&_humongous_proxy_set, false /* par */);
// Do we also need to do this for the continues humongous regions
// we just collapsed?
hr->prepare_for_compaction(&_cp);
// Also clear the part of the card table that will be unused after
// compaction.
_mrbs->clear(MemRegion(hr->compaction_top(), end));
dummy_free_list.remove_all();
}
public:
@ -208,8 +208,6 @@ public:
: _g1h(G1CollectedHeap::heap()),
_mrbs(G1CollectedHeap::heap()->mr_bs()),
_cp(NULL, cs, cs->initialize_threshold()),
_pre_used(0),
_free_list("Local Free List for G1MarkSweep"),
_humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }
void update_sets() {
@ -219,7 +217,6 @@ public:
NULL, /* free_list */
&_humongous_proxy_set,
false /* par */);
_free_list.remove_all();
}
bool doHeapRegion(HeapRegion* hr) {

View file

@ -86,28 +86,6 @@ public:
bool idempotent() { return true; }
};
class IntoCSRegionClosure: public HeapRegionClosure {
IntoCSOopClosure _blk;
G1CollectedHeap* _g1;
public:
IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
_g1(g1), _blk(g1, blk) {}
bool doHeapRegion(HeapRegion* r) {
if (!r->in_collection_set()) {
_blk.set_region(r);
if (r->isHumongous()) {
if (r->startsHumongous()) {
oop obj = oop(r->bottom());
obj->oop_iterate(&_blk);
}
} else {
r->oop_before_save_marks_iterate(&_blk);
}
}
return false;
}
};
class VerifyRSCleanCardOopClosure: public OopClosure {
G1CollectedHeap* _g1;
public:
@ -329,7 +307,7 @@ public:
// is during RSet updating within an evacuation pause.
// In this case worker_i should be the id of a GC worker thread.
assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "should be a GC worker");
assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
// 'card_ptr' contains references that point into the collection

View file

@ -53,8 +53,8 @@ class HeapRegion;
class HeapRegionSetBase;
#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \
(__hr)->top(), (__hr)->end()
#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \
(_hr_)->top(), (_hr_)->end()
// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
@ -524,7 +524,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
#else // ASSERT
void set_containing_set(HeapRegionSetBase* containing_set) { }
// containing_set() is only used in asserts so there's not reason
// containing_set() is only used in asserts so there's no reason
// to provide a dummy version of it.
#endif // ASSERT
@ -535,14 +535,15 @@ class HeapRegion: public G1OffsetTableContigSpace {
bool pending_removal() { return _pending_removal; }
void set_pending_removal(bool pending_removal) {
// We can only set pending_removal to true, if it's false and the
// region belongs to a set.
assert(!pending_removal ||
(!_pending_removal && containing_set() != NULL), "pre-condition");
// We can only set pending_removal to false, if it's true and the
// region does not belong to a set.
assert( pending_removal ||
( _pending_removal && containing_set() == NULL), "pre-condition");
if (pending_removal) {
assert(!_pending_removal && containing_set() != NULL,
"can only set pending removal to true if it's false and "
"the region belongs to a region set");
} else {
assert( _pending_removal && containing_set() == NULL,
"can only set pending removal to false if it's true and "
"the region does not belong to a region set");
}
_pending_removal = pending_removal;
}

View file

@ -165,7 +165,7 @@ int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
assert(num_so_far <= num, "post-condition");
if (num_so_far == num) {
// we find enough space for the humongous object
// we found enough space for the humongous object
assert(from <= first && first < _regions.length(), "post-condition");
assert(first < curr && (curr - first) == (int) num, "post-condition");
for (int i = first; i < first + (int) num; ++i) {

View file

@ -76,7 +76,8 @@ class HeapRegionSeq: public CHeapObj {
// that are available for allocation.
size_t free_suffix();
// Finds a contiguous set of empty regions of length num.
// Find a contiguous set of empty regions of length num and return
// the index of the first region or -1 if the search was unsuccessful.
int find_contiguous(size_t num);
// Apply the "doHeapRegion" method of "blk" to all regions in "this",

View file

@ -42,7 +42,7 @@ size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
return region_num;
}
void HeapRegionSetBase::fill_in_ext_msg(hrl_ext_msg* msg, const char* message) {
void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
msg->append("[%s] %s "
"ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" "
"cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
@ -109,30 +109,30 @@ void HeapRegionSetBase::verify() {
// for the verification calls. If we do verification without the
// appropriate locks and the set changes underneath our feet
// verification might fail and send us on a wild goose chase.
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
guarantee(( is_empty() && length() == 0 && region_num() == 0 &&
total_used_bytes() == 0 && total_capacity_bytes() == 0) ||
(!is_empty() && length() >= 0 && region_num() >= 0 &&
total_used_bytes() >= 0 && total_capacity_bytes() >= 0),
hrl_ext_msg(this, "invariant"));
hrs_ext_msg(this, "invariant"));
guarantee((!regions_humongous() && region_num() == length()) ||
( regions_humongous() && region_num() >= length()),
hrl_ext_msg(this, "invariant"));
hrs_ext_msg(this, "invariant"));
guarantee(!regions_empty() || total_used_bytes() == 0,
hrl_ext_msg(this, "invariant"));
hrs_ext_msg(this, "invariant"));
guarantee(total_used_bytes() <= total_capacity_bytes(),
hrl_ext_msg(this, "invariant"));
hrs_ext_msg(this, "invariant"));
}
void HeapRegionSetBase::verify_start() {
// See comment in verify() about MT safety and verification.
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
assert(!_verify_in_progress,
hrl_ext_msg(this, "verification should not be in progress"));
hrs_ext_msg(this, "verification should not be in progress"));
// Do the basic verification first before we do the checks over the regions.
HeapRegionSetBase::verify();
@ -146,11 +146,11 @@ void HeapRegionSetBase::verify_start() {
void HeapRegionSetBase::verify_next_region(HeapRegion* hr) {
// See comment in verify() about MT safety and verification.
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
assert(_verify_in_progress,
hrl_ext_msg(this, "verification should be in progress"));
hrs_ext_msg(this, "verification should be in progress"));
guarantee(verify_region(hr, this), hrl_ext_msg(this, "region verification"));
guarantee(verify_region(hr, this), hrs_ext_msg(this, "region verification"));
_calc_length += 1;
if (!hr->isHumongous()) {
@ -164,28 +164,28 @@ void HeapRegionSetBase::verify_next_region(HeapRegion* hr) {
void HeapRegionSetBase::verify_end() {
// See comment in verify() about MT safety and verification.
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
assert(_verify_in_progress,
hrl_ext_msg(this, "verification should be in progress"));
hrs_ext_msg(this, "verification should be in progress"));
guarantee(length() == _calc_length,
hrl_err_msg("[%s] length: "SIZE_FORMAT" should be == "
hrs_err_msg("[%s] length: "SIZE_FORMAT" should be == "
"calc length: "SIZE_FORMAT,
name(), length(), _calc_length));
guarantee(region_num() == _calc_region_num,
hrl_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
hrs_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
"calc region num: "SIZE_FORMAT,
name(), region_num(), _calc_region_num));
guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
hrl_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == "
hrs_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == "
"calc capacity bytes: "SIZE_FORMAT,
name(),
total_capacity_bytes(), _calc_total_capacity_bytes));
guarantee(total_used_bytes() == _calc_total_used_bytes,
hrl_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == "
hrs_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == "
"calc used bytes: "SIZE_FORMAT,
name(), total_used_bytes(), _calc_total_used_bytes));
@ -221,9 +221,9 @@ HeapRegionSetBase::HeapRegionSetBase(const char* name)
//////////////////// HeapRegionSet ////////////////////
void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
hrl_assert_mt_safety_ok(this);
hrl_assert_mt_safety_ok(proxy_set);
hrl_assert_sets_match(this, proxy_set);
hrs_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(proxy_set);
hrs_assert_sets_match(this, proxy_set);
verify_optional();
proxy_set->verify_optional();
@ -231,19 +231,19 @@ void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
if (proxy_set->is_empty()) return;
assert(proxy_set->length() <= _length,
hrl_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
hrs_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
"should be <= length: "SIZE_FORMAT,
name(), proxy_set->length(), _length));
_length -= proxy_set->length();
assert(proxy_set->region_num() <= _region_num,
hrl_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
hrs_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
"should be <= region num: "SIZE_FORMAT,
name(), proxy_set->region_num(), _region_num));
_region_num -= proxy_set->region_num();
assert(proxy_set->total_used_bytes() <= _total_used_bytes,
hrl_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" "
hrs_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" "
"should be <= used bytes: "SIZE_FORMAT,
name(), proxy_set->total_used_bytes(),
_total_used_bytes));
@ -257,13 +257,13 @@ void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
//////////////////// HeapRegionLinkedList ////////////////////
void HeapRegionLinkedList::fill_in_ext_msg_extra(hrl_ext_msg* msg) {
void HeapRegionLinkedList::fill_in_ext_msg_extra(hrs_ext_msg* msg) {
msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
}
void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
hrl_assert_mt_safety_ok(this);
hrl_assert_mt_safety_ok(from_list);
hrs_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(from_list);
verify_optional();
from_list->verify_optional();
@ -283,10 +283,10 @@ void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
#endif // ASSERT
if (_tail != NULL) {
assert(length() > 0 && _head != NULL, hrl_ext_msg(this, "invariant"));
assert(length() > 0 && _head != NULL, hrs_ext_msg(this, "invariant"));
_tail->set_next(from_list->_head);
} else {
assert(length() == 0 && _head == NULL, hrl_ext_msg(this, "invariant"));
assert(length() == 0 && _head == NULL, hrs_ext_msg(this, "invariant"));
_head = from_list->_head;
}
_tail = from_list->_tail;
@ -301,12 +301,12 @@ void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
}
void HeapRegionLinkedList::remove_all() {
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
verify_optional();
HeapRegion* curr = _head;
while (curr != NULL) {
hrl_assert_region_ok(this, curr, this);
hrs_assert_region_ok(this, curr, this);
HeapRegion* next = curr->next();
curr->set_next(NULL);
@ -319,9 +319,9 @@ void HeapRegionLinkedList::remove_all() {
}
void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
hrl_assert_mt_safety_ok(this);
assert(target_count > 1, hrl_ext_msg(this, "pre-condition"));
assert(!is_empty(), hrl_ext_msg(this, "pre-condition"));
hrs_assert_mt_safety_ok(this);
assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
verify_optional();
DEBUG_ONLY(size_t old_length = length();)
@ -330,27 +330,27 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
HeapRegion* prev = NULL;
size_t count = 0;
while (curr != NULL) {
hrl_assert_region_ok(this, curr, this);
hrs_assert_region_ok(this, curr, this);
HeapRegion* next = curr->next();
if (curr->pending_removal()) {
assert(count < target_count,
hrl_err_msg("[%s] should not come across more regions "
hrs_err_msg("[%s] should not come across more regions "
"pending for removal than target_count: "SIZE_FORMAT,
name(), target_count));
if (prev == NULL) {
assert(_head == curr, hrl_ext_msg(this, "invariant"));
assert(_head == curr, hrs_ext_msg(this, "invariant"));
_head = next;
} else {
assert(_head != curr, hrl_ext_msg(this, "invariant"));
assert(_head != curr, hrs_ext_msg(this, "invariant"));
prev->set_next(next);
}
if (next == NULL) {
assert(_tail == curr, hrl_ext_msg(this, "invariant"));
assert(_tail == curr, hrs_ext_msg(this, "invariant"));
_tail = prev;
} else {
assert(_tail != curr, hrl_ext_msg(this, "invariant"));
assert(_tail != curr, hrs_ext_msg(this, "invariant"));
}
curr->set_next(NULL);
@ -371,10 +371,10 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
}
assert(count == target_count,
hrl_err_msg("[%s] count: "SIZE_FORMAT" should be == "
hrs_err_msg("[%s] count: "SIZE_FORMAT" should be == "
"target_count: "SIZE_FORMAT, name(), count, target_count));
assert(length() + target_count == old_length,
hrl_err_msg("[%s] new length should be consistent "
hrs_err_msg("[%s] new length should be consistent "
"new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" "
"target_count: "SIZE_FORMAT,
name(), length(), old_length, target_count));
@ -385,7 +385,7 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
void HeapRegionLinkedList::verify() {
// See comment in HeapRegionSetBase::verify() about MT safety and
// verification.
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
// This will also do the basic verification too.
verify_start();
@ -399,7 +399,7 @@ void HeapRegionLinkedList::verify() {
count += 1;
guarantee(count < _unrealistically_long_length,
hrl_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
hrs_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
"seems very long, is there maybe a cycle? "
"curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
"prev1: "PTR_FORMAT" length: "SIZE_FORMAT,
@ -410,7 +410,7 @@ void HeapRegionLinkedList::verify() {
curr = curr->next();
}
guarantee(_tail == prev0, hrl_ext_msg(this, "post-condition"));
guarantee(_tail == prev0, hrs_ext_msg(this, "post-condition"));
verify_end();
}

View file

@ -28,8 +28,8 @@
#include "gc_implementation/g1/heapRegion.hpp"
// Large buffer for some cases where the output might be larger than normal.
#define HRL_ERR_MSG_BUFSZ 512
typedef FormatBuffer<HRL_ERR_MSG_BUFSZ> hrl_err_msg;
#define HRS_ERR_MSG_BUFSZ 512
typedef FormatBuffer<HRS_ERR_MSG_BUFSZ> hrs_err_msg;
// Set verification will be forced either if someone defines
// HEAP_REGION_SET_FORCE_VERIFY to be 1, or in builds in which
@ -45,10 +45,10 @@ typedef FormatBuffer<HRL_ERR_MSG_BUFSZ> hrl_err_msg;
// (e.g., length, region num, used bytes sum) plus any shared
// functionality (e.g., verification).
class hrl_ext_msg;
class hrs_ext_msg;
class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
friend class hrl_ext_msg;
friend class hrs_ext_msg;
protected:
static size_t calculate_region_num(HeapRegion* hr);
@ -104,10 +104,10 @@ protected:
virtual bool check_mt_safety() { return true; }
// fill_in_ext_msg() writes the values of the set's attributes
// in the custom err_msg (hrl_ext_msg). fill_in_ext_msg_extra()
// in the custom err_msg (hrs_ext_msg). fill_in_ext_msg_extra()
// allows subclasses to append further information.
virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg) { }
void fill_in_ext_msg(hrl_ext_msg* msg, const char* message);
virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg) { }
void fill_in_ext_msg(hrs_ext_msg* msg, const char* message);
// It updates the fields of the set to reflect hr being added to
// the set.
@ -170,9 +170,9 @@ public:
// the fields of the associated set. This can be very helpful in
// diagnosing failures.
class hrl_ext_msg : public hrl_err_msg {
class hrs_ext_msg : public hrs_err_msg {
public:
hrl_ext_msg(HeapRegionSetBase* set, const char* message) : hrl_err_msg("") {
hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("") {
set->fill_in_ext_msg(this, message);
}
};
@ -180,25 +180,25 @@ public:
// These two macros are provided for convenience, to keep the uses of
// these two asserts a bit more concise.
#define hrl_assert_mt_safety_ok(_set_) \
#define hrs_assert_mt_safety_ok(_set_) \
do { \
assert((_set_)->check_mt_safety(), hrl_ext_msg((_set_), "MT safety")); \
assert((_set_)->check_mt_safety(), hrs_ext_msg((_set_), "MT safety")); \
} while (0)
#define hrl_assert_region_ok(_set_, _hr_, _expected_) \
#define hrs_assert_region_ok(_set_, _hr_, _expected_) \
do { \
assert((_set_)->verify_region((_hr_), (_expected_)), \
hrl_ext_msg((_set_), "region verification")); \
hrs_ext_msg((_set_), "region verification")); \
} while (0)
//////////////////// HeapRegionSet ////////////////////
#define hrl_assert_sets_match(_set1_, _set2_) \
#define hrs_assert_sets_match(_set1_, _set2_) \
do { \
assert(((_set1_)->regions_humongous() == \
(_set2_)->regions_humongous()) && \
((_set1_)->regions_empty() == (_set2_)->regions_empty()), \
hrl_err_msg("the contents of set %s and set %s should match", \
hrs_err_msg("the contents of set %s and set %s should match", \
(_set1_)->name(), (_set2_)->name())); \
} while (0)
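
The assertion helpers above use the usual do { ... } while (0) macro wrapper so that each expansion behaves as exactly one statement and consumes the trailing semicolon, even as the body of an unbraced if/else. A minimal standalone illustration of the idiom (a generic sketch, not the HotSpot macros themselves):

#include <cassert>
#include <cstdio>

// Hypothetical checking macro, wrapped the same way as hrs_assert_mt_safety_ok():
// the do/while(0) makes the expansion a single statement ending at the caller's ';'.
#define CHECK_NON_NEGATIVE(x)                                 \
  do {                                                        \
    if ((x) < 0) std::printf("bad value: %d\n", (x));         \
    assert((x) >= 0);                                         \
  } while (0)

int main() {
  int len = 3;
  if (len != 0)
    CHECK_NON_NEGATIVE(len);   // expands cleanly as the single body of the 'if'
  else
    std::printf("empty\n");
  return 0;
}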
@ -267,7 +267,7 @@ private:
HeapRegion* tail() { return _tail; }
protected:
virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg);
virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg);
// See the comment for HeapRegionSetBase::clear()
virtual void clear();
@ -309,10 +309,10 @@ public:
virtual void print_on(outputStream* out, bool print_contents = false);
};
//////////////////// HeapRegionLinkedList ////////////////////
//////////////////// HeapRegionLinkedListIterator ////////////////////
// Iterator class that provides a convenient way to iterator over the
// regions in a HeapRegionLinkedList instance.
// Iterator class that provides a convenient way to iterate over the
// regions of a HeapRegionLinkedList instance.
class HeapRegionLinkedListIterator : public StackObj {
private:

View file

@ -42,8 +42,8 @@ inline void HeapRegionSetBase::update_for_addition(HeapRegion* hr) {
}
inline void HeapRegionSetBase::add_internal(HeapRegion* hr) {
hrl_assert_region_ok(this, hr, NULL);
assert(hr->next() == NULL, hrl_ext_msg(this, "should not already be linked"));
hrs_assert_region_ok(this, hr, NULL);
assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked"));
update_for_addition(hr);
hr->set_containing_set(this);
@ -51,7 +51,7 @@ inline void HeapRegionSetBase::add_internal(HeapRegion* hr) {
inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
// Assumes the caller has already verified the region.
assert(_length > 0, hrl_ext_msg(this, "pre-condition"));
assert(_length > 0, hrs_ext_msg(this, "pre-condition"));
_length -= 1;
size_t region_num_diff;
@ -61,22 +61,22 @@ inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
region_num_diff = calculate_region_num(hr);
}
assert(region_num_diff <= _region_num,
hrl_err_msg("[%s] region's region num: "SIZE_FORMAT" "
hrs_err_msg("[%s] region's region num: "SIZE_FORMAT" "
"should be <= region num: "SIZE_FORMAT,
name(), region_num_diff, _region_num));
_region_num -= region_num_diff;
size_t used_bytes = hr->used();
assert(used_bytes <= _total_used_bytes,
hrl_err_msg("[%s] region's used bytes: "SIZE_FORMAT" "
hrs_err_msg("[%s] region's used bytes: "SIZE_FORMAT" "
"should be <= used bytes: "SIZE_FORMAT,
name(), used_bytes, _total_used_bytes));
_total_used_bytes -= used_bytes;
}
inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) {
hrl_assert_region_ok(this, hr, this);
assert(hr->next() == NULL, hrl_ext_msg(this, "should already be unlinked"));
hrs_assert_region_ok(this, hr, this);
assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked"));
hr->set_containing_set(NULL);
update_for_removal(hr);
@ -85,13 +85,13 @@ inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) {
//////////////////// HeapRegionSet ////////////////////
inline void HeapRegionSet::add(HeapRegion* hr) {
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
// add_internal() will verify the region.
add_internal(hr);
}
inline void HeapRegionSet::remove(HeapRegion* hr) {
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
// remove_internal() will verify the region.
remove_internal(hr);
}
@ -101,8 +101,8 @@ inline void HeapRegionSet::remove_with_proxy(HeapRegion* hr,
// No need to do the MT safety check here given that this method
// does not update the contents of the set but instead accumulates
// the changes in proxy_set which is assumed to be thread-local.
hrl_assert_sets_match(this, proxy_set);
hrl_assert_region_ok(this, hr, this);
hrs_assert_sets_match(this, proxy_set);
hrs_assert_region_ok(this, hr, this);
hr->set_containing_set(NULL);
proxy_set->update_for_addition(hr);
@ -111,10 +111,10 @@ inline void HeapRegionSet::remove_with_proxy(HeapRegion* hr,
//////////////////// HeapRegionLinkedList ////////////////////
inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) {
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
assert((length() == 0 && _head == NULL && _tail == NULL) ||
(length() > 0 && _head != NULL && _tail != NULL),
hrl_ext_msg(this, "invariant"));
hrs_ext_msg(this, "invariant"));
// add_internal() will verify the region.
add_internal(hr);
@ -128,10 +128,10 @@ inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) {
}
inline HeapRegion* HeapRegionLinkedList::remove_head() {
hrl_assert_mt_safety_ok(this);
assert(!is_empty(), hrl_ext_msg(this, "the list should not be empty"));
hrs_assert_mt_safety_ok(this);
assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty"));
assert(length() > 0 && _head != NULL && _tail != NULL,
hrl_ext_msg(this, "invariant"));
hrs_ext_msg(this, "invariant"));
// We need to unlink it first.
HeapRegion* hr = _head;
@ -147,7 +147,7 @@ inline HeapRegion* HeapRegionLinkedList::remove_head() {
}
inline HeapRegion* HeapRegionLinkedList::remove_head_or_null() {
hrl_assert_mt_safety_ok(this);
hrs_assert_mt_safety_ok(this);
if (!is_empty()) {
return remove_head();

View file

@ -52,7 +52,7 @@ bool MasterFreeRegionList::check_mt_safety() {
FreeList_lock->owned_by_self())) ||
(!SafepointSynchronize::is_at_safepoint() &&
Heap_lock->owned_by_self()),
hrl_ext_msg(this, "master free list MT safety protocol"));
hrs_ext_msg(this, "master free list MT safety protocol"));
return FreeRegionList::check_mt_safety();
}
@ -65,7 +65,7 @@ bool SecondaryFreeRegionList::check_mt_safety() {
// while holding the SecondaryFreeList_lock.
guarantee(SecondaryFreeList_lock->owned_by_self(),
hrl_ext_msg(this, "secondary free list MT safety protocol"));
hrs_ext_msg(this, "secondary free list MT safety protocol"));
return FreeRegionList::check_mt_safety();
}
@ -81,7 +81,7 @@ const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) {
return HeapRegionSet::verify_region_extra(hr);
}
//////////////////// HumongousRegionSet ////////////////////
//////////////////// MasterHumongousRegionSet ////////////////////
bool MasterHumongousRegionSet::check_mt_safety() {
// Master Humongous Set MT safety protocol:
@ -97,6 +97,6 @@ bool MasterHumongousRegionSet::check_mt_safety() {
OldSets_lock->owned_by_self())) ||
(!SafepointSynchronize::is_at_safepoint() &&
Heap_lock->owned_by_self()),
hrl_ext_msg(this, "master humongous set MT safety protocol"));
hrs_ext_msg(this, "master humongous set MT safety protocol"));
return HumongousRegionSet::check_mt_safety();
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1530,13 +1530,15 @@ void ParNewGeneration::ref_processor_init()
{
if (_ref_processor == NULL) {
// Allocate and initialize a reference processor
_ref_processor = ReferenceProcessor::create_ref_processor(
_reserved, // span
_ref_processor =
new ReferenceProcessor(_reserved, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(int) ParallelGCThreads, // mt processing degree
refs_discovery_is_mt(), // mt discovery
(int) ParallelGCThreads, // mt discovery degree
refs_discovery_is_atomic(), // atomic_discovery
refs_discovery_is_mt(), // mt_discovery
NULL, // is_alive_non_header
ParallelGCThreads,
ParallelRefProcEnabled);
false); // write barrier for next field updates
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,9 +58,7 @@ CollectorCounters* PSMarkSweep::_counters = NULL;
void PSMarkSweep::initialize() {
MemRegion mr = Universe::heap()->reserved_region();
_ref_processor = new ReferenceProcessor(mr,
true, // atomic_discovery
false); // mt_discovery
_ref_processor = new ReferenceProcessor(mr); // a vanilla ref proc
_counters = new CollectorCounters("PSMarkSweep", 1);
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -827,13 +827,15 @@ void PSParallelCompact::post_initialize() {
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
MemRegion mr = heap->reserved_region();
_ref_processor = ReferenceProcessor::create_ref_processor(
mr, // span
_ref_processor =
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(int) ParallelGCThreads, // mt processing degree
true, // mt discovery
(int) ParallelGCThreads, // mt discovery degree
true, // atomic_discovery
true, // mt_discovery
&_is_alive_closure,
ParallelGCThreads,
ParallelRefProcEnabled);
&_is_alive_closure, // non-header is alive closure
false); // write barrier for next field updates
_counters = new CollectorCounters("PSParallelCompact", 1);
// Initialize static fields in ParCompactionManager.

View file

@ -411,7 +411,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
template <class T> void PSPromotionManager::process_array_chunk_work(
oop obj,
int start, int end) {
assert(start < end, "invariant");
assert(start <= end, "invariant");
T* const base = (T*)objArrayOop(obj)->base();
T* p = base + start;
T* const chunk_end = base + end;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -796,13 +796,15 @@ void PSScavenge::initialize() {
// Initialize ref handling object for scavenging.
MemRegion mr = young_gen->reserved();
_ref_processor = ReferenceProcessor::create_ref_processor(
mr, // span
_ref_processor =
new ReferenceProcessor(mr, // span
ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
(int) ParallelGCThreads, // mt processing degree
true, // mt discovery
(int) ParallelGCThreads, // mt discovery degree
true, // atomic_discovery
true, // mt_discovery
NULL, // is_alive_non_header
ParallelGCThreads,
ParallelRefProcEnabled);
NULL, // header provides liveness info
false); // next field updates do not need write barrier
// Cache the cardtable
BarrierSet* bs = Universe::heap()->barrier_set();

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -293,10 +293,11 @@ void GenCollectorPolicy::initialize_size_info() {
// Determine maximum size of gen0
size_t max_new_size = 0;
if (FLAG_IS_CMDLINE(MaxNewSize)) {
if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
if (MaxNewSize < min_alignment()) {
max_new_size = min_alignment();
} else if (MaxNewSize >= max_heap_byte_size()) {
}
if (MaxNewSize >= max_heap_byte_size()) {
max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
min_alignment());
warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
@ -333,7 +334,7 @@ void GenCollectorPolicy::initialize_size_info() {
assert(max_new_size > 0, "All paths should set max_new_size");
// Given the maximum gen0 size, determine the initial and
// minimum sizes.
// minimum gen0 sizes.
if (max_heap_byte_size() == min_heap_byte_size()) {
// The maximum and minimum heap sizes are the same so
@ -396,7 +397,7 @@ void GenCollectorPolicy::initialize_size_info() {
}
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 "
gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
min_gen0_size(), initial_gen0_size(), max_gen0_size());
}
@ -448,7 +449,7 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
// At this point the minimum, initial and maximum sizes
// of the overall heap and of gen0 have been determined.
// The maximum gen1 size can be determined from the maximum gen0
// and maximum heap size since not explicit flags exits
// and maximum heap size since no explicit flags exist
// for setting the gen1 maximum.
_max_gen1_size = max_heap_byte_size() - _max_gen0_size;
_max_gen1_size =
@ -500,7 +501,7 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
min_heap_byte_size(), OldSize)) {
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 "
gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
min_gen0_size(), initial_gen0_size(), max_gen0_size());
}
@ -509,7 +510,7 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
initial_heap_byte_size(), OldSize)) {
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("Minimum gen0 " SIZE_FORMAT " Initial gen0 "
gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
min_gen0_size(), initial_gen0_size(), max_gen0_size());
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -83,14 +83,11 @@ void Generation::print_heap_change(size_t prev_used) const {
}
// By default we get a single threaded default reference processor;
// generations needing multi-threaded refs discovery override this method.
// generations needing multi-threaded refs processing or discovery override this method.
void Generation::ref_processor_init() {
assert(_ref_processor == NULL, "a reference processor already exists");
assert(!_reserved.is_empty(), "empty generation?");
_ref_processor =
new ReferenceProcessor(_reserved, // span
refs_discovery_is_atomic(), // atomic_discovery
refs_discovery_is_mt()); // mt_discovery
_ref_processor = new ReferenceProcessor(_reserved); // a vanilla reference processor
if (_ref_processor == NULL) {
vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -102,40 +102,17 @@ void ReferenceProcessor::init_statics() {
"Unrecongnized RefDiscoveryPolicy");
}
ReferenceProcessor*
ReferenceProcessor::create_ref_processor(MemRegion span,
bool atomic_discovery,
bool mt_discovery,
BoolObjectClosure* is_alive_non_header,
int parallel_gc_threads,
bool mt_processing,
bool dl_needs_barrier) {
int mt_degree = 1;
if (parallel_gc_threads > 1) {
mt_degree = parallel_gc_threads;
}
ReferenceProcessor* rp =
new ReferenceProcessor(span, atomic_discovery,
mt_discovery, mt_degree,
mt_processing && (parallel_gc_threads > 0),
dl_needs_barrier);
if (rp == NULL) {
vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
}
rp->set_is_alive_non_header(is_alive_non_header);
rp->setup_policy(false /* default soft ref policy */);
return rp;
}
ReferenceProcessor::ReferenceProcessor(MemRegion span,
bool atomic_discovery,
bool mt_discovery,
int mt_degree,
bool mt_processing,
int mt_processing_degree,
bool mt_discovery,
int mt_discovery_degree,
bool atomic_discovery,
BoolObjectClosure* is_alive_non_header,
bool discovered_list_needs_barrier) :
_discovering_refs(false),
_enqueuing_is_done(false),
_is_alive_non_header(NULL),
_is_alive_non_header(is_alive_non_header),
_discovered_list_needs_barrier(discovered_list_needs_barrier),
_bs(NULL),
_processing_is_mt(mt_processing),
@ -144,8 +121,8 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
_span = span;
_discovery_is_atomic = atomic_discovery;
_discovery_is_mt = mt_discovery;
_num_q = mt_degree;
_max_num_q = mt_degree;
_num_q = MAX2(1, mt_processing_degree);
_max_num_q = MAX2(_num_q, mt_discovery_degree);
_discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
if (_discoveredSoftRefs == NULL) {
vm_exit_during_initialization("Could not allocated RefProc Array");
@ -163,6 +140,7 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
if (discovered_list_needs_barrier) {
_bs = Universe::heap()->barrier_set();
}
setup_policy(false /* default soft ref policy */);
}
#ifndef PRODUCT
@ -405,15 +383,14 @@ public:
{ }
virtual void work(unsigned int work_id) {
assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
// Simplest first cut: static partitioning.
int index = work_id;
// The increment on "index" must correspond to the maximum number of queues
// (n_queues) with which that ReferenceProcessor was created. That
// is because of the "clever" way the discovered references lists were
// allocated and are indexed into. That number is ParallelGCThreads
// currently. Assert that.
assert(_n_queues == (int) ParallelGCThreads, "Different number not expected");
// allocated and are indexed into.
assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
for (int j = 0;
j < subclasses_of_ref;
j++, index += _n_queues) {
@ -672,7 +649,7 @@ ReferenceProcessor::pp2_work(DiscoveredList& refs_list,
}
}
NOT_PRODUCT(
if (PrintGCDetails && TraceReferenceGC) {
if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
"Refs in discovered list " INTPTR_FORMAT,
iter.removed(), iter.processed(), (address)refs_list.head());
@ -711,7 +688,7 @@ ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list,
// Now close the newly reachable set
complete_gc->do_void();
NOT_PRODUCT(
if (PrintGCDetails && TraceReferenceGC) {
if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
"Refs in discovered list " INTPTR_FORMAT,
iter.removed(), iter.processed(), (address)refs_list.head());
@ -951,7 +928,7 @@ ReferenceProcessor::process_discovered_reflist(
}
if (PrintReferenceGC && PrintGCDetails) {
size_t total = 0;
for (int i = 0; i < _num_q; ++i) {
for (int i = 0; i < _max_num_q; ++i) {
total += refs_lists[i].length();
}
gclog_or_tty->print(", %u refs", total);
@ -967,7 +944,7 @@ ReferenceProcessor::process_discovered_reflist(
RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
task_executor->execute(phase1);
} else {
for (int i = 0; i < _num_q; i++) {
for (int i = 0; i < _max_num_q; i++) {
process_phase1(refs_lists[i], policy,
is_alive, keep_alive, complete_gc);
}
@ -983,7 +960,7 @@ ReferenceProcessor::process_discovered_reflist(
RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
task_executor->execute(phase2);
} else {
for (int i = 0; i < _num_q; i++) {
for (int i = 0; i < _max_num_q; i++) {
process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
}
}
@ -994,7 +971,7 @@ ReferenceProcessor::process_discovered_reflist(
RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
task_executor->execute(phase3);
} else {
for (int i = 0; i < _num_q; i++) {
for (int i = 0; i < _max_num_q; i++) {
process_phase3(refs_lists[i], clear_referent,
is_alive, keep_alive, complete_gc);
}
@ -1008,7 +985,7 @@ void ReferenceProcessor::clean_up_discovered_references() {
// for (int j = 0; j < _num_q; j++) {
// int index = i * _max_num_q + j;
for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
gclog_or_tty->print_cr(
"\nScrubbing %s discovered list of Null referents",
list_name(i));
@ -1350,7 +1327,7 @@ void ReferenceProcessor::preclean_discovered_references(
{
TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty);
for (int i = 0; i < _num_q; i++) {
for (int i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;
}
@ -1363,7 +1340,7 @@ void ReferenceProcessor::preclean_discovered_references(
{
TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty);
for (int i = 0; i < _num_q; i++) {
for (int i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;
}
@ -1376,7 +1353,7 @@ void ReferenceProcessor::preclean_discovered_references(
{
TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty);
for (int i = 0; i < _num_q; i++) {
for (int i = 0; i < _max_num_q; i++) {
if (yield->should_return()) {
return;
}
@ -1433,7 +1410,7 @@ ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
complete_gc->do_void();
NOT_PRODUCT(
if (PrintGCDetails && PrintReferenceGC) {
if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
"Refs in discovered list " INTPTR_FORMAT,
iter.removed(), iter.processed(), (address)refs_list.head());
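
The static-partitioning loop in the phase tasks above walks a flat array of discovered lists laid out as subclasses_of_ref groups of _max_num_q queues: each worker starts at its own work_id and strides by the number of queues. A standalone sketch of that indexing, with hypothetical sizes chosen only for illustration:

#include <cstdio>

// Hypothetical sizes for illustration: 4 reference subclasses and a
// discovery degree of 3 queues (not asserted to be the real values).
const int subclasses_of_ref = 4;
const int max_num_q         = 3;

int main() {
  // The list for (subclass j, queue i) lives at index j * max_num_q + i.
  for (int work_id = 0; work_id < max_num_q; work_id++) {
    int index = work_id;                        // static partitioning: start at own queue
    for (int j = 0; j < subclasses_of_ref; j++, index += max_num_q) {
      std::printf("worker %d handles list %d (subclass %d, queue %d)\n",
                  work_id, index, j, work_id);
    }
  }
  return 0;
}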

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -71,7 +71,7 @@ class ReferenceProcessor : public CHeapObj {
bool _enqueuing_is_done; // true if all weak references enqueued
bool _processing_is_mt; // true during phases when
// reference processing is MT.
int _next_id; // round-robin counter in
int _next_id; // round-robin mod _num_q counter in
// support of work distribution
// For collectors that do not keep GC marking information
@ -103,7 +103,8 @@ class ReferenceProcessor : public CHeapObj {
public:
int num_q() { return _num_q; }
void set_mt_degree(int v) { _num_q = v; }
int max_num_q() { return _max_num_q; }
void set_active_mt_degree(int v) { _num_q = v; }
DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
static oop sentinel_ref() { return _sentinelRef; }
static oop* adr_sentinel_ref() { return &_sentinelRef; }
@ -216,6 +217,7 @@ class ReferenceProcessor : public CHeapObj {
VoidClosure* complete_gc,
YieldClosure* yield);
// round-robin mod _num_q (note: _not_ mod _max_num_q)
int next_id() {
int id = _next_id;
if (++_next_id == _num_q) {
@ -258,20 +260,12 @@ class ReferenceProcessor : public CHeapObj {
_next_id(0)
{ }
ReferenceProcessor(MemRegion span, bool atomic_discovery,
bool mt_discovery,
int mt_degree = 1,
bool mt_processing = false,
bool discovered_list_needs_barrier = false);
// Allocates and initializes a reference processor.
static ReferenceProcessor* create_ref_processor(
MemRegion span,
bool atomic_discovery,
bool mt_discovery,
// Default parameters give you a vanilla reference processor.
ReferenceProcessor(MemRegion span,
bool mt_processing = false, int mt_processing_degree = 1,
bool mt_discovery = false, int mt_discovery_degree = 1,
bool atomic_discovery = true,
BoolObjectClosure* is_alive_non_header = NULL,
int parallel_gc_threads = 1,
bool mt_processing = false,
bool discovered_list_needs_barrier = false);
// RefDiscoveryPolicy values
@ -397,20 +391,20 @@ class ReferenceProcessorSpanMutator: StackObj {
// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTMutator: StackObj {
class ReferenceProcessorMTDiscoveryMutator: StackObj {
private:
ReferenceProcessor* _rp;
bool _saved_mt;
public:
ReferenceProcessorMTMutator(ReferenceProcessor* rp,
ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
bool mt):
_rp(rp) {
_saved_mt = _rp->discovery_is_mt();
_rp->set_mt_discovery(mt);
}
~ReferenceProcessorMTMutator() {
~ReferenceProcessorMTDiscoveryMutator() {
_rp->set_mt_discovery(_saved_mt);
}
};

View file

@ -242,6 +242,7 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
{ "MaxLiveObjectEvacuationRatio",
JDK_Version::jdk_update(6,24), JDK_Version::jdk(8) },
{ "ForceSharedSpaces", JDK_Version::jdk_update(6,25), JDK_Version::jdk(8) },
{ NULL, JDK_Version(0), JDK_Version(0) }
};
@ -1003,28 +1004,6 @@ static void no_shared_spaces() {
}
}
void Arguments::check_compressed_oops_compat() {
#ifdef _LP64
assert(UseCompressedOops, "Precondition");
// Is it on by default or set on ergonomically
bool is_on_by_default = FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops);
// If dumping an archive or forcing its use, disable compressed oops if possible
if (DumpSharedSpaces || RequireSharedSpaces) {
if (is_on_by_default) {
FLAG_SET_DEFAULT(UseCompressedOops, false);
return;
} else {
vm_exit_during_initialization(
"Class Data Sharing is not supported with compressed oops yet", NULL);
}
} else if (UseSharedSpaces) {
// UseSharedSpaces is on by default. With compressed oops, we turn it off.
FLAG_SET_DEFAULT(UseSharedSpaces, false);
}
#endif
}
void Arguments::set_tiered_flags() {
// With tiered, set default policy to AdvancedThresholdPolicy, which is 3.
if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) {
@ -1123,40 +1102,28 @@ void Arguments::set_cms_and_parnew_gc_flags() {
set_parnew_gc_flags();
}
// Now make adjustments for CMS
size_t young_gen_per_worker;
intx new_ratio;
size_t min_new_default;
intx tenuring_default;
if (CMSUseOldDefaults) { // old defaults: "old" as of 6.0
if FLAG_IS_DEFAULT(CMSYoungGenPerWorker) {
FLAG_SET_ERGO(intx, CMSYoungGenPerWorker, 4*M);
}
young_gen_per_worker = 4*M;
new_ratio = (intx)15;
min_new_default = 4*M;
tenuring_default = (intx)0;
} else { // new defaults: "new" as of 6.0
young_gen_per_worker = CMSYoungGenPerWorker;
new_ratio = (intx)7;
min_new_default = 16*M;
tenuring_default = (intx)4;
}
// MaxHeapSize is aligned down in collectorPolicy
size_t max_heap = align_size_down(MaxHeapSize,
CardTableRS::ct_max_alignment_constraint());
// Preferred young gen size for "short" pauses
// Now make adjustments for CMS
intx tenuring_default = (intx)6;
size_t young_gen_per_worker = CMSYoungGenPerWorker;
// Preferred young gen size for "short" pauses:
// upper bound depends on # of threads and NewRatio.
const uintx parallel_gc_threads =
(ParallelGCThreads == 0 ? 1 : ParallelGCThreads);
const size_t preferred_max_new_size_unaligned =
ScaleForWordSize(young_gen_per_worker * parallel_gc_threads);
const size_t preferred_max_new_size =
MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * parallel_gc_threads));
size_t preferred_max_new_size =
align_size_up(preferred_max_new_size_unaligned, os::vm_page_size());
// Unless explicitly requested otherwise, size young gen
// for "short" pauses ~ 4M*ParallelGCThreads
// for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads
// If either MaxNewSize or NewRatio is set on the command line,
// assume the user is trying to set the size of the young gen.
if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) {
// Set MaxNewSize to our calculated preferred_max_new_size unless
@ -1169,49 +1136,13 @@ void Arguments::set_cms_and_parnew_gc_flags() {
}
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
tty->print_cr("Ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
tty->print_cr("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
}
// Unless explicitly requested otherwise, prefer a large
// Old to Young gen size so as to shift the collection load
// to the old generation concurrent collector
// If this is only guarded by FLAG_IS_DEFAULT(NewRatio)
// then NewSize and OldSize may be calculated. That would
// generally lead to some differences with ParNewGC for which
// there was no obvious reason. Also limit to the case where
// MaxNewSize has not been set.
FLAG_SET_ERGO(intx, NewRatio, MAX2(NewRatio, new_ratio));
// Code along this path potentially sets NewSize and OldSize
// Calculate the desired minimum size of the young gen but if
// NewSize has been set on the command line, use it here since
// it should be the final value.
size_t min_new;
if (FLAG_IS_DEFAULT(NewSize)) {
min_new = align_size_up(ScaleForWordSize(min_new_default),
os::vm_page_size());
} else {
min_new = NewSize;
}
size_t prev_initial_size = InitialHeapSize;
if (prev_initial_size != 0 && prev_initial_size < min_new + OldSize) {
FLAG_SET_ERGO(uintx, InitialHeapSize, min_new + OldSize);
// Currently minimum size and the initial heap sizes are the same.
set_min_heap_size(InitialHeapSize);
if (PrintGCDetails && Verbose) {
warning("Initial heap size increased to " SIZE_FORMAT " M from "
SIZE_FORMAT " M; use -XX:NewSize=... for finer control.",
InitialHeapSize/M, prev_initial_size/M);
}
}
// MaxHeapSize is aligned down in collectorPolicy
size_t max_heap =
align_size_down(MaxHeapSize,
CardTableRS::ct_max_alignment_constraint());
assert(max_heap >= InitialHeapSize, "Error");
assert(max_heap >= NewSize, "Error");
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
@ -1220,7 +1151,11 @@ void Arguments::set_cms_and_parnew_gc_flags() {
" max_heap: " SIZE_FORMAT,
min_heap_size(), InitialHeapSize, max_heap);
}
if (max_heap > min_new) {
size_t min_new = preferred_max_new_size;
if (FLAG_IS_CMDLINE(NewSize)) {
min_new = NewSize;
}
if (max_heap > min_new && min_heap_size() > min_new) {
// Unless explicitly requested otherwise, make young gen
// at least min_new, and at most preferred_max_new_size.
if (FLAG_IS_DEFAULT(NewSize)) {
@ -1228,18 +1163,17 @@ void Arguments::set_cms_and_parnew_gc_flags() {
FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize));
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
tty->print_cr("Ergo set NewSize: " SIZE_FORMAT, NewSize);
tty->print_cr("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
}
}
// Unless explicitly requested otherwise, size old gen
// so that it's at least 3X of NewSize to begin with;
// later NewRatio will decide how it grows; see above.
// so it's NewRatio x of NewSize.
if (FLAG_IS_DEFAULT(OldSize)) {
if (max_heap > NewSize) {
FLAG_SET_ERGO(uintx, OldSize, MIN2(3*NewSize, max_heap - NewSize));
FLAG_SET_ERGO(uintx, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
tty->print_cr("Ergo set OldSize: " SIZE_FORMAT, OldSize);
tty->print_cr("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
}
}
}
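
To see how the simplified sizing above plays out, take hypothetical values chosen only for illustration (ignoring ScaleForWordSize and page alignment): MaxHeapSize = 2048M, NewRatio = 2, ParallelGCThreads = 4, CMSYoungGenPerWorker = 64M. Then preferred_max_new_size = MIN2(2048M / (2 + 1), 64M * 4) = MIN2(682M, 256M) = 256M, so when neither MaxNewSize nor NewRatio was set on the command line, MaxNewSize would be sized ergonomically to roughly 256M.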
@ -1383,7 +1317,7 @@ bool Arguments::should_auto_select_low_pause_collector() {
void Arguments::set_ergonomics_flags() {
// Parallel GC is not compatible with sharing. If one specifies
// that they want sharing explicitly, do not set ergonomics flags.
if (DumpSharedSpaces || ForceSharedSpaces) {
if (DumpSharedSpaces || RequireSharedSpaces) {
return;
}
@ -1690,13 +1624,13 @@ bool Arguments::verify_interval(uintx val, uintx min,
}
bool Arguments::verify_min_value(intx val, intx min, const char* name) {
// Returns true if given value is greater than specified min threshold
// Returns true if given value is at least specified min threshold
// false, otherwise.
if (val >= min ) {
return true;
}
jio_fprintf(defaultStream::error_stream(),
"%s of " INTX_FORMAT " is invalid; must be greater than " INTX_FORMAT "\n",
"%s of " INTX_FORMAT " is invalid; must be at least " INTX_FORMAT "\n",
name, val, min);
return false;
}
@ -1846,33 +1780,6 @@ bool Arguments::check_vm_args_consistency() {
status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
// Check whether user-specified sharing option conflicts with GC or page size.
// Both sharing and large pages are enabled by default on some platforms;
// large pages override sharing only if explicitly set on the command line.
const bool cannot_share = UseConcMarkSweepGC || CMSIncrementalMode ||
UseG1GC || UseParNewGC || UseParallelGC || UseParallelOldGC ||
UseLargePages && FLAG_IS_CMDLINE(UseLargePages);
if (cannot_share) {
// Either force sharing on by forcing the other options off, or
// force sharing off.
if (DumpSharedSpaces || ForceSharedSpaces) {
jio_fprintf(defaultStream::error_stream(),
"Using Serial GC and default page size because of %s\n",
ForceSharedSpaces ? "-Xshare:on" : "-Xshare:dump");
force_serial_gc();
FLAG_SET_DEFAULT(UseLargePages, false);
} else {
if (UseSharedSpaces && Verbose) {
jio_fprintf(defaultStream::error_stream(),
"Turning off use of shared archive because of "
"choice of garbage collector or large pages\n");
}
no_shared_spaces();
}
} else if (UseLargePages && (UseSharedSpaces || DumpSharedSpaces)) {
FLAG_SET_DEFAULT(UseLargePages, false);
}
status = status && check_gc_consistency();
status = status && check_stack_pages();
@ -1950,6 +1857,8 @@ bool Arguments::check_vm_args_consistency() {
status = false;
}
status = status && verify_min_value(ParGCArrayScanChunk, 1, "ParGCArrayScanChunk");
#ifndef SERIALGC
if (UseG1GC) {
status = status && verify_percentage(InitiatingHeapOccupancyPercent,
@ -2413,9 +2322,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
} else if (match_option(option, "-Xshare:on", &tail)) {
FLAG_SET_CMDLINE(bool, UseSharedSpaces, true);
FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true);
#ifdef TIERED
FLAG_SET_CMDLINE(bool, ForceSharedSpaces, true);
#endif // TIERED
// -Xshare:auto
} else if (match_option(option, "-Xshare:auto", &tail)) {
FLAG_SET_CMDLINE(bool, UseSharedSpaces, true);
@ -2912,6 +2818,36 @@ jint Arguments::parse_options_environment_variable(const char* name, SysClassPat
return JNI_OK;
}
void Arguments::set_shared_spaces_flags() {
// Check whether class data sharing settings conflict with GC, compressed oops
// or page size, and fix them up. Explicit sharing options override other
// settings.
const bool cannot_share = UseConcMarkSweepGC || CMSIncrementalMode ||
UseG1GC || UseParNewGC || UseParallelGC || UseParallelOldGC ||
UseCompressedOops || UseLargePages && FLAG_IS_CMDLINE(UseLargePages);
const bool must_share = DumpSharedSpaces || RequireSharedSpaces;
const bool might_share = must_share || UseSharedSpaces;
if (cannot_share) {
if (must_share) {
warning("selecting serial gc and disabling large pages %s"
"because of %s", "" LP64_ONLY("and compressed oops "),
DumpSharedSpaces ? "-Xshare:dump" : "-Xshare:on");
force_serial_gc();
FLAG_SET_CMDLINE(bool, UseLargePages, false);
LP64_ONLY(FLAG_SET_CMDLINE(bool, UseCompressedOops, false));
} else {
if (UseSharedSpaces && Verbose) {
warning("turning off use of shared archive because of "
"choice of garbage collector or large pages");
}
no_shared_spaces();
}
} else if (UseLargePages && might_share) {
// Disable large pages to allow shared spaces. This is sub-optimal, since
// there may not even be a shared archive to use.
FLAG_SET_DEFAULT(UseLargePages, false);
}
}
// Parse entry point called from JNI_CreateJavaVM
@ -3059,9 +2995,7 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
// Set flags based on ergonomics.
set_ergonomics_flags();
if (UseCompressedOops) {
check_compressed_oops_compat();
}
set_shared_spaces_flags();
// Check the GC selections again.
if (!check_gc_consistency()) {
@ -3079,23 +3013,18 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
}
#ifndef KERNEL
if (UseConcMarkSweepGC) {
// Set flags for CMS and ParNew. Check UseConcMarkSweep first
// to ensure that when both UseConcMarkSweepGC and UseParNewGC
// are true, we don't call set_parnew_gc_flags() as well.
set_cms_and_parnew_gc_flags();
} else {
// Set heap size based on available physical memory
set_heap_size();
// Set per-collector flags
if (UseParallelGC || UseParallelOldGC) {
set_parallel_gc_flags();
} else if (UseParNewGC) {
} else if (UseConcMarkSweepGC) { // should be done before ParNew check below
set_cms_and_parnew_gc_flags();
} else if (UseParNewGC) { // skipped if CMS is set above
set_parnew_gc_flags();
} else if (UseG1GC) {
set_g1_gc_flags();
}
}
#endif // KERNEL
#ifdef SERIALGC

View file

@ -301,8 +301,6 @@ class Arguments : AllStatic {
// Tiered
static void set_tiered_flags();
// Check compressed oops compatibility with other flags
static void check_compressed_oops_compat();
// CMS/ParNew garbage collectors
static void set_parnew_gc_flags();
static void set_cms_and_parnew_gc_flags();
@ -312,6 +310,7 @@ class Arguments : AllStatic {
static void set_g1_gc_flags();
// GC ergonomics
static void set_ergonomics_flags();
static void set_shared_spaces_flags();
// Setup heap size
static void set_heap_size();
// Based on automatic selection criteria, should the

View file

@ -1540,12 +1540,8 @@ class CommandLineFlags {
product(bool, AlwaysPreTouch, false, \
"It forces all freshly committed pages to be pre-touched.") \
\
product(bool, CMSUseOldDefaults, false, \
"A flag temporarily introduced to allow reverting to some " \
"older default settings; older as of 6.0") \
\
product(intx, CMSYoungGenPerWorker, 16*M, \
"The amount of young gen chosen by default per GC worker " \
product_pd(intx, CMSYoungGenPerWorker, \
"The maximum size of young gen chosen by default per GC worker " \
"thread available") \
\
product(bool, GCOverheadReporting, false, \
@ -3653,9 +3649,6 @@ class CommandLineFlags {
product(bool, RequireSharedSpaces, false, \
"Require shared spaces in the permanent generation") \
\
product(bool, ForceSharedSpaces, false, \
"Require shared spaces in the permanent generation") \
\
product(bool, DumpSharedSpaces, false, \
"Special mode: JVM reads a class list, loads classes, builds " \
"shared spaces, and dumps the shared spaces to a file to be " \

View file

@ -25,6 +25,7 @@
#ifndef SHARE_VM_UTILITIES_DEBUG_HPP
#define SHARE_VM_UTILITIES_DEBUG_HPP
#include "prims/jvm.h"
#include "utilities/globalDefinitions.hpp"
#include <stdarg.h>
@ -48,7 +49,7 @@ template <size_t bufsz>
FormatBuffer<bufsz>::FormatBuffer(const char * format, ...) {
va_list argp;
va_start(argp, format);
vsnprintf(_buf, bufsz, format, argp);
jio_vsnprintf(_buf, bufsz, format, argp);
va_end(argp);
}
@ -61,7 +62,7 @@ void FormatBuffer<bufsz>::append(const char* format, ...) {
va_list argp;
va_start(argp, format);
vsnprintf(buf_end, bufsz - len, format, argp);
jio_vsnprintf(buf_end, bufsz - len, format, argp);
va_end(argp);
}
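
The switch to jio_vsnprintf above keeps the formatting bounded to the fixed-size buffer and, presumably, consistently terminated across platforms. A minimal standalone stand-in for such a fixed-size format buffer (an illustration under those assumptions, not the HotSpot FormatBuffer or jio_vsnprintf implementation):

#include <cstdarg>
#include <cstddef>
#include <cstdio>

template <size_t bufsz>
class SimpleFormatBuffer {
 public:
  SimpleFormatBuffer(const char* format, ...) {
    va_list argp;
    va_start(argp, format);
    // vsnprintf bounds the write to the buffer; force NUL-termination as well,
    // so the result is always a valid C string even when the output is truncated.
    std::vsnprintf(_buf, bufsz, format, argp);
    _buf[bufsz - 1] = '\0';
    va_end(argp);
  }
  const char* buffer() const { return _buf; }
 private:
  char _buf[bufsz];
};

int main() {
  SimpleFormatBuffer<128> msg("[%s] used bytes: %zu should be == calc used bytes: %zu",
                              "free list", (size_t)1024, (size_t)2048);
  std::puts(msg.buffer());
  return 0;
}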

View file

@ -1185,7 +1185,7 @@ inline int build_int_from_shorts( jushort low, jushort high ) {
// '%d' formats to indicate a 64-bit quantity; commonly "l" (in LP64) or "ll"
// (in ILP32).
#define BOOL_TO_STR(__b) (__b) ? "true" : "false"
#define BOOL_TO_STR(_b_) ((_b_) ? "true" : "false")
// Format 32-bit quantities.
#define INT32_FORMAT "%d"
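
The added outer parentheses matter once BOOL_TO_STR expands inside a larger expression, because operators such as << bind more tightly than ?:. A standalone illustration of the difference (generic code, not HotSpot's):

#include <iostream>

#define BOOL_TO_STR_OLD(__b) (__b) ? "true" : "false"      // unparenthesized form
#define BOOL_TO_STR_NEW(_b_) ((_b_) ? "true" : "false")    // parenthesized form

int main() {
  bool flag = false;
  // Old form: parses as (std::cout << flag) ? "true" : "false"; the bool is
  // streamed as 0/1 and the string result of the conditional is silently
  // discarded (compilers may warn about the unused value).
  std::cout << BOOL_TO_STR_OLD(flag);
  std::cout << "\n";
  // New form: the whole conditional is one operand, so this prints "false".
  std::cout << BOOL_TO_STR_NEW(flag) << "\n";
  return 0;
}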

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,6 +36,20 @@
# include "thread_windows.inline.hpp"
#endif
// Task class hierarchy:
// AbstractGangTask
// AbstractGangTaskWOopQueues
//
// Gang/Group class hierarchy:
// AbstractWorkGang
// WorkGang
// FlexibleWorkGang
// YieldingFlexibleWorkGang (defined in another file)
//
// Worker class hierarchy:
// GangWorker (subclass of WorkerThread)
// YieldingFlexibleGangWorker (defined in another file)
// Forward declarations of classes defined here
class WorkGang;