6944166: G1: explicit GCs are not always handled correctly

G1 was not handling explicit GCs correctly in many ways. It does now. See the CR for the list of improvements contained in this changeset.

Reviewed-by: iveresov, ysr, johnc
Antonios Printezis 2010-06-28 14:13:17 -04:00
parent 968deb7658
commit dfc84e8c89
12 changed files with 291 additions and 71 deletions
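For illustration, here is a simplified, self-contained sketch of the dispatch that G1CollectedHeap::collect() performs after this change: an explicit GC cause is routed to an initial-mark evacuation pause (which starts a concurrent cycle) when ExplicitGCInvokesConcurrent is set, to a standard evacuation pause for GC-locker-induced collections, and to a stop-the-world Full GC otherwise. The names and flags below only loosely mirror the real ones; this is a sketch, not the HotSpot source.

#include <cstdio>

// Hypothetical stand-ins for the real GC causes and command-line flags.
enum class Cause { java_lang_system_gc, gc_locker, scavenge_alot, allocation_failure };
static bool ExplicitGCInvokesConcurrent = true;   // -XX:+ExplicitGCInvokesConcurrent
static bool GCLockerInvokesConcurrent   = false;  // -XX:+GCLockerInvokesConcurrent

// Loosely corresponds to G1CollectedHeap::should_do_concurrent_full_gc().
static bool should_do_concurrent_full_gc(Cause cause) {
  return (cause == Cause::gc_locker && GCLockerInvokesConcurrent) ||
         (cause == Cause::java_lang_system_gc && ExplicitGCInvokesConcurrent);
}

// The three-way dispatch sketched from the diff below.
static void collect(Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
    std::printf("initial-mark evacuation pause (starts a concurrent cycle)\n");
  } else if (cause == Cause::gc_locker || cause == Cause::scavenge_alot) {
    std::printf("standard evacuation pause\n");
  } else {
    std::printf("stop-the-world Full GC\n");
  }
}

int main() {
  collect(Cause::java_lang_system_gc);   // System.gc() with the flag set
  ExplicitGCInvokesConcurrent = false;
  collect(Cause::java_lang_system_gc);   // System.gc() without the flag -> Full GC
  collect(Cause::gc_locker);             // GC-locker-induced GC -> evacuation pause
  return 0;
}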


@@ -809,7 +809,8 @@ public:
}
};
void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
void G1CollectedHeap::do_collection(bool explicit_gc,
bool clear_all_soft_refs,
size_t word_size) {
if (GC_locker::check_active_before_gc()) {
return; // GC is disabled (e.g. JNI GetXXXCritical operation)
@@ -821,10 +822,6 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
Universe::print_heap_before_gc();
}
if (full && DisableExplicitGC) {
return;
}
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
@@ -837,9 +834,11 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
IsGCActiveMark x;
// Timing
bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
assert(!system_gc || explicit_gc, "invariant");
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
TraceTime t(full ? "Full GC (System.gc())" : "Full GC",
TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
PrintGC, true, gclog_or_tty);
TraceMemoryManagerStats tms(true /* fullGC */);
@@ -944,7 +943,7 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
heap_region_iterate(&rs_clear);
// Resize the heap if necessary.
resize_if_necessary_after_full_collection(full ? 0 : word_size);
resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
if (_cg1r->use_cache()) {
_cg1r->clear_and_record_card_counts();
@@ -1009,13 +1008,18 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
"young list should be empty at this point");
}
// Update the number of full collections that have been completed.
increment_full_collections_completed(false /* outer */);
if (PrintHeapAtGC) {
Universe::print_heap_after_gc();
}
}
void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
do_collection(true, clear_all_soft_refs, 0);
do_collection(true, /* explicit_gc */
clear_all_soft_refs,
0 /* word_size */);
}
// This code is mostly copied from TenuredGeneration.
@@ -1331,6 +1335,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_young_list(new YoungList(this)),
_gc_time_stamp(0),
_surviving_young_words(NULL),
_full_collections_completed(0),
_in_cset_fast_test(NULL),
_in_cset_fast_test_base(NULL),
_dirty_cards_region_list(NULL) {
@@ -1689,6 +1694,51 @@ size_t G1CollectedHeap::unsafe_max_alloc() {
return car->free();
}
bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
return
((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
(cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}
void G1CollectedHeap::increment_full_collections_completed(bool outer) {
MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
// We have already incremented _total_full_collections at the start
// of the GC, so total_full_collections() represents how many full
// collections have been started.
unsigned int full_collections_started = total_full_collections();
// Given that this method is called at the end of a Full GC or of a
// concurrent cycle, and those can be nested (i.e., a Full GC can
// interrupt a concurrent cycle), the number of full collections
// completed should be either one (in the case where there was no
// nesting) or two (when a Full GC interrupted a concurrent cycle)
// behind the number of full collections started.
// This is the case for the inner caller, i.e. a Full GC.
assert(outer ||
(full_collections_started == _full_collections_completed + 1) ||
(full_collections_started == _full_collections_completed + 2),
err_msg("for inner caller: full_collections_started = %u "
"is inconsistent with _full_collections_completed = %u",
full_collections_started, _full_collections_completed));
// This is the case for the outer caller, i.e. the concurrent cycle.
assert(!outer ||
(full_collections_started == _full_collections_completed + 1),
err_msg("for outer caller: full_collections_started = %u "
"is inconsistent with _full_collections_completed = %u",
full_collections_started, _full_collections_completed));
_full_collections_completed += 1;
// This notify_all() will ensure that a thread that called
// System.gc() (with ExplicitGCInvokesConcurrent set or not)
// and is waiting for a full GC to finish will be woken up. It is
// waiting in VM_G1IncCollectionPause::doit_epilogue().
FullGCCount_lock->notify_all();
}
void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
assert(Thread::current()->is_VM_thread(), "Precondition#1");
assert(Heap_lock->is_locked(), "Precondition#2");
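The assertions in increment_full_collections_completed() above encode a subtle relationship between the number of full collections started and completed. Below is a minimal, standalone illustration of that relationship, under the assumption that "started" is bumped when a Full GC or an initial-mark pause begins and "completed" when either finishes; it models only the documented invariant, not the HotSpot code itself.

#include <cassert>

// Hypothetical counters modelling total_full_collections() ("started") and
// _full_collections_completed ("completed"); this is not the HotSpot code.
struct FullGCCounters {
  unsigned started   = 0;   // incremented when a Full GC or initial-mark pause begins
  unsigned completed = 0;   // incremented when either of them finishes

  void begin() { started += 1; }

  void complete(bool outer) {
    // outer == true for the concurrent cycle, false for a Full GC, mirroring
    // the two assertions in increment_full_collections_completed().
    if (outer) {
      assert(started == completed + 1);
    } else {
      assert(started == completed + 1 || started == completed + 2);
    }
    completed += 1;
  }
};

int main() {
  FullGCCounters c;
  c.begin();           // initial-mark pause starts a concurrent cycle
  c.begin();           // a Full GC interrupts the cycle before it finishes
  c.complete(false);   // the Full GC (inner caller) completes: started == completed + 2
  c.complete(true);    // the cycle (outer caller) is closed out: started == completed + 1
  return 0;
}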
@@ -1709,25 +1759,41 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
// The caller doesn't have the Heap_lock
assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
int gc_count_before;
unsigned int gc_count_before;
unsigned int full_gc_count_before;
{
MutexLocker ml(Heap_lock);
// Read the GC count while holding the Heap_lock
gc_count_before = SharedHeap::heap()->total_collections();
full_gc_count_before = SharedHeap::heap()->total_full_collections();
// Don't want to do a GC until cleanup is completed.
wait_for_cleanup_complete();
} // We give up heap lock; VMThread::execute gets it back below
switch (cause) {
case GCCause::_scavenge_alot: {
// Do an incremental pause, which might sometimes be abandoned.
VM_G1IncCollectionPause op(gc_count_before, cause);
// We give up heap lock; VMThread::execute gets it back below
}
if (should_do_concurrent_full_gc(cause)) {
// Schedule an initial-mark evacuation pause that will start a
// concurrent cycle.
VM_G1IncCollectionPause op(gc_count_before,
true, /* should_initiate_conc_mark */
g1_policy()->max_pause_time_ms(),
cause);
VMThread::execute(&op);
} else {
if (cause == GCCause::_gc_locker
DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
// Schedule a standard evacuation pause.
VM_G1IncCollectionPause op(gc_count_before,
false, /* should_initiate_conc_mark */
g1_policy()->max_pause_time_ms(),
cause);
VMThread::execute(&op);
break;
}
default: {
// In all other cases, we currently do a full gc.
VM_G1CollectFull op(gc_count_before, cause);
} else {
// Schedule a Full GC.
VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
VMThread::execute(&op);
}
}
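As an aside, the two counts that collect() samples while holding the Heap_lock (gc_count_before and full_gc_count_before) follow a common pattern: the scheduled VM operation can compare them against the current counts to detect that another collection already satisfied the request and skip itself. The sketch below illustrates that pattern only; it is an assumption about intent, not the VM_GC_Operation code.

#include <cstdio>
#include <mutex>

// Hypothetical heap exposing the two counters the real code samples.
struct Heap {
  std::mutex heap_lock;
  unsigned total_collections      = 0;
  unsigned total_full_collections = 0;
};

// A deferred collection request that remembers the counts it observed and
// skips itself if any collection has happened since it was scheduled.
struct CollectRequest {
  unsigned gc_count_before;
  unsigned full_gc_count_before;

  bool should_skip(const Heap& h) const {
    return h.total_collections != gc_count_before ||
           h.total_full_collections != full_gc_count_before;
  }
};

int main() {
  Heap heap;
  CollectRequest req;
  {
    // Sample both counts while holding the lock, mirroring the MutexLocker
    // block at the top of G1CollectedHeap::collect().
    std::lock_guard<std::mutex> ml(heap.heap_lock);
    req.gc_count_before      = heap.total_collections;
    req.full_gc_count_before = heap.total_full_collections;
  }
  heap.total_collections += 1;   // another GC completes before the request runs
  std::printf("skip request: %s\n", req.should_skip(heap) ? "yes" : "no");
  return 0;
}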
@@ -1989,6 +2055,11 @@ void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
HeapRegionClosure *cl) {
if (r == NULL) {
// The CSet is empty so there's nothing to do.
return;
}
assert(r->in_collection_set(),
"Start region must be a member of the collection set.");
HeapRegion* cur = r;
@@ -2481,11 +2552,13 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
}
void G1CollectedHeap::do_collection_pause() {
assert(Heap_lock->owned_by_self(), "we assume we're holding the Heap_lock");
// Read the GC count while holding the Heap_lock
// we need to do this _before_ wait_for_cleanup_complete(), to
// ensure that we do not give up the heap lock and potentially
// pick up the wrong count
int gc_count_before = SharedHeap::heap()->total_collections();
unsigned int gc_count_before = SharedHeap::heap()->total_collections();
// Don't want to do a GC pause while cleanup is being completed!
wait_for_cleanup_complete();
@@ -2493,7 +2566,10 @@ void G1CollectedHeap::do_collection_pause() {
g1_policy()->record_stop_world_start();
{
MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
VM_G1IncCollectionPause op(gc_count_before);
VM_G1IncCollectionPause op(gc_count_before,
false, /* should_initiate_conc_mark */
g1_policy()->max_pause_time_ms(),
GCCause::_g1_inc_collection_pause);
VMThread::execute(&op);
}
}
@@ -2612,7 +2688,7 @@ struct PrepareForRSScanningClosure : public HeapRegionClosure {
};
void
G1CollectedHeap::do_collection_pause_at_safepoint() {
G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
if (GC_locker::check_active_before_gc()) {
return; // GC is disabled (e.g. JNI GetXXXCritical operation)
}
@@ -2637,8 +2713,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
else
strcat(verbose_str, "(partial)");
}
if (g1_policy()->during_initial_mark_pause())
if (g1_policy()->during_initial_mark_pause()) {
strcat(verbose_str, " (initial-mark)");
// We are about to start a marking cycle, so we increment the
// full collection counter.
increment_total_full_collections();
}
// if PrintGCDetails is on, we'll print long statistics information
// in the collector policy code, so let's not print this as the output
@@ -2661,7 +2741,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
"young list should be well formed");
}
bool abandoned = false;
{ // Call to jvmpi::post_class_unload_events must occur outside of active GC
IsGCActiveMark x;
@@ -2743,7 +2822,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
// Now choose the CS. We may abandon a pause if we find no
// region that will fit in the MMU pause.
bool abandoned = g1_policy()->choose_collection_set();
bool abandoned = g1_policy()->choose_collection_set(target_pause_time_ms);
// Nothing to do if we were unable to choose a collection set.
if (!abandoned) {