8151085: Change G1 concurrent timer and tracer measuring time

Change measuring time for G1 concurrent phases and reporting heap summary

Reviewed-by: brutisso, sjohanss
Sangheon Kim 2016-03-17 11:18:52 -07:00
parent 50b0262b13
commit 9f63ca85a7
9 changed files with 63 additions and 142 deletions
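
A stand-alone sketch of the shape this change converges on, with made-up names rather than the HotSpot classes: the concurrent-mark object owns its own timer, concurrent_cycle_start()/concurrent_cycle_end() bracket a cycle, and an RAII helper reports each concurrent phase against that timer, mirroring G1ConcPhaseTimer and ConcurrentMarkThread::run_service() in the hunks below.

    #include <chrono>
    #include <cstdio>
    #include <string>

    // Stand-in for ConcurrentGCTimer: records one cycle plus individual phases.
    class ConcTimer {
     public:
      void register_gc_start()  { _cycle_start = now(); }
      void register_gc_end()    { report("cycle", _cycle_start); }
      void register_gc_concurrent_start(const char* title) { _phase = title; _phase_start = now(); }
      void register_gc_concurrent_end()                    { report(_phase.c_str(), _phase_start); }
     private:
      using clock = std::chrono::steady_clock;
      static clock::time_point now() { return clock::now(); }
      static void report(const char* what, clock::time_point start) {
        auto us = std::chrono::duration_cast<std::chrono::microseconds>(now() - start).count();
        std::printf("%-20s %lld us\n", what, static_cast<long long>(us));
      }
      clock::time_point _cycle_start{}, _phase_start{};
      std::string _phase;
    };

    // Stand-in for G1ConcurrentMark: it now owns the timer it reports against.
    class Mark {
     public:
      ConcTimer* gc_timer_cm()      { return &_timer; }
      void concurrent_cycle_start() { _timer.register_gc_start(); }
      void concurrent_cycle_end()   { _timer.register_gc_end(); }
     private:
      ConcTimer _timer;
    };

    // RAII phase scope in the spirit of G1ConcPhaseTimer: register the phase
    // start in the constructor and its end in the destructor.
    class ConcPhaseTimer {
     public:
      ConcPhaseTimer(Mark* cm, const char* title) : _cm(cm) {
        _cm->gc_timer_cm()->register_gc_concurrent_start(title);
      }
      ~ConcPhaseTimer() { _cm->gc_timer_cm()->register_gc_concurrent_end(); }
     private:
      Mark* _cm;
    };

    int main() {
      Mark cm;
      cm.concurrent_cycle_start();
      { ConcPhaseTimer t(&cm, "Concurrent Mark");    /* marking work here */ }
      { ConcPhaseTimer t(&cm, "Concurrent Cleanup"); /* cleanup work here */ }
      cm.concurrent_cycle_end();
      return 0;
    }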


@@ -98,11 +98,11 @@ class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc,
G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
GCTraceConcTimeImpl<LogLevel::Info, LogTag::_gc, LogTag::_marking>(title),
_cm(cm) {
_cm->register_concurrent_phase_start(title);
_cm->gc_timer_cm()->register_gc_concurrent_start(title);
}
~G1ConcPhaseTimer() {
_cm->register_concurrent_phase_end();
_cm->gc_timer_cm()->register_gc_concurrent_end();
}
};
@@ -119,6 +119,10 @@ void ConcurrentMarkThread::run_service() {
break;
}
GCIdMark gc_id_mark;
cm()->concurrent_cycle_start();
assert(GCId::current() != GCId::undefined(), "GC id should have been set up by the initial mark GC.");
GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
@@ -279,7 +283,8 @@ void ConcurrentMarkThread::run_service() {
{
SuspendibleThreadSetJoiner sts_join;
g1h->increment_old_marking_cycles_completed(true /* concurrent */);
g1h->register_concurrent_cycle_end();
cm()->concurrent_cycle_end();
}
}
_cm->root_regions()->cancel_scan();


@@ -1769,15 +1769,12 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_expand_heap_after_alloc_failure(true),
_old_marking_cycles_started(0),
_old_marking_cycles_completed(0),
_heap_summary_sent(false),
_in_cset_fast_test(),
_dirty_cards_region_list(NULL),
_worker_cset_start_region(NULL),
_worker_cset_start_region_time_stamp(NULL),
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
_workers = new WorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
@@ -2318,52 +2315,6 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
FullGCCount_lock->notify_all();
}
void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
GCIdMarkAndRestore conc_gc_id_mark;
collector_state()->set_concurrent_cycle_started(true);
_gc_timer_cm->register_gc_start(start_time);
_gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
trace_heap_before_gc(_gc_tracer_cm);
_cmThread->set_gc_id(GCId::current());
}
void G1CollectedHeap::register_concurrent_cycle_end() {
if (collector_state()->concurrent_cycle_started()) {
GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
if (_cm->has_aborted()) {
_gc_tracer_cm->report_concurrent_mode_failure();
// ConcurrentGCTimer will be ended as well.
_cm->register_concurrent_gc_end_and_stop_timer();
} else {
_gc_timer_cm->register_gc_end();
}
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
// Clear state variables to prepare for the next concurrent cycle.
collector_state()->set_concurrent_cycle_started(false);
_heap_summary_sent = false;
}
}
void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
if (collector_state()->concurrent_cycle_started()) {
// This function can be called when:
// the cleanup pause is run
// the concurrent cycle is aborted before the cleanup pause.
// the concurrent cycle is aborted after the cleanup pause,
// but before the concurrent cycle end has been registered.
// Make sure that we only send the heap information once.
if (!_heap_summary_sent) {
GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
trace_heap_after_gc(_gc_tracer_cm);
_heap_summary_sent = true;
}
}
}
void G1CollectedHeap::collect(GCCause::Cause cause) {
assert_heap_not_locked();
@@ -2854,12 +2805,14 @@ G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
size_t eden_used_bytes = young_list->eden_used_bytes();
size_t survivor_used_bytes = young_list->survivor_used_bytes();
size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
size_t eden_capacity_bytes =
(g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
VirtualSpaceSummary heap_summary = create_heap_space_summary();
return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes, num_regions());
return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
eden_capacity_bytes, survivor_used_bytes, num_regions());
}
G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
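
The heap summary above is now also produced during the concurrent cycle (trace_heap_before_gc/trace_heap_after_gc are called from G1ConcurrentMark::concurrent_cycle_start/end further down), and that caller does not hold the Heap_lock, hence the new used()/used_unlocked() choice keyed on Heap_lock->owned_by_self(). A minimal stand-alone model of that lock-aware accessor, with made-up names rather than the HotSpot classes:

    #include <cstdio>
    #include <mutex>
    #include <thread>

    // Tiny stand-in for a lock that knows whether the current thread owns it,
    // in the spirit of Mutex::owned_by_self().
    class OwnedMutex {
     public:
      void lock()   { _m.lock(); _owner = std::this_thread::get_id(); }
      void unlock() { _owner = std::thread::id(); _m.unlock(); }
      bool owned_by_self() const { return _owner == std::this_thread::get_id(); }
     private:
      std::mutex _m;
      std::thread::id _owner;
    };

    class Heap {
     public:
      size_t used() const          { return _used_exact; }    // exact; callers hold the lock
      size_t used_unlocked() const { return _used_estimate; } // safe without the lock

      // Mirrors the change above: a caller already holding the heap lock gets the
      // exact value, any other caller gets the lock-free estimate.
      size_t used_for_summary() const {
        return _heap_lock.owned_by_self() ? used() : used_unlocked();
      }

      mutable OwnedMutex _heap_lock;

     private:
      size_t _used_exact    = 96u << 20;
      size_t _used_estimate = 100u << 20;
    };

    int main() {
      Heap h;
      std::printf("no lock held: %zu bytes\n", h.used_for_summary());
      h._heap_lock.lock();
      std::printf("lock held:    %zu bytes\n", h.used_for_summary());
      h._heap_lock.unlock();
      return 0;
    }
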
@@ -2877,7 +2830,6 @@ void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
gc_tracer->report_metaspace_summary(when, metaspace_summary);
}
G1CollectedHeap* G1CollectedHeap::heap() {
CollectedHeap* heap = Universe::heap();
assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
@@ -3250,7 +3202,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// We are about to start a marking cycle, so we increment the
// full collection counter.
increment_old_marking_cycles_started();
register_concurrent_cycle_start(_gc_timer_stw->gc_start());
_cm->gc_tracer_cm()->set_gc_cause(gc_cause());
}
_gc_tracer_stw->report_yc_type(collector_state()->yc_type());


@@ -73,11 +73,9 @@ class HeapRegionRemSetIterator;
class G1ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class ConcurrentGCTimer;
class GenerationCounters;
class STWGCTimer;
class G1NewTracer;
class G1OldTracer;
class EvacuationFailedInfo;
class nmethod;
class Ticks;
@@ -270,8 +268,6 @@ private:
// concurrent cycles) we have completed.
volatile uint _old_marking_cycles_completed;
bool _heap_summary_sent;
// This is a non-product method that is helpful for testing. It is
// called at the end of a GC and artificially expands the heap by
// allocating a number of dead regions. This way we can induce very
@@ -622,10 +618,6 @@ public:
return _old_marking_cycles_completed;
}
void register_concurrent_cycle_start(const Ticks& start_time);
void register_concurrent_cycle_end();
void trace_heap_after_concurrent_cycle();
G1HRPrinter* hr_printer() { return &_hr_printer; }
// Allocates a new heap region instance.
@@ -900,9 +892,7 @@ protected:
ReferenceProcessor* _ref_processor_stw;
STWGCTimer* _gc_timer_stw;
ConcurrentGCTimer* _gc_timer_cm;
G1OldTracer* _gc_tracer_cm;
G1NewTracer* _gc_tracer_stw;
// During reference object discovery, the _is_alive_non_header
@@ -1036,9 +1026,6 @@ public:
// The Concurrent Marking reference processor...
ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
virtual size_t capacity() const;
virtual size_t used() const;
// This should be called when we're not holding the heap lock. The


@@ -72,7 +72,6 @@ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
bool _in_marking_window;
bool _in_marking_window_im;
bool _concurrent_cycle_started;
bool _full_collection;
public:
@@ -88,7 +87,6 @@ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
_mark_in_progress(false),
_in_marking_window(false),
_in_marking_window_im(false),
_concurrent_cycle_started(false),
_full_collection(false) {}
// Setters
@@ -101,7 +99,6 @@ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
void set_mark_in_progress(bool v) { _mark_in_progress = v; }
void set_in_marking_window(bool v) { _in_marking_window = v; }
void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }
void set_concurrent_cycle_started(bool v) { _concurrent_cycle_started = v; }
void set_full_collection(bool v) { _full_collection = v; }
// Getters
@@ -114,7 +111,6 @@ class G1CollectorState VALUE_OBJ_CLASS_SPEC {
bool mark_in_progress() const { return _mark_in_progress; }
bool in_marking_window() const { return _in_marking_window; }
bool in_marking_window_im() const { return _in_marking_window_im; }
bool concurrent_cycle_started() const { return _concurrent_cycle_started; }
bool full_collection() const { return _full_collection; }
// Composite booleans (clients worry about flickering)


@@ -377,7 +377,8 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper*
_has_aborted(false),
_restart_for_overflow(false),
_concurrent_marking_in_progress(false),
_concurrent_phase_status(ConcPhaseNotStarted),
_gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
_gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
// _verbose_level set below
@@ -1011,44 +1012,24 @@ void G1ConcurrentMark::scan_root_regions() {
}
}
void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
uint old_val = 0;
do {
old_val = Atomic::cmpxchg(ConcPhaseStarted, &_concurrent_phase_status, ConcPhaseNotStarted);
} while (old_val != ConcPhaseNotStarted);
_g1h->gc_timer_cm()->register_gc_concurrent_start(title);
void G1ConcurrentMark::concurrent_cycle_start() {
_gc_timer_cm->register_gc_start();
_gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());
_g1h->trace_heap_before_gc(_gc_tracer_cm);
}
void G1ConcurrentMark::register_concurrent_phase_end_common(bool end_timer) {
if (_concurrent_phase_status == ConcPhaseNotStarted) {
return;
void G1ConcurrentMark::concurrent_cycle_end() {
_g1h->trace_heap_after_gc(_gc_tracer_cm);
if (has_aborted()) {
_gc_tracer_cm->report_concurrent_mode_failure();
}
uint old_val = Atomic::cmpxchg(ConcPhaseStopping, &_concurrent_phase_status, ConcPhaseStarted);
if (old_val == ConcPhaseStarted) {
_g1h->gc_timer_cm()->register_gc_concurrent_end();
// If 'end_timer' is true, we came here to end timer which needs concurrent phase ended.
// We need to end it before changing the status to 'ConcPhaseNotStarted' to prevent
// starting a new concurrent phase by 'ConcurrentMarkThread'.
if (end_timer) {
_g1h->gc_timer_cm()->register_gc_end();
}
old_val = Atomic::cmpxchg(ConcPhaseNotStarted, &_concurrent_phase_status, ConcPhaseStopping);
assert(old_val == ConcPhaseStopping, "Should not have changed since we entered this scope.");
} else {
do {
// Let other thread finish changing '_concurrent_phase_status' to 'ConcPhaseNotStarted'.
os::naked_short_sleep(1);
} while (_concurrent_phase_status != ConcPhaseNotStarted);
}
}
_gc_timer_cm->register_gc_end();
void G1ConcurrentMark::register_concurrent_phase_end() {
register_concurrent_phase_end_common(false);
}
void G1ConcurrentMark::register_concurrent_gc_end_and_stop_timer() {
register_concurrent_phase_end_common(true);
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}
void G1ConcurrentMark::mark_from_roots() {
@@ -1127,7 +1108,7 @@ void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
reset_marking_state();
} else {
{
GCTraceTime(Debug, gc) trace("Aggregate Data", g1h->gc_timer_cm());
GCTraceTime(Debug, gc) trace("Aggregate Data", _gc_timer_cm);
// Aggregate the per-task counting data that we have accumulated
// while marking.
@@ -1166,7 +1147,7 @@ void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
g1p->record_concurrent_mark_remark_end();
G1CMIsAliveClosure is_alive(g1h);
g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
_gc_tracer_cm->report_object_count_after_gc(&is_alive);
}
// Base class of the closures that finalize and verify the
@@ -1755,8 +1736,6 @@ void G1ConcurrentMark::cleanup() {
// sure we update the old gen/space data.
g1h->g1mm()->update_sizes();
g1h->allocation_context_stats().update_after_mark();
g1h->trace_heap_after_concurrent_cycle();
}
void G1ConcurrentMark::complete_cleanup() {
@@ -2048,7 +2027,7 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
// Inner scope to exclude the cleaning of the string and symbol
// tables from the displayed time.
{
GCTraceTime(Debug, gc) trace("Reference Processing", g1h->gc_timer_cm());
GCTraceTime(Debug, gc) trace("Reference Processing", _gc_timer_cm);
ReferenceProcessor* rp = g1h->ref_processor_cm();
@@ -2105,8 +2084,8 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
&g1_keep_alive,
&g1_drain_mark_stack,
executor,
g1h->gc_timer_cm());
g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
_gc_timer_cm);
_gc_tracer_cm->report_gc_reference_stats(stats);
// The do_oop work routines of the keep_alive and drain_marking_stack
// oop closures will set the has_overflown flag if we overflow the
@@ -2138,24 +2117,24 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
// Unload Klasses, String, Symbols, Code Cache, etc.
{
GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm());
GCTraceTime(Debug, gc) trace("Unloading", _gc_timer_cm);
if (ClassUnloadingWithConcurrentMark) {
bool purged_classes;
{
GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm());
GCTraceTime(Trace, gc) trace("System Dictionary Unloading", _gc_timer_cm);
purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
}
{
GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm());
GCTraceTime(Trace, gc) trace("Parallel Unloading", _gc_timer_cm);
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
}
}
if (G1StringDedup::is_enabled()) {
GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm());
GCTraceTime(Trace, gc) trace("String Deduplication Unlink", _gc_timer_cm);
G1StringDedup::unlink(&g1_is_alive);
}
}
@@ -2276,7 +2255,7 @@ void G1ConcurrentMark::checkpointRootsFinalWork() {
HandleMark hm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm());
GCTraceTime(Debug, gc) trace("Finalize Marking", _gc_timer_cm);
g1h->ensure_parsability(false);
@@ -2632,10 +2611,6 @@ void G1ConcurrentMark::abort() {
satb_mq_set.set_active_all_threads(
false, /* new active value */
satb_mq_set.is_active() /* expected_active */);
_g1h->trace_heap_after_concurrent_cycle();
_g1h->register_concurrent_cycle_end();
}
static void print_ms_time_info(const char* prefix, const char* name,


@@ -34,6 +34,8 @@ class G1CollectedHeap;
class G1CMBitMap;
class G1CMTask;
class G1ConcurrentMark;
class ConcurrentGCTimer;
class G1OldTracer;
typedef GenericTaskQueue<oop, mtGC> G1CMTaskQueue;
typedef GenericTaskQueueSet<G1CMTaskQueue, mtGC> G1CMTaskQueueSet;
@@ -349,17 +351,9 @@ protected:
// time of remark.
volatile bool _concurrent_marking_in_progress;
// There would be a race between ConcurrentMarkThread and VMThread(ConcurrentMark::abort())
// to call ConcurrentGCTimer::register_gc_concurrent_end().
// And this variable is used to keep track of concurrent phase.
volatile uint _concurrent_phase_status;
// Concurrent phase is not yet started.
static const uint ConcPhaseNotStarted = 0;
// Concurrent phase is started.
static const uint ConcPhaseStarted = 1;
// Caller thread of ConcurrentGCTimer::register_gc_concurrent_end() is ending concurrent phase.
// So other thread should wait until the status to be changed to ConcPhaseNotStarted.
static const uint ConcPhaseStopping = 2;
ConcurrentGCTimer* _gc_timer_cm;
G1OldTracer* _gc_tracer_cm;
// All of these times are in ms
NumberSeq _init_times;
@@ -530,10 +524,8 @@ public:
_concurrent_marking_in_progress = false;
}
void register_concurrent_phase_start(const char* title);
void register_concurrent_phase_end();
// Ends both concurrent phase and timer.
void register_concurrent_gc_end_and_stop_timer();
void concurrent_cycle_start();
void concurrent_cycle_end();
void update_accum_task_vtime(int i, double vtime) {
_accum_task_vtime[i] += vtime;
@@ -735,6 +727,9 @@ public:
return _completed_initialization;
}
ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
protected:
// Clear all the per-task bitmaps and arrays used to store the
// counting data.
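
The register_concurrent_phase_* machinery and the ConcPhaseNotStarted/Started/Stopping constants removed above existed only because the concurrent mark thread and the VM thread (through abort()) could race to end the same concurrent phase; after this change the cycle end is reported only from the concurrent mark thread, so the protocol goes away. Roughly how that retired protocol worked, as a stand-alone model using std::atomic with made-up names, not the HotSpot code:

    #include <atomic>
    #include <chrono>
    #include <cstdio>
    #include <thread>

    enum : unsigned { NotStarted = 0, Started = 1, Stopping = 2 };

    std::atomic<unsigned> phase_status{NotStarted};

    // Modeled on the removed register_concurrent_phase_start(): spin until the
    // status can be moved from NotStarted to Started.
    void phase_start() {
      unsigned expected;
      do {
        expected = NotStarted;
      } while (!phase_status.compare_exchange_strong(expected, Started));
      std::printf("phase started\n");
    }

    // Modeled on the removed register_concurrent_phase_end_common(): only the
    // thread that wins the Started -> Stopping transition ends the phase; the
    // loser waits until the winner has reset the status to NotStarted.
    void phase_end() {
      unsigned expected = Started;
      if (phase_status.compare_exchange_strong(expected, Stopping)) {
        std::printf("phase ended by the winning thread\n");
        phase_status.store(NotStarted);
      } else {
        while (phase_status.load() != NotStarted) {
          std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }
      }
    }

    int main() {
      phase_start();
      // Two threads race to end the phase, as the mark thread and an aborting
      // VM thread could before this change; exactly one of them ends it.
      std::thread t1(phase_end), t2(phase_end);
      t1.join();
      t2.join();
      return 0;
    }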


@@ -216,7 +216,7 @@ void VM_CGC_Operation::doit() {
GCIdMark gc_id_mark(_gc_id);
GCTraceCPUTime tcpu;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCTraceTime(Info, gc) t(_printGCMessage, g1h->gc_timer_cm(), GCCause::_no_gc, true);
GCTraceTime(Info, gc) t(_printGCMessage, g1h->concurrent_mark()->gc_timer_cm(), GCCause::_no_gc, true);
IsGCActiveMark x;
_cl->do_void();
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -242,4 +242,12 @@ void G1NewTracer::report_adaptive_ihop_statistics(size_t threshold,
prediction_active);
}
void G1OldTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) {
_shared_gc_info.set_start_timestamp(timestamp);
}
void G1OldTracer::set_gc_cause(GCCause::Cause cause) {
_shared_gc_info.set_cause(cause);
}
#endif


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -126,7 +126,7 @@ class GCTracer : public ResourceObj {
protected:
GCTracer(GCName name) : _shared_gc_info(name) {}
void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp);
virtual void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp);
virtual void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions);
private:
@@ -297,8 +297,11 @@ class CMSTracer : public OldGCTracer {
};
class G1OldTracer : public OldGCTracer {
protected:
void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp);
public:
G1OldTracer() : OldGCTracer(G1Old) {}
void set_gc_cause(GCCause::Cause cause);
};
#endif // SHARE_VM_GC_SHARED_GCTRACE_HPP
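
With register_concurrent_cycle_start() gone, the GC cause is no longer known when the concurrent tracer's start is reported (the diff passes GCCause::_no_gc with a "first parameter is not used" comment), so report_gc_start_impl() becomes a virtual hook above and G1OldTracer gains a separate set_gc_cause(), invoked from the pause that starts the marking cycle. A stand-alone model of that split, with made-up names rather than the HotSpot classes:

    #include <cstdio>

    // Stand-in for the shared GC info a tracer fills in.
    struct SharedGCInfo {
      int cause = 0;
      long long start_ts = 0;
    };

    class Tracer {
     public:
      void report_gc_start(int cause, long long ts) { report_gc_start_impl(cause, ts); }
      void print() const { std::printf("cause=%d start=%lld\n", _info.cause, _info.start_ts); }
      virtual ~Tracer() = default;
     protected:
      // Base behaviour: record both the cause and the start timestamp.
      virtual void report_gc_start_impl(int cause, long long ts) {
        _info.cause = cause;
        _info.start_ts = ts;
      }
      SharedGCInfo _info;
    };

    class OldTracer : public Tracer {
     public:
      // The cause arrives separately, e.g. from the pause that starts the cycle.
      void set_gc_cause(int cause) { _info.cause = cause; }
     protected:
      // Override: record only the timestamp; the cause argument is ignored here.
      void report_gc_start_impl(int /*cause*/, long long ts) override {
        _info.start_ts = ts;
      }
    };

    int main() {
      OldTracer tracer;
      tracer.set_gc_cause(42);          // set during the initial-mark pause
      tracer.report_gc_start(0, 1000);  // later, at concurrent cycle start
      tracer.print();                   // prints: cause=42 start=1000
      return 0;
    }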