8134953: Make the GC ID available in a central place

Reviewed-by: pliden, jmasa
Bengt Rutisson 2015-09-30 09:07:21 +02:00
parent d516b42238
commit 003892f897
41 changed files with 253 additions and 291 deletions
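
Throughout the diff below, explicit GCId / gc_id() arguments are replaced by two primitives: GCIdMark and GCId::current(). The following minimal sketch is illustrative only (the storage, helpers and the gclog_stamp_example() consumer are assumptions, not the actual HotSpot gcId.hpp, which keeps the id on the Thread rather than in a thread_local): an RAII mark installs the current GC ID for a scope, and consumers such as GCTraceTime or gclog_stamp() read it implicitly instead of receiving it as a parameter.

// Illustrative sketch -- names follow the diff (GCId, GCIdMark), everything else is assumed.
#include <cstdio>

typedef unsigned int uint;

class GCId {
  friend class GCIdMark;
  static thread_local uint _current_id;   // assumed storage; HotSpot keeps this elsewhere
  static uint _next_id;
 public:
  static uint current() { return _current_id; }   // what GCTraceTime, gclog_stamp(), etc. query
  static uint create()  { return _next_id++; }    // simplified: no atomicity or "undefined" handling
};

thread_local uint GCId::_current_id = 0;
uint GCId::_next_id = 0;

// RAII mark: installs a GC id for the enclosing scope and restores the previous one.
class GCIdMark {
  uint _previous_id;
 public:
  GCIdMark() : _previous_id(GCId::_current_id) { GCId::_current_id = GCId::create(); }
  explicit GCIdMark(uint id) : _previous_id(GCId::_current_id) { GCId::_current_id = id; }
  ~GCIdMark() { GCId::_current_id = _previous_id; }
};

// Hypothetical consumer: no GCId parameter needed any more.
static void gclog_stamp_example() { std::printf("GC(%u) ", GCId::current()); }

int main() {
  GCIdMark mark;            // e.g. established once at the start of a collection
  gclog_stamp_example();    // logging inside the scope picks up the current id
}

With this in place, a collection only has to establish its id once at the top of the cycle; everything logged underneath picks it up via GCId::current(), which is exactly the simplification the hunks below perform.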


@@ -1593,7 +1593,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
-  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
+  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
@@ -2825,7 +2825,6 @@ class CMSPhaseAccounting: public StackObj {
  public:
   CMSPhaseAccounting(CMSCollector *collector,
                      const char *phase,
-                     const GCId gc_id,
                      bool print_cr = true);
   ~CMSPhaseAccounting();
@@ -2834,7 +2833,6 @@ class CMSPhaseAccounting: public StackObj {
   const char *_phase;
   elapsedTimer _wallclock;
   bool _print_cr;
-  const GCId _gc_id;
  public:
   // Not MT-safe; so do not pass around these StackObj's
@@ -2850,15 +2848,14 @@ class CMSPhaseAccounting: public StackObj {
 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
                                        const char *phase,
-                                       const GCId gc_id,
                                        bool print_cr) :
-  _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
+  _collector(collector), _phase(phase), _print_cr(print_cr) {
   if (PrintCMSStatistics != 0) {
     _collector->resetYields();
   }
   if (PrintGCDetails) {
-    gclog_or_tty->gclog_stamp(_gc_id);
+    gclog_or_tty->gclog_stamp();
     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
       _collector->cmsGen()->short_name(), _phase);
   }
@@ -2872,7 +2869,7 @@ CMSPhaseAccounting::~CMSPhaseAccounting() {
   _collector->stopTimer();
   _wallclock.stop();
   if (PrintGCDetails) {
-    gclog_or_tty->gclog_stamp(_gc_id);
+    gclog_or_tty->gclog_stamp();
     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
       _collector->cmsGen()->short_name(),
       _phase, _collector->timerValue(), _wallclock.seconds());
@@ -2951,7 +2948,7 @@ void CMSCollector::checkpointRootsInitialWork() {
   setup_cms_unloading_and_verification_state();
   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
-    PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
+    PrintGCDetails && Verbose, true, _gc_timer_cm);)
   // Reset all the PLAB chunk arrays if necessary.
   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
@@ -3054,7 +3051,7 @@ bool CMSCollector::markFromRoots() {
     CMSTokenSyncWithLocks ts(true, bitMapLock());
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
     bool res = markFromRootsWork();
     if (res) {
       _collectorState = Precleaning;
@@ -3751,7 +3748,7 @@ void CMSCollector::preclean() {
       _start_sampling = false;
     }
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
   }
   CMSTokenSync x(true); // is cms thread
@@ -3780,7 +3777,7 @@ void CMSCollector::abortable_preclean() {
   // we will never do an actual abortable preclean cycle.
   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
     // We need more smarts in the abortable preclean
     // loop below to deal with cases where allocation
     // in young gen is very very slow, and our precleaning
@@ -3925,7 +3922,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
     rp->preclean_discovered_references(
           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
-          gc_timer, _gc_tracer_cm->gc_id());
+          gc_timer);
   }
   if (clean_survivor) {  // preclean the active survivor space(s)
@@ -4261,7 +4258,7 @@ void CMSCollector::checkpointRootsFinal() {
       // expect it to be false and set to true
       FlagSetting fl(gch->_is_gc_active, false);
       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
-        PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
+        PrintGCDetails && Verbose, true, _gc_timer_cm);)
       gch->do_collection(true,   // full (i.e. force, see below)
                          false,  // !clear_all_soft_refs
                          0,      // size
@@ -4279,7 +4276,7 @@ void CMSCollector::checkpointRootsFinal() {
 }
 void CMSCollector::checkpointRootsFinalWork() {
-  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
+  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
@@ -4329,11 +4326,10 @@ void CMSCollector::checkpointRootsFinalWork() {
   // the most recent young generation GC, minus those cleaned up by the
   // concurrent precleaning.
   if (CMSParallelRemarkEnabled) {
-    GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
     do_remark_parallel();
   } else {
-    GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
-      _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, _gc_timer_cm);
     do_remark_non_parallel();
   }
 }
@@ -4341,7 +4337,7 @@ void CMSCollector::checkpointRootsFinalWork() {
   verify_overflow_empty();
   {
-    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
+    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
     refProcessingWork();
   }
   verify_work_stacks_empty();
@@ -5116,7 +5112,7 @@ void CMSCollector::do_remark_non_parallel() {
                                 NULL,  // space is set further below
                                 &_markBitMap, &_markStack, &mrias_cl);
   {
-    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
     // Iterate over the dirty cards, setting the corresponding bits in the
     // mod union table.
     {
@@ -5153,7 +5149,7 @@ void CMSCollector::do_remark_non_parallel() {
     Universe::verify();
   }
   {
-    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
     verify_work_stacks_empty();
@@ -5175,7 +5171,7 @@ void CMSCollector::do_remark_non_parallel() {
   }
   {
-    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
     verify_work_stacks_empty();
@@ -5194,7 +5190,7 @@ void CMSCollector::do_remark_non_parallel() {
   }
   {
-    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
     verify_work_stacks_empty();
@@ -5403,7 +5399,7 @@ void CMSCollector::refProcessingWork() {
                                           _span, &_markBitMap, &_markStack,
                                           &cmsKeepAliveClosure, false /* !preclean */);
   {
-    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
     ReferenceProcessorStats stats;
     if (rp->processing_is_mt()) {
@@ -5428,15 +5424,13 @@ void CMSCollector::refProcessingWork() {
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         &task_executor,
-                                        _gc_timer_cm,
-                                        _gc_tracer_cm->gc_id());
+                                        _gc_timer_cm);
     } else {
       stats = rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
                                         NULL,
-                                        _gc_timer_cm,
-                                        _gc_tracer_cm->gc_id());
+                                        _gc_timer_cm);
     }
     _gc_tracer_cm->report_gc_reference_stats(stats);
@@ -5447,7 +5441,7 @@ void CMSCollector::refProcessingWork() {
   if (should_unload_classes()) {
     {
-      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
       // Unload classes and purge the SystemDictionary.
       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
@@ -5460,13 +5454,13 @@ void CMSCollector::refProcessingWork() {
     }
     {
-      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
       // Clean up unreferenced symbols in symbol table.
       SymbolTable::unlink();
     }
     {
-      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
       // Delete entries for dead interned strings.
       StringTable::unlink(&_is_alive_closure);
     }
@@ -5534,7 +5528,7 @@ void CMSCollector::sweep() {
   _intra_sweep_timer.start();
   {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
     // First sweep the old gen
     {
       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
@@ -5719,7 +5713,7 @@ void CMSCollector::reset(bool concurrent) {
     // Clear the mark bitmap (no grey objects to start with)
     // for the next cycle.
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+    CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
     HeapWord* curAddr = _markBitMap.startWord();
     while (curAddr < _markBitMap.endWord()) {
@@ -5771,7 +5765,7 @@ void CMSCollector::reset(bool concurrent) {
 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
+  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
   TraceCollectorStats tcs(counters());
   switch (op) {


@@ -26,6 +26,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
@@ -124,6 +125,7 @@ void ConcurrentMarkSweepThread::run() {
   while (!_should_terminate) {
     sleepBeforeNextCycle();
     if (_should_terminate) break;
+    GCIdMark gc_id_mark;
     GCCause::Cause cause = _collector->_full_gc_requested ?
       _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
     _collector->collect_in_background(cause);


@@ -896,7 +896,7 @@ void ParNewGeneration::collect(bool full,
     size_policy->minor_collection_begin();
   }
-  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
+  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
   // Capture heap used before collection (for printing).
   size_t gch_prev_used = gch->used();
@@ -959,13 +959,13 @@ void ParNewGeneration::collect(bool full,
     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, &task_executor,
-                                              _gc_timer, _gc_tracer.gc_id());
+                                              _gc_timer);
   } else {
     thread_state_set.flush();
     gch->save_marks();
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, NULL,
-                                              _gc_timer, _gc_tracer.gc_id());
+                                              _gc_timer);
   }
   _gc_tracer.report_gc_reference_stats(stats);
   if (!promotion_failed()) {


@@ -58,7 +58,7 @@ void VM_CMS_Operation::release_and_notify_pending_list_lock() {
 void VM_CMS_Operation::verify_before_gc() {
   if (VerifyBeforeGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
+    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@@ -70,7 +70,7 @@ void VM_CMS_Operation::verify_before_gc() {
 void VM_CMS_Operation::verify_after_gc() {
   if (VerifyAfterGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
+    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@@ -134,6 +134,7 @@ void VM_CMS_Initial_Mark::doit() {
     return;
   }
   HS_PRIVATE_CMS_INITMARK_BEGIN();
+  GCIdMark gc_id_mark(_gc_id);
   _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
@@ -161,6 +162,7 @@ void VM_CMS_Final_Remark::doit() {
     return;
   }
   HS_PRIVATE_CMS_REMARK_BEGIN();
+  GCIdMark gc_id_mark(_gc_id);
   _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");


@@ -27,6 +27,7 @@
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/shared/gcCause.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 #include "runtime/vm_operations.hpp"
@@ -53,6 +54,7 @@ class VM_CMS_Operation: public VM_Operation {
  protected:
   CMSCollector* _collector;          // associated collector
   bool _prologue_succeeded;          // whether doit_prologue succeeded
+  uint _gc_id;
   bool lost_race() const;
@@ -63,7 +65,8 @@ class VM_CMS_Operation: public VM_Operation {
  public:
   VM_CMS_Operation(CMSCollector* collector):
     _collector(collector),
-    _prologue_succeeded(false) {}
+    _prologue_succeeded(false),
+    _gc_id(GCId::current()) {}
   ~VM_CMS_Operation() {}
   // The legal collector state for executing this CMS op.


@@ -24,6 +24,7 @@
 #include "precompiled.hpp"
 #include "gc/cms/yieldingWorkgroup.hpp"
+#include "gc/shared/gcId.hpp"
 #include "utilities/macros.hpp"
 YieldingFlexibleGangWorker::YieldingFlexibleGangWorker(YieldingFlexibleWorkGang* gang, int id)
@@ -340,6 +341,7 @@ void YieldingFlexibleGangWorker::loop() {
       // Now, release the gang mutex and do the work.
       {
         MutexUnlockerEx mul(gang_monitor, Mutex::_no_safepoint_check_flag);
+        GCIdMark gc_id_mark(data.task()->gc_id());
         data.task()->work(id);   // This might include yielding
       }
       // Reacquire monitor and note completion of this worker
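
The added GCIdMark line in this hunk shows the other half of the pattern: a gang worker executes a task on behalf of a GC that was started on another thread, so it installs the task's GC ID around work(). A hedged sketch of that hand-off, reusing the hypothetical GCId/GCIdMark from the first sketch (ExampleGangTask is invented here; it stands in for the real gang task, which carries a gc_id() accessor defined outside this excerpt):

// Assumes the GCId/GCIdMark sketch near the top of this page.
class ExampleGangTask {
  uint _gc_id;
 public:
  ExampleGangTask() : _gc_id(GCId::current()) {}   // assumed: captured on the thread that starts the GC
  uint gc_id() const { return _gc_id; }
  void work(int worker_id) {
    // Logging here sees GCId::current() == gc_id(), regardless of which worker runs it.
  }
};

void worker_loop_body(ExampleGangTask* task, int worker_id) {
  GCIdMark gc_id_mark(task->gc_id());   // mirrors: GCIdMark gc_id_mark(data.task()->gc_id());
  task->work(worker_id);                // may include yielding; the id stays set for this scope
}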


@@ -41,6 +41,7 @@
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.hpp"
@@ -520,7 +521,6 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev
   _has_overflown(false),
   _concurrent(false),
   _has_aborted(false),
-  _aborted_gc_id(GCId::undefined()),
   _restart_for_overflow(false),
   _concurrent_marking_in_progress(false),
@@ -991,7 +991,7 @@ void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
     force_overflow()->update();
     if (G1Log::fine()) {
-      gclog_or_tty->gclog_stamp(concurrent_gc_id());
+      gclog_or_tty->gclog_stamp();
       gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
     }
   }
@@ -1181,7 +1181,7 @@ void ConcurrentMark::scanRootRegions() {
   // should not attempt to do any further work.
   if (root_regions()->scan_in_progress()) {
     if (G1Log::fine()) {
-      gclog_or_tty->gclog_stamp(concurrent_gc_id());
+      gclog_or_tty->gclog_stamp();
       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
     }
@@ -1195,7 +1195,7 @@ void ConcurrentMark::scanRootRegions() {
     _parallel_workers->run_task(&task);
     if (G1Log::fine()) {
-      gclog_or_tty->gclog_stamp(concurrent_gc_id());
+      gclog_or_tty->gclog_stamp();
       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
     }
@@ -1246,8 +1246,7 @@ class G1CMTraceTime : public StackObj {
  public:
   G1CMTraceTime(const char* title, bool doit)
-    : _gc_trace_time(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
-        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
+    : _gc_trace_time(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm()) {
   }
 };
@@ -2392,8 +2391,7 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
                                           &g1_keep_alive,
                                           &g1_drain_mark_stack,
                                           executor,
-                                          g1h->gc_timer_cm(),
-                                          concurrent_gc_id());
+                                          g1h->gc_timer_cm());
     g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
     // The do_oop work routines of the keep_alive and drain_marking_stack
@@ -2989,8 +2987,6 @@ void ConcurrentMark::abort() {
   }
   _first_overflow_barrier_sync.abort();
   _second_overflow_barrier_sync.abort();
-  _aborted_gc_id = _g1h->gc_tracer_cm()->gc_id();
-  assert(!_aborted_gc_id.is_undefined(), "ConcurrentMark::abort() executed more than once?");
   _has_aborted = true;
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
@@ -3005,13 +3001,6 @@ void ConcurrentMark::abort() {
   _g1h->register_concurrent_cycle_end();
 }
-const GCId& ConcurrentMark::concurrent_gc_id() {
-  if (has_aborted()) {
-    return _aborted_gc_id;
-  }
-  return _g1h->gc_tracer_cm()->gc_id();
-}
 static void print_ms_time_info(const char* prefix, const char* name,
                                NumberSeq& ns) {
   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",


@@ -28,7 +28,6 @@
 #include "classfile/javaClasses.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "gc/g1/heapRegionSet.hpp"
-#include "gc/shared/gcId.hpp"
 #include "gc/shared/taskqueue.hpp"
 class G1CollectedHeap;
@@ -425,7 +424,6 @@ protected:
   volatile bool _concurrent;
   // Set at the end of a Full GC so that marking aborts
   volatile bool _has_aborted;
-  GCId          _aborted_gc_id;
   // Used when remark aborts due to an overflow to indicate that
   // another concurrent marking phase should start
@@ -768,8 +766,6 @@ public:
   bool has_aborted()      { return _has_aborted; }
-  const GCId& concurrent_gc_id();
-
   // This prints the global/local fingers. It is used for debugging.
   NOT_PRODUCT(void print_finger();)


@@ -30,6 +30,7 @@
 #include "gc/g1/g1MMUTracker.hpp"
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/vmThread.hpp"
@@ -85,7 +86,7 @@ void ConcurrentMarkThread::cm_log(bool doit, bool join_sts, const char* fmt, ...
     SuspendibleThreadSetJoiner sts_joiner(join_sts);
     va_list args;
     va_start(args, fmt);
-    gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
+    gclog_or_tty->gclog_stamp();
     gclog_or_tty->vprint_cr(fmt, args);
     va_end(args);
   }
@@ -108,6 +109,7 @@ void ConcurrentMarkThread::run() {
       break;
     }
+    GCIdMark gc_id_mark;
     {
       ResourceMark rm;
       HandleMark hm;


@@ -53,6 +53,7 @@
 #include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -1450,6 +1451,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
   gc_timer->register_gc_start();
   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
+  GCIdMark gc_id_mark;
   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
   SvcGCMarker sgcm(SvcGCMarker::FULL);
@@ -1476,7 +1478,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
     {
-      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
+      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
       TraceCollectorStats tcs(g1mm()->full_collection_counters());
       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
@@ -3894,7 +3896,7 @@ void G1CollectedHeap::log_gc_header() {
     return;
   }
-  gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
+  gclog_or_tty->gclog_stamp();
   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
     .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
@@ -3952,6 +3954,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   _gc_timer_stw->register_gc_start();
+  GCIdMark gc_id_mark;
   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
   SvcGCMarker sgcm(SvcGCMarker::MINOR);
@@ -5501,8 +5504,7 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per
                                               &keep_alive,
                                               &drain_queue,
                                               NULL,
-                                              _gc_timer_stw,
-                                              _gc_tracer_stw->gc_id());
+                                              _gc_timer_stw);
   } else {
     // Parallel reference processing
     assert(rp->num_q() == no_of_gc_workers, "sanity");
@@ -5513,8 +5515,7 @@ void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per
                                               &keep_alive,
                                               &drain_queue,
                                               &par_task_executor,
-                                              _gc_timer_stw,
-                                              _gc_tracer_stw->gc_id());
+                                              _gc_timer_stw);
   }
   _gc_tracer_stw->report_gc_reference_stats(stats);


@@ -857,7 +857,7 @@ void G1CollectorPolicy::record_concurrent_mark_remark_end() {
   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   _prev_collection_pause_end_ms += elapsed_time_ms;
-  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, _g1->gc_tracer_cm()->gc_id());
+  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec);
 }
 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
@@ -952,8 +952,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
     collector_state()->set_initiate_conc_mark_if_possible(true);
   }
-  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
-                          end_time_sec, _g1->gc_tracer_stw()->gc_id());
+  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
   if (update_stats) {
     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
@@ -1584,7 +1583,7 @@ G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   _prev_collection_pause_end_ms += elapsed_time_ms;
-  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, _g1->gc_tracer_cm()->gc_id());
+  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec);
 }
 // Add the heap region at the head of the non-incremental collection set


@@ -76,7 +76,7 @@ double G1MMUTrackerQueue::calculate_gc_time(double current_time) {
   return gc_time;
 }
-void G1MMUTrackerQueue::add_pause(double start, double end, const GCId& gcId) {
+void G1MMUTrackerQueue::add_pause(double start, double end) {
   double duration = end - start;
   remove_expired_entries(end);
@@ -106,7 +106,7 @@ void G1MMUTrackerQueue::add_pause(double start, double end, const GCId& gcId) {
   // Current entry needs to be added before calculating the value
   double slice_time = calculate_gc_time(end);
-  G1MMUTracer::report_mmu(gcId, _time_slice, slice_time, _max_gc_time);
+  G1MMUTracer::report_mmu(_time_slice, slice_time, _max_gc_time);
 }
 // basically the _internal call does not remove expired entries


@@ -43,7 +43,7 @@ protected:
  public:
   G1MMUTracker(double time_slice, double max_gc_time);
-  virtual void add_pause(double start, double end, const GCId& gcId) = 0;
+  virtual void add_pause(double start, double end) = 0;
   virtual double when_sec(double current_time, double pause_time) = 0;
   double max_gc_time() {
@@ -127,7 +127,7 @@ private:
  public:
   G1MMUTrackerQueue(double time_slice, double max_gc_time);
-  virtual void add_pause(double start, double end, const GCId& gcId);
+  virtual void add_pause(double start, double end);
   virtual double when_sec(double current_time, double pause_time);
 };


@@ -121,7 +121,7 @@ void G1MarkSweep::allocate_stacks() {
 void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                     bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
+  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer());
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -146,8 +146,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                       &GenMarkSweep::keep_alive,
                                       &GenMarkSweep::follow_stack_closure,
                                       NULL,
-                                      gc_timer(),
-                                      gc_tracer()->gc_id());
+                                      gc_timer());
     gc_tracer()->report_gc_reference_stats(stats);
@@ -200,7 +199,7 @@ void G1MarkSweep::mark_sweep_phase2() {
   // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
   // tracking expects us to do so. See comment under phase4.
-  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
+  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer());
   prepare_compaction();
 }
@@ -233,7 +232,7 @@ void G1MarkSweep::mark_sweep_phase3() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
+  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer());
   // Need cleared claim bits for the roots processing
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -294,7 +293,7 @@ void G1MarkSweep::mark_sweep_phase4() {
   // to use a higher index (saved from phase2) when verifying perm_gen.
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
+  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer());
   G1SpaceCompactClosure blk;
   g1h->heap_region_iterate(&blk);


@@ -26,6 +26,7 @@
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/g1/g1Log.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcTimer.hpp"
@@ -227,7 +228,8 @@ void VM_CGC_Operation::release_and_notify_pending_list_lock() {
 void VM_CGC_Operation::doit() {
   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm(), g1h->concurrent_mark()->concurrent_gc_id());
+  GCIdMark gc_id_mark(_gc_id);
+  GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm());
   IsGCActiveMark x;
   _cl->do_void();
 }


@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_G1_VM_OPERATIONS_G1_HPP
 #include "gc/g1/g1AllocationContext.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/vmGCOperations.hpp"
 // VM_operations for the G1 collector.
@@ -104,6 +105,7 @@ class VM_CGC_Operation: public VM_Operation {
   VoidClosure* _cl;
   const char*  _printGCMessage;
   bool         _needs_pll;
+  uint         _gc_id;
 protected:
   // java.lang.ref.Reference support
@@ -112,7 +114,7 @@ protected:
 public:
   VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pll)
-    : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll) { }
+    : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll), _gc_id(GCId::current()) { }
   virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
   virtual void doit();
   virtual bool doit_prologue();
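
Both VM_CMS_Operation and VM_CGC_Operation now capture GCId::current() in a _gc_id field when the operation is constructed on the concurrent GC thread, and their doit() methods (see the .cpp hunks above) re-install it with GCIdMark on the VM thread. A minimal sketch of that capture/re-establish pattern, again in terms of the hypothetical GCId/GCIdMark from the first sketch (the class below is invented, not a HotSpot type):

// Hypothetical VM operation; only the _gc_id plumbing mirrors the diff.
class VM_ExampleConcurrentPhase {
  uint _gc_id;
 public:
  // Constructed on the concurrent GC thread, inside its GCIdMark scope,
  // so GCId::current() is the id of the in-progress concurrent cycle.
  VM_ExampleConcurrentPhase() : _gc_id(GCId::current()) {}

  // Executed later on the VM thread, which has no GC id of its own:
  // re-establish the captured id so the whole pause is logged consistently.
  void doit() {
    GCIdMark gc_id_mark(_gc_id);
    // ... safepoint work; GCTraceTime / gclog_stamp() pick up GCId::current() ...
  }
};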


@@ -53,7 +53,7 @@ void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
   ResourceMark rm;
   NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -82,7 +82,7 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
   NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
@@ -153,7 +153,7 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
   NOT_PRODUCT(GCTraceTime tm("RefProcTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
@@ -209,7 +209,7 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
   NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -241,7 +241,7 @@ void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
   NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -308,7 +308,7 @@ UpdateDensePrefixTask::UpdateDensePrefixTask(
 void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
   NOT_PRODUCT(GCTraceTime tm("UpdateDensePrefixTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -323,7 +323,7 @@ void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
   NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);


@@ -36,6 +36,7 @@
 #include "gc/serial/markSweep.hpp"
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -113,6 +114,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   GCCause::Cause gc_cause = heap->gc_cause();
+  GCIdMark gc_id_mark;
   _gc_timer->register_gc_start();
   _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
@@ -165,7 +167,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
     HandleMark hm;
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
+    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
@@ -508,7 +510,7 @@ void PSMarkSweep::deallocate_stacks() {
 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
@@ -541,7 +543,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
     ref_processor()->setup_policy(clear_all_softrefs);
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
-        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
+        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
     gc_tracer()->report_gc_reference_stats(stats);
   }
@@ -567,7 +569,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
 void PSMarkSweep::mark_sweep_phase2() {
-  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
   // Now all live objects are marked, compute the new object addresses.
@@ -594,7 +596,7 @@ static PSAlwaysTrueClosure always_true;
 void PSMarkSweep::mark_sweep_phase3() {
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
   PSYoungGen* young_gen = heap->young_gen();
@@ -634,7 +636,7 @@ void PSMarkSweep::mark_sweep_phase3() {
 void PSMarkSweep::mark_sweep_phase4() {
   EventMark m("4 compact heap");
-  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
+  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
   // All pointers are now adjusted, move objects accordingly


@ -40,6 +40,7 @@
#include "gc/parallel/psYoungGen.hpp" #include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcCause.hpp" #include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTrace.hpp"
@ -960,7 +961,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
// at each young gen gc. Do the update unconditionally (even though a // at each young gen gc. Do the update unconditionally (even though a
// promotion failure does not swap spaces) because an unknown number of young // promotion failure does not swap spaces) because an unknown number of young
// collections will have swapped the spaces an unknown number of times. // collections will have swapped the spaces an unknown number of times.
GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_space_info[from_space_id].set_space(heap->young_gen()->from_space()); _space_info[from_space_id].set_space(heap->young_gen()->from_space());
_space_info[to_space_id].set_space(heap->young_gen()->to_space()); _space_info[to_space_id].set_space(heap->young_gen()->to_space());
@ -1003,7 +1004,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
void PSParallelCompact::post_compact() void PSParallelCompact::post_compact()
{ {
GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
for (unsigned int id = old_space_id; id < last_space_id; ++id) { for (unsigned int id = old_space_id; id < last_space_id; ++id) {
// Clear the marking bitmap, summary data and split info. // Clear the marking bitmap, summary data and split info.
@ -1824,7 +1825,7 @@ void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
void PSParallelCompact::summary_phase(ParCompactionManager* cm, void PSParallelCompact::summary_phase(ParCompactionManager* cm,
bool maximum_compaction) bool maximum_compaction)
{ {
GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
// trace("2"); // trace("2");
#ifdef ASSERT #ifdef ASSERT
@ -1984,6 +1985,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
GCIdMark gc_id_mark;
_gc_timer.register_gc_start(); _gc_timer.register_gc_start();
_gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start()); _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
@ -2031,7 +2033,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
gc_task_manager()->task_idle_workers(); gc_task_manager()->task_idle_workers();
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id()); GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
TraceCollectorStats tcs(counters()); TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
@ -2331,7 +2333,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
bool maximum_heap_compaction, bool maximum_heap_compaction,
ParallelOldTracer *gc_tracer) { ParallelOldTracer *gc_tracer) {
// Recursively traverse all live objects and mark them // Recursively traverse all live objects and mark them
GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers(); uint parallel_gc_threads = heap->gc_task_manager()->workers();
@ -2346,7 +2348,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
ClassLoaderDataGraph::clear_claimed_marks(); ClassLoaderDataGraph::clear_claimed_marks();
{ {
GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
ParallelScavengeHeap::ParStrongRootsScope psrs; ParallelScavengeHeap::ParStrongRootsScope psrs;
@ -2375,24 +2377,24 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
// Process reference objects found during marking // Process reference objects found during marking
{ {
GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
ReferenceProcessorStats stats; ReferenceProcessorStats stats;
if (ref_processor()->processing_is_mt()) { if (ref_processor()->processing_is_mt()) {
RefProcTaskExecutor task_executor; RefProcTaskExecutor task_executor;
stats = ref_processor()->process_discovered_references( stats = ref_processor()->process_discovered_references(
is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
&task_executor, &_gc_timer, _gc_tracer.gc_id()); &task_executor, &_gc_timer);
} else { } else {
stats = ref_processor()->process_discovered_references( stats = ref_processor()->process_discovered_references(
is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL, is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
&_gc_timer, _gc_tracer.gc_id()); &_gc_timer);
} }
gc_tracer->report_gc_reference_stats(stats); gc_tracer->report_gc_reference_stats(stats);
} }
GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
// This is the point where the entire marking should have completed. // This is the point where the entire marking should have completed.
assert(cm->marking_stacks_empty(), "Marking should have completed"); assert(cm->marking_stacks_empty(), "Marking should have completed");
@ -2423,7 +2425,7 @@ static PSAlwaysTrueClosure always_true;
void PSParallelCompact::adjust_roots() { void PSParallelCompact::adjust_roots() {
// Adjust the pointers to reflect the new locations // Adjust the pointers to reflect the new locations
GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
// Need new claim bits when tracing through and adjusting pointers. // Need new claim bits when tracing through and adjusting pointers.
ClassLoaderDataGraph::clear_claimed_marks(); ClassLoaderDataGraph::clear_claimed_marks();
@ -2459,7 +2461,7 @@ void PSParallelCompact::adjust_roots() {
void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q, void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
uint parallel_gc_threads) uint parallel_gc_threads)
{ {
GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
// Find the threads that are active // Find the threads that are active
unsigned int which = 0; unsigned int which = 0;
@ -2533,7 +2535,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q, void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
uint parallel_gc_threads) { uint parallel_gc_threads) {
GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
ParallelCompactData& sd = PSParallelCompact::summary_data(); ParallelCompactData& sd = PSParallelCompact::summary_data();
@ -2615,7 +2617,7 @@ void PSParallelCompact::enqueue_region_stealing_tasks(
GCTaskQueue* q, GCTaskQueue* q,
ParallelTaskTerminator* terminator_ptr, ParallelTaskTerminator* terminator_ptr,
uint parallel_gc_threads) { uint parallel_gc_threads) {
GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
// Once a thread has drained its stack, it should try to steal regions from // Once a thread has drained its stack, it should try to steal regions from
// other threads. // other threads.
@ -2663,7 +2665,7 @@ void PSParallelCompact::write_block_fill_histogram(outputStream* const out)
void PSParallelCompact::compact() { void PSParallelCompact::compact() {
// trace("5"); // trace("5");
GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSOldGen* old_gen = heap->old_gen(); PSOldGen* old_gen = heap->old_gen();
@ -2679,7 +2681,7 @@ void PSParallelCompact::compact() {
enqueue_region_stealing_tasks(q, &terminator, active_gc_threads); enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
{ {
GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
gc_task_manager()->execute_and_wait(q); gc_task_manager()->execute_and_wait(q);
@ -2693,7 +2695,7 @@ void PSParallelCompact::compact() {
{ {
// Update the deferred objects, if any. Any compaction manager can be used. // Update the deferred objects, if any. Any compaction manager can be used.
GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
ParCompactionManager* cm = ParCompactionManager::manager_array(0); ParCompactionManager* cm = ParCompactionManager::manager_array(0);
for (unsigned int id = old_space_id; id < last_space_id; ++id) { for (unsigned int id = old_space_id; id < last_space_id; ++id) {
update_deferred_objects(cm, SpaceId(id)); update_deferred_objects(cm, SpaceId(id));


@ -36,6 +36,7 @@
#include "gc/shared/collectorPolicy.hpp" #include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp" #include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTrace.hpp"
@ -278,6 +279,7 @@ bool PSScavenge::invoke_no_policy() {
return false; return false;
} }
GCIdMark gc_id_mark;
_gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start()); _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
bool promotion_failure_occurred = false; bool promotion_failure_occurred = false;
@ -322,7 +324,7 @@ bool PSScavenge::invoke_no_policy() {
HandleMark hm; HandleMark hm;
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id()); GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
TraceCollectorStats tcs(counters()); TraceCollectorStats tcs(counters());
TraceMemoryManagerStats tms(false /* not full GC */,gc_cause); TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
@ -387,7 +389,7 @@ bool PSScavenge::invoke_no_policy() {
// We'll use the promotion manager again later. // We'll use the promotion manager again later.
PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager(); PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
{ {
GCTraceTime tm("Scavenge", false, false, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm("Scavenge", false, false, &_gc_timer);
ParallelScavengeHeap::ParStrongRootsScope psrs; ParallelScavengeHeap::ParStrongRootsScope psrs;
GCTaskQueue* q = GCTaskQueue::create(); GCTaskQueue* q = GCTaskQueue::create();
@ -429,7 +431,7 @@ bool PSScavenge::invoke_no_policy() {
// Process reference objects discovered during scavenge // Process reference objects discovered during scavenge
{ {
GCTraceTime tm("References", false, false, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm("References", false, false, &_gc_timer);
reference_processor()->setup_policy(false); // not always_clear reference_processor()->setup_policy(false); // not always_clear
reference_processor()->set_active_mt_degree(active_workers); reference_processor()->set_active_mt_degree(active_workers);
@ -440,10 +442,10 @@ bool PSScavenge::invoke_no_policy() {
PSRefProcTaskExecutor task_executor; PSRefProcTaskExecutor task_executor;
stats = reference_processor()->process_discovered_references( stats = reference_processor()->process_discovered_references(
&_is_alive_closure, &keep_alive, &evac_followers, &task_executor, &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
&_gc_timer, _gc_tracer.gc_id()); &_gc_timer);
} else { } else {
stats = reference_processor()->process_discovered_references( stats = reference_processor()->process_discovered_references(
&_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer, _gc_tracer.gc_id()); &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
} }
_gc_tracer.report_gc_reference_stats(stats); _gc_tracer.report_gc_reference_stats(stats);
@ -458,7 +460,7 @@ bool PSScavenge::invoke_no_policy() {
} }
{ {
GCTraceTime tm("StringTable", false, false, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm("StringTable", false, false, &_gc_timer);
// Unlink any dead interned Strings and process the remaining live ones. // Unlink any dead interned Strings and process the remaining live ones.
PSScavengeRootsClosure root_closure(promotion_manager); PSScavengeRootsClosure root_closure(promotion_manager);
StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure); StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
@ -628,7 +630,7 @@ bool PSScavenge::invoke_no_policy() {
NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
{ {
GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer, _gc_tracer.gc_id()); GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);
CodeCache::prune_scavenge_root_nmethods(); CodeCache::prune_scavenge_root_nmethods();
} }


@ -583,7 +583,7 @@ void DefNewGeneration::collect(bool full,
init_assuming_no_promotion_failure(); init_assuming_no_promotion_failure();
GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id()); GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
// Capture heap used before collection (for printing). // Capture heap used before collection (for printing).
size_t gch_prev_used = gch->used(); size_t gch_prev_used = gch->used();
@ -646,7 +646,7 @@ void DefNewGeneration::collect(bool full,
rp->setup_policy(clear_all_soft_refs); rp->setup_policy(clear_all_soft_refs);
const ReferenceProcessorStats& stats = const ReferenceProcessorStats& stats =
rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
NULL, _gc_timer, gc_tracer.gc_id()); NULL, _gc_timer);
gc_tracer.report_gc_reference_stats(stats); gc_tracer.report_gc_reference_stats(stats);
if (!_promotion_failed) { if (!_promotion_failed) {


@ -70,7 +70,7 @@ void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_so
set_ref_processor(rp); set_ref_processor(rp);
rp->setup_policy(clear_all_softrefs); rp->setup_policy(clear_all_softrefs);
GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id()); GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
gch->trace_heap_before_gc(_gc_tracer); gch->trace_heap_before_gc(_gc_tracer);
@ -186,7 +186,7 @@ void GenMarkSweep::deallocate_stacks() {
void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) { void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them // Recursively traverse all live objects and mark them
GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer);
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
@ -217,7 +217,7 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
ref_processor()->setup_policy(clear_all_softrefs); ref_processor()->setup_policy(clear_all_softrefs);
const ReferenceProcessorStats& stats = const ReferenceProcessorStats& stats =
ref_processor()->process_discovered_references( ref_processor()->process_discovered_references(
&is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer, _gc_tracer->gc_id()); &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer);
gc_tracer()->report_gc_reference_stats(stats); gc_tracer()->report_gc_reference_stats(stats);
} }
@ -259,7 +259,7 @@ void GenMarkSweep::mark_sweep_phase2() {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer);
gch->prepare_for_compaction(); gch->prepare_for_compaction();
} }
@ -275,7 +275,7 @@ void GenMarkSweep::mark_sweep_phase3() {
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
// Adjust the pointers to reflect the new locations // Adjust the pointers to reflect the new locations
GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer);
// Need new claim bits for the pointer adjustment tracing. // Need new claim bits for the pointer adjustment tracing.
ClassLoaderDataGraph::clear_claimed_marks(); ClassLoaderDataGraph::clear_claimed_marks();
@ -327,7 +327,7 @@ void GenMarkSweep::mark_sweep_phase4() {
// to use a higher index (saved from phase2) when verifying perm_gen. // to use a higher index (saved from phase2) when verifying perm_gen.
GenCollectedHeap* gch = GenCollectedHeap::heap(); GenCollectedHeap* gch = GenCollectedHeap::heap();
GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer);
GenCompactClosure blk; GenCompactClosure blk;
gch->generation_iterate(&blk, true); gch->generation_iterate(&blk, true);


@ -573,13 +573,13 @@ void CollectedHeap::resize_all_tlabs() {
void CollectedHeap::pre_full_gc_dump(GCTimer* timer) { void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
if (HeapDumpBeforeFullGC) { if (HeapDumpBeforeFullGC) {
GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create()); GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer);
// We are doing a full collection and a heap dump before // We are doing a full collection and a heap dump before
// full collection has been requested. // full collection has been requested.
HeapDumper::dump_heap(); HeapDumper::dump_heap();
} }
if (PrintClassHistogramBeforeFullGC) { if (PrintClassHistogramBeforeFullGC) {
GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer, GCId::create()); GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer);
VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */); VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
inspector.doit(); inspector.doit();
} }
@ -587,11 +587,11 @@ void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
void CollectedHeap::post_full_gc_dump(GCTimer* timer) { void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
if (HeapDumpAfterFullGC) { if (HeapDumpAfterFullGC) {
GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer, GCId::create()); GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer);
HeapDumper::dump_heap(); HeapDumper::dump_heap();
} }
if (PrintClassHistogramAfterFullGC) { if (PrintClassHistogramAfterFullGC) {
GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer, GCId::create()); GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer);
VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */); VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
inspector.doit(); inspector.doit();
} }


@ -25,18 +25,37 @@
#include "precompiled.hpp" #include "precompiled.hpp"
#include "gc/shared/gcId.hpp" #include "gc/shared/gcId.hpp"
#include "runtime/safepoint.hpp" #include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
uint GCId::_next_id = 0; uint GCId::_next_id = 0;
-const GCId GCId::create() {
-  return GCId(_next_id++);
-}
-
-const GCId GCId::peek() {
-  return GCId(_next_id);
-}
-
-const GCId GCId::undefined() {
-  return GCId(UNDEFINED);
-}
-
-bool GCId::is_undefined() const {
-  return _id == UNDEFINED;
-}
+NamedThread* currentNamedthread() {
+  assert(Thread::current()->is_Named_thread(), "This thread must be NamedThread");
+  return (NamedThread*)Thread::current();
+}
+
+const uint GCId::create() {
+  return _next_id++;
+}
+
+const uint GCId::current() {
+  assert(currentNamedthread()->gc_id() != undefined(), "Using undefined GC id.");
+  return current_raw();
+}
+
+const uint GCId::current_raw() {
+  return currentNamedthread()->gc_id();
+}
+
+GCIdMark::GCIdMark() : _gc_id(GCId::create()) {
+  currentNamedthread()->set_gc_id(_gc_id);
+}
+
+GCIdMark::GCIdMark(uint gc_id) : _gc_id(gc_id) {
+  currentNamedthread()->set_gc_id(_gc_id);
+}
+
+GCIdMark::~GCIdMark() {
+  currentNamedthread()->set_gc_id(GCId::undefined());
+}


@ -27,25 +27,26 @@
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
-class GCId VALUE_OBJ_CLASS_SPEC {
- private:
-  uint _id;
-  GCId(uint id) : _id(id) {}
-  GCId() { } // Unused
-
+class GCId : public AllStatic {
+  friend class GCIdMark;
+
   static uint _next_id;
   static const uint UNDEFINED = (uint)-1;
+  static const uint create();

  public:
-  uint id() const {
-    assert(_id != UNDEFINED, "Using undefined GC ID");
-    return _id;
-  }
-  bool is_undefined() const;
-
-  static const GCId create();
-  static const GCId peek();
-  static const GCId undefined();
+  // Returns the currently active GC id. Asserts that there is an active GC id.
+  static const uint current();
+  // Same as current() but can return undefined() if no GC id is currently active
+  static const uint current_raw();
+  static const uint undefined() { return UNDEFINED; }
+};
+
+class GCIdMark : public StackObj {
+  uint _gc_id;
+ public:
+  GCIdMark();
+  GCIdMark(uint gc_id);
+  ~GCIdMark();
 };
#endif // SHARE_VM_GC_SHARED_GCID_HPP #endif // SHARE_VM_GC_SHARED_GCID_HPP
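
Taken together, gcId.cpp and gcId.hpp above replace the old pass-a-GCId-object style with a single source of truth: a process-wide counter plus a per-thread slot (NamedThread::_gc_id) that a GCIdMark fills in for the duration of a collection and that GCId::current() reads back. The following is a minimal, stand-alone C++ sketch of that pattern, not HotSpot code; the names (ScopedGcId, current_gc_id) and the use of a thread_local variable instead of a NamedThread field are illustrative assumptions.

#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace sketch {

constexpr uint32_t kUndefinedGcId = UINT32_MAX;

std::atomic<uint32_t> next_gc_id{0};                   // process-wide counter, like GCId::_next_id
thread_local uint32_t current_gc_id = kUndefinedGcId;  // per-thread slot, like NamedThread::_gc_id

// RAII mark in the spirit of GCIdMark: installs an id for the enclosing scope
// and clears it again on destruction.
class ScopedGcId {
 public:
  ScopedGcId() : ScopedGcId(next_gc_id.fetch_add(1)) {}
  explicit ScopedGcId(uint32_t id) { current_gc_id = id; }
  ~ScopedGcId() { current_gc_id = kUndefinedGcId; }
};

// In the spirit of GCId::current(): any code reached while a mark is live can
// ask for the ambient id instead of receiving it as a parameter.
uint32_t current() {
  assert(current_gc_id != kUndefinedGcId && "no GC id is active");
  return current_gc_id;
}

}  // namespace sketch

int main() {
  {
    sketch::ScopedGcId mark;   // start of one "collection"
    std::printf("logging under GC #%u\n", (unsigned)sketch::current());
  }
  {
    sketch::ScopedGcId mark;   // the next collection gets a fresh id
    std::printf("logging under GC #%u\n", (unsigned)sketch::current());
  }
  return 0;
}
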


@ -40,31 +40,16 @@
#include "gc/g1/evacuationInfo.hpp" #include "gc/g1/evacuationInfo.hpp"
#endif #endif
#define assert_unset_gc_id() assert(_shared_gc_info.gc_id().is_undefined(), "GC already started?")
#define assert_set_gc_id() assert(!_shared_gc_info.gc_id().is_undefined(), "GC not started?")
void GCTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) { void GCTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) {
assert_unset_gc_id();
GCId gc_id = GCId::create();
_shared_gc_info.set_gc_id(gc_id);
_shared_gc_info.set_cause(cause); _shared_gc_info.set_cause(cause);
_shared_gc_info.set_start_timestamp(timestamp); _shared_gc_info.set_start_timestamp(timestamp);
} }
void GCTracer::report_gc_start(GCCause::Cause cause, const Ticks& timestamp) { void GCTracer::report_gc_start(GCCause::Cause cause, const Ticks& timestamp) {
assert_unset_gc_id();
report_gc_start_impl(cause, timestamp); report_gc_start_impl(cause, timestamp);
} }
bool GCTracer::has_reported_gc_start() const {
return !_shared_gc_info.gc_id().is_undefined();
}
void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) { void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
assert_set_gc_id();
_shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses()); _shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses());
_shared_gc_info.set_longest_pause(time_partitions->longest_pause()); _shared_gc_info.set_longest_pause(time_partitions->longest_pause());
_shared_gc_info.set_end_timestamp(timestamp); _shared_gc_info.set_end_timestamp(timestamp);
@ -74,16 +59,10 @@ void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_p
} }
void GCTracer::report_gc_end(const Ticks& timestamp, TimePartitions* time_partitions) { void GCTracer::report_gc_end(const Ticks& timestamp, TimePartitions* time_partitions) {
assert_set_gc_id();
report_gc_end_impl(timestamp, time_partitions); report_gc_end_impl(timestamp, time_partitions);
_shared_gc_info.set_gc_id(GCId::undefined());
} }
void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const { void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const {
assert_set_gc_id();
send_reference_stats_event(REF_SOFT, rps.soft_count()); send_reference_stats_event(REF_SOFT, rps.soft_count());
send_reference_stats_event(REF_WEAK, rps.weak_count()); send_reference_stats_event(REF_WEAK, rps.weak_count());
send_reference_stats_event(REF_FINAL, rps.final_count()); send_reference_stats_event(REF_FINAL, rps.final_count());
@ -92,14 +71,12 @@ void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) con
#if INCLUDE_SERVICES #if INCLUDE_SERVICES
class ObjectCountEventSenderClosure : public KlassInfoClosure { class ObjectCountEventSenderClosure : public KlassInfoClosure {
const GCId _gc_id;
const double _size_threshold_percentage; const double _size_threshold_percentage;
const size_t _total_size_in_words; const size_t _total_size_in_words;
const Ticks _timestamp; const Ticks _timestamp;
public: public:
ObjectCountEventSenderClosure(GCId gc_id, size_t total_size_in_words, const Ticks& timestamp) : ObjectCountEventSenderClosure(size_t total_size_in_words, const Ticks& timestamp) :
_gc_id(gc_id),
_size_threshold_percentage(ObjectCountCutOffPercent / 100), _size_threshold_percentage(ObjectCountCutOffPercent / 100),
_total_size_in_words(total_size_in_words), _total_size_in_words(total_size_in_words),
_timestamp(timestamp) _timestamp(timestamp)
@ -107,7 +84,7 @@ class ObjectCountEventSenderClosure : public KlassInfoClosure {
virtual void do_cinfo(KlassInfoEntry* entry) { virtual void do_cinfo(KlassInfoEntry* entry) {
if (should_send_event(entry)) { if (should_send_event(entry)) {
ObjectCountEventSender::send(entry, _gc_id, _timestamp); ObjectCountEventSender::send(entry, _timestamp);
} }
} }
@ -119,7 +96,6 @@ class ObjectCountEventSenderClosure : public KlassInfoClosure {
}; };
void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) { void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
assert_set_gc_id();
assert(is_alive_cl != NULL, "Must supply function to check liveness"); assert(is_alive_cl != NULL, "Must supply function to check liveness");
if (ObjectCountEventSender::should_send_event()) { if (ObjectCountEventSender::should_send_event()) {
@ -129,7 +105,7 @@ void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
if (!cit.allocation_failed()) { if (!cit.allocation_failed()) {
HeapInspection hi(false, false, false, NULL); HeapInspection hi(false, false, false, NULL);
hi.populate_table(&cit, is_alive_cl); hi.populate_table(&cit, is_alive_cl);
ObjectCountEventSenderClosure event_sender(_shared_gc_info.gc_id(), cit.size_of_instances_in_words(), Ticks::now()); ObjectCountEventSenderClosure event_sender(cit.size_of_instances_in_words(), Ticks::now());
cit.iterate(&event_sender); cit.iterate(&event_sender);
} }
} }
@ -137,14 +113,10 @@ void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
#endif // INCLUDE_SERVICES #endif // INCLUDE_SERVICES
void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary) const { void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
assert_set_gc_id();
send_gc_heap_summary_event(when, heap_summary); send_gc_heap_summary_event(when, heap_summary);
} }
void GCTracer::report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& summary) const { void GCTracer::report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& summary) const {
assert_set_gc_id();
send_meta_space_summary_event(when, summary); send_meta_space_summary_event(when, summary);
send_metaspace_chunk_free_list_summary(when, Metaspace::NonClassType, summary.metaspace_chunk_free_list_summary()); send_metaspace_chunk_free_list_summary(when, Metaspace::NonClassType, summary.metaspace_chunk_free_list_summary());
@ -154,7 +126,6 @@ void GCTracer::report_metaspace_summary(GCWhen::Type when, const MetaspaceSummar
} }
void YoungGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) { void YoungGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
assert_set_gc_id();
assert(_tenuring_threshold != UNSET_TENURING_THRESHOLD, "Tenuring threshold has not been reported"); assert(_tenuring_threshold != UNSET_TENURING_THRESHOLD, "Tenuring threshold has not been reported");
GCTracer::report_gc_end_impl(timestamp, time_partitions); GCTracer::report_gc_end_impl(timestamp, time_partitions);
@ -164,8 +135,6 @@ void YoungGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* t
} }
void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) const { void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) const {
assert_set_gc_id();
send_promotion_failed_event(pf_info); send_promotion_failed_event(pf_info);
} }
@ -189,78 +158,56 @@ bool YoungGCTracer::should_report_promotion_outside_plab_event() const {
void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size, void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
uint age, bool tenured, uint age, bool tenured,
size_t plab_size) const { size_t plab_size) const {
assert_set_gc_id();
send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size); send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size);
} }
void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size, void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size,
uint age, bool tenured) const { uint age, bool tenured) const {
assert_set_gc_id();
send_promotion_outside_plab_event(klass, obj_size, age, tenured); send_promotion_outside_plab_event(klass, obj_size, age, tenured);
} }
void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) { void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
assert_set_gc_id();
GCTracer::report_gc_end_impl(timestamp, time_partitions); GCTracer::report_gc_end_impl(timestamp, time_partitions);
send_old_gc_event(); send_old_gc_event();
} }
void ParallelOldTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) { void ParallelOldTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
assert_set_gc_id();
OldGCTracer::report_gc_end_impl(timestamp, time_partitions); OldGCTracer::report_gc_end_impl(timestamp, time_partitions);
send_parallel_old_event(); send_parallel_old_event();
} }
void ParallelOldTracer::report_dense_prefix(void* dense_prefix) { void ParallelOldTracer::report_dense_prefix(void* dense_prefix) {
assert_set_gc_id();
_parallel_old_gc_info.report_dense_prefix(dense_prefix); _parallel_old_gc_info.report_dense_prefix(dense_prefix);
} }
void OldGCTracer::report_concurrent_mode_failure() { void OldGCTracer::report_concurrent_mode_failure() {
assert_set_gc_id();
send_concurrent_mode_failure_event(); send_concurrent_mode_failure_event();
} }
#if INCLUDE_ALL_GCS #if INCLUDE_ALL_GCS
void G1MMUTracer::report_mmu(const GCId& gcId, double timeSlice, double gcTime, double maxTime) { void G1MMUTracer::report_mmu(double timeSlice, double gcTime, double maxTime) {
assert(!gcId.is_undefined(), "Undefined GC id"); send_g1_mmu_event(timeSlice, gcTime, maxTime);
send_g1_mmu_event(gcId, timeSlice, gcTime, maxTime);
} }
void G1NewTracer::report_yc_type(G1YCType type) { void G1NewTracer::report_yc_type(G1YCType type) {
assert_set_gc_id();
_g1_young_gc_info.set_type(type); _g1_young_gc_info.set_type(type);
} }
void G1NewTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) { void G1NewTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
assert_set_gc_id();
YoungGCTracer::report_gc_end_impl(timestamp, time_partitions); YoungGCTracer::report_gc_end_impl(timestamp, time_partitions);
send_g1_young_gc_event(); send_g1_young_gc_event();
} }
void G1NewTracer::report_evacuation_info(EvacuationInfo* info) { void G1NewTracer::report_evacuation_info(EvacuationInfo* info) {
assert_set_gc_id();
send_evacuation_info_event(info); send_evacuation_info_event(info);
} }
void G1NewTracer::report_evacuation_failed(EvacuationFailedInfo& ef_info) { void G1NewTracer::report_evacuation_failed(EvacuationFailedInfo& ef_info) {
assert_set_gc_id();
send_evacuation_failed_event(ef_info); send_evacuation_failed_event(ef_info);
ef_info.reset(); ef_info.reset();
} }
void G1NewTracer::report_evacuation_statistics(const G1EvacSummary& young_summary, const G1EvacSummary& old_summary) const { void G1NewTracer::report_evacuation_statistics(const G1EvacSummary& young_summary, const G1EvacSummary& old_summary) const {
assert_set_gc_id();
send_young_evacuation_statistics(young_summary); send_young_evacuation_statistics(young_summary);
send_old_evacuation_statistics(old_summary); send_old_evacuation_statistics(old_summary);
} }


@ -52,7 +52,6 @@ class BoolObjectClosure;
class SharedGCInfo VALUE_OBJ_CLASS_SPEC { class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
private: private:
GCId _gc_id;
GCName _name; GCName _name;
GCCause::Cause _cause; GCCause::Cause _cause;
Ticks _start_timestamp; Ticks _start_timestamp;
@ -62,7 +61,6 @@ class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
public: public:
SharedGCInfo(GCName name) : SharedGCInfo(GCName name) :
_gc_id(GCId::undefined()),
_name(name), _name(name),
_cause(GCCause::_last_gc_cause), _cause(GCCause::_last_gc_cause),
_start_timestamp(), _start_timestamp(),
@ -71,9 +69,6 @@ class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
_longest_pause() { _longest_pause() {
} }
void set_gc_id(GCId gc_id) { _gc_id = gc_id; }
const GCId& gc_id() const { return _gc_id; }
void set_start_timestamp(const Ticks& timestamp) { _start_timestamp = timestamp; } void set_start_timestamp(const Ticks& timestamp) { _start_timestamp = timestamp; }
const Ticks start_timestamp() const { return _start_timestamp; } const Ticks start_timestamp() const { return _start_timestamp; }
@ -128,8 +123,6 @@ class GCTracer : public ResourceObj {
void report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& metaspace_summary) const; void report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& metaspace_summary) const;
void report_gc_reference_stats(const ReferenceProcessorStats& rp) const; void report_gc_reference_stats(const ReferenceProcessorStats& rp) const;
void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN; void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN;
bool has_reported_gc_start() const;
const GCId& gc_id() { return _shared_gc_info.gc_id(); }
protected: protected:
GCTracer(GCName name) : _shared_gc_info(name) {} GCTracer(GCName name) : _shared_gc_info(name) {}
@ -242,10 +235,10 @@ class ParNewTracer : public YoungGCTracer {
#if INCLUDE_ALL_GCS #if INCLUDE_ALL_GCS
class G1MMUTracer : public AllStatic { class G1MMUTracer : public AllStatic {
static void send_g1_mmu_event(const GCId& gcId, double timeSlice, double gcTime, double maxTime); static void send_g1_mmu_event(double timeSlice, double gcTime, double maxTime);
public: public:
static void report_mmu(const GCId& gcId, double timeSlice, double gcTime, double maxTime); static void report_mmu(double timeSlice, double gcTime, double maxTime);
}; };
class G1NewTracer : public YoungGCTracer { class G1NewTracer : public YoungGCTracer {


@ -44,7 +44,7 @@ typedef uintptr_t TraceAddress;
void GCTracer::send_garbage_collection_event() const { void GCTracer::send_garbage_collection_event() const {
EventGCGarbageCollection event(UNTIMED); EventGCGarbageCollection event(UNTIMED);
if (event.should_commit()) { if (event.should_commit()) {
event.set_gcId(_shared_gc_info.gc_id().id()); event.set_gcId(GCId::current());
event.set_name(_shared_gc_info.name()); event.set_name(_shared_gc_info.name());
event.set_cause((u2) _shared_gc_info.cause()); event.set_cause((u2) _shared_gc_info.cause());
event.set_sumOfPauses(_shared_gc_info.sum_of_pauses()); event.set_sumOfPauses(_shared_gc_info.sum_of_pauses());
@ -58,7 +58,7 @@ void GCTracer::send_garbage_collection_event() const {
void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const { void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
EventGCReferenceStatistics e; EventGCReferenceStatistics e;
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_shared_gc_info.gc_id().id()); e.set_gcId(GCId::current());
e.set_type((u1)type); e.set_type((u1)type);
e.set_count(count); e.set_count(count);
e.commit(); e.commit();
@ -69,7 +69,7 @@ void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspa
const MetaspaceChunkFreeListSummary& summary) const { const MetaspaceChunkFreeListSummary& summary) const {
EventMetaspaceChunkFreeListSummary e; EventMetaspaceChunkFreeListSummary e;
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_shared_gc_info.gc_id().id()); e.set_gcId(GCId::current());
e.set_when(when); e.set_when(when);
e.set_metadataType(mdtype); e.set_metadataType(mdtype);
@ -92,7 +92,7 @@ void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspa
void ParallelOldTracer::send_parallel_old_event() const { void ParallelOldTracer::send_parallel_old_event() const {
EventGCParallelOld e(UNTIMED); EventGCParallelOld e(UNTIMED);
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_shared_gc_info.gc_id().id()); e.set_gcId(GCId::current());
e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix()); e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
e.set_starttime(_shared_gc_info.start_timestamp()); e.set_starttime(_shared_gc_info.start_timestamp());
e.set_endtime(_shared_gc_info.end_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp());
@ -103,7 +103,7 @@ void ParallelOldTracer::send_parallel_old_event() const {
void YoungGCTracer::send_young_gc_event() const { void YoungGCTracer::send_young_gc_event() const {
EventGCYoungGarbageCollection e(UNTIMED); EventGCYoungGarbageCollection e(UNTIMED);
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_shared_gc_info.gc_id().id()); e.set_gcId(GCId::current());
e.set_tenuringThreshold(_tenuring_threshold); e.set_tenuringThreshold(_tenuring_threshold);
e.set_starttime(_shared_gc_info.start_timestamp()); e.set_starttime(_shared_gc_info.start_timestamp());
e.set_endtime(_shared_gc_info.end_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp());
@ -125,7 +125,7 @@ void YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_si
EventPromoteObjectInNewPLAB event; EventPromoteObjectInNewPLAB event;
if (event.should_commit()) { if (event.should_commit()) {
event.set_gcId(_shared_gc_info.gc_id().id()); event.set_gcId(GCId::current());
event.set_class(klass); event.set_class(klass);
event.set_objectSize(obj_size); event.set_objectSize(obj_size);
event.set_tenured(tenured); event.set_tenured(tenured);
@ -140,7 +140,7 @@ void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_s
EventPromoteObjectOutsidePLAB event; EventPromoteObjectOutsidePLAB event;
if (event.should_commit()) { if (event.should_commit()) {
event.set_gcId(_shared_gc_info.gc_id().id()); event.set_gcId(GCId::current());
event.set_class(klass); event.set_class(klass);
event.set_objectSize(obj_size); event.set_objectSize(obj_size);
event.set_tenured(tenured); event.set_tenured(tenured);
@ -152,7 +152,7 @@ void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_s
void OldGCTracer::send_old_gc_event() const { void OldGCTracer::send_old_gc_event() const {
EventGCOldGarbageCollection e(UNTIMED); EventGCOldGarbageCollection e(UNTIMED);
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_shared_gc_info.gc_id().id()); e.set_gcId(GCId::current());
e.set_starttime(_shared_gc_info.start_timestamp()); e.set_starttime(_shared_gc_info.start_timestamp());
e.set_endtime(_shared_gc_info.end_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp());
e.commit(); e.commit();
@ -171,7 +171,7 @@ static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) {
void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const { void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
EventPromotionFailed e; EventPromotionFailed e;
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_shared_gc_info.gc_id().id()); e.set_gcId(GCId::current());
e.set_data(to_trace_struct(pf_info)); e.set_data(to_trace_struct(pf_info));
e.set_thread(pf_info.thread()->thread_id()); e.set_thread(pf_info.thread()->thread_id());
e.commit(); e.commit();
@ -182,7 +182,7 @@ void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_in
void OldGCTracer::send_concurrent_mode_failure_event() { void OldGCTracer::send_concurrent_mode_failure_event() {
EventConcurrentModeFailure e; EventConcurrentModeFailure e;
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_shared_gc_info.gc_id().id()); e.set_gcId(GCId::current());
e.commit(); e.commit();
} }
} }
@ -191,7 +191,7 @@ void OldGCTracer::send_concurrent_mode_failure_event() {
void G1NewTracer::send_g1_young_gc_event() { void G1NewTracer::send_g1_young_gc_event() {
EventGCG1GarbageCollection e(UNTIMED); EventGCG1GarbageCollection e(UNTIMED);
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_shared_gc_info.gc_id().id()); e.set_gcId(GCId::current());
e.set_type(_g1_young_gc_info.type()); e.set_type(_g1_young_gc_info.type());
e.set_starttime(_shared_gc_info.start_timestamp()); e.set_starttime(_shared_gc_info.start_timestamp());
e.set_endtime(_shared_gc_info.end_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp());
@ -199,10 +199,10 @@ void G1NewTracer::send_g1_young_gc_event() {
} }
} }
void G1MMUTracer::send_g1_mmu_event(const GCId& gcId, double timeSlice, double gcTime, double maxTime) { void G1MMUTracer::send_g1_mmu_event(double timeSlice, double gcTime, double maxTime) {
EventGCG1MMU e; EventGCG1MMU e;
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(gcId.id()); e.set_gcId(GCId::current());
e.set_timeSlice(timeSlice); e.set_timeSlice(timeSlice);
e.set_gcTime(gcTime); e.set_gcTime(gcTime);
e.set_maxGcTime(maxTime); e.set_maxGcTime(maxTime);
@ -213,7 +213,7 @@ void G1MMUTracer::send_g1_mmu_event(const GCId& gcId, double timeSlice, double g
void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) { void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
EventEvacuationInfo e; EventEvacuationInfo e;
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_shared_gc_info.gc_id().id()); e.set_gcId(GCId::current());
e.set_cSetRegions(info->collectionset_regions()); e.set_cSetRegions(info->collectionset_regions());
e.set_cSetUsedBefore(info->collectionset_used_before()); e.set_cSetUsedBefore(info->collectionset_used_before());
e.set_cSetUsedAfter(info->collectionset_used_after()); e.set_cSetUsedAfter(info->collectionset_used_after());
@ -229,7 +229,7 @@ void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const { void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
EventEvacuationFailed e; EventEvacuationFailed e;
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_shared_gc_info.gc_id().id()); e.set_gcId(GCId::current());
e.set_data(to_trace_struct(ef_info)); e.set_data(to_trace_struct(ef_info));
e.commit(); e.commit();
} }
@ -253,7 +253,7 @@ static TraceStructG1EvacStats create_g1_evacstats(unsigned gcid, const G1EvacSum
void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const { void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const {
EventGCG1EvacuationYoungStatistics surv_evt; EventGCG1EvacuationYoungStatistics surv_evt;
if (surv_evt.should_commit()) { if (surv_evt.should_commit()) {
surv_evt.set_stats(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary)); surv_evt.set_stats(create_g1_evacstats(GCId::current(), summary));
surv_evt.commit(); surv_evt.commit();
} }
} }
@ -261,7 +261,7 @@ void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary)
void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const { void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const {
EventGCG1EvacuationOldStatistics old_evt; EventGCG1EvacuationOldStatistics old_evt;
if (old_evt.should_commit()) { if (old_evt.should_commit()) {
old_evt.set_stats(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary)); old_evt.set_stats(create_g1_evacstats(GCId::current(), summary));
old_evt.commit(); old_evt.commit();
} }
} }
@ -287,17 +287,16 @@ static TraceStructObjectSpace to_trace_struct(const SpaceSummary& summary) {
} }
class GCHeapSummaryEventSender : public GCHeapSummaryVisitor { class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
GCId _gc_id;
GCWhen::Type _when; GCWhen::Type _when;
public: public:
GCHeapSummaryEventSender(GCId gc_id, GCWhen::Type when) : _gc_id(gc_id), _when(when) {} GCHeapSummaryEventSender(GCWhen::Type when) : _when(when) {}
void visit(const GCHeapSummary* heap_summary) const { void visit(const GCHeapSummary* heap_summary) const {
const VirtualSpaceSummary& heap_space = heap_summary->heap(); const VirtualSpaceSummary& heap_space = heap_summary->heap();
EventGCHeapSummary e; EventGCHeapSummary e;
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_gc_id.id()); e.set_gcId(GCId::current());
e.set_when((u1)_when); e.set_when((u1)_when);
e.set_heapSpace(to_trace_struct(heap_space)); e.set_heapSpace(to_trace_struct(heap_space));
e.set_heapUsed(heap_summary->used()); e.set_heapUsed(heap_summary->used());
@ -310,7 +309,7 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
EventG1HeapSummary e; EventG1HeapSummary e;
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_gc_id.id()); e.set_gcId(GCId::current());
e.set_when((u1)_when); e.set_when((u1)_when);
e.set_edenUsedSize(g1_heap_summary->edenUsed()); e.set_edenUsedSize(g1_heap_summary->edenUsed());
e.set_edenTotalSize(g1_heap_summary->edenCapacity()); e.set_edenTotalSize(g1_heap_summary->edenCapacity());
@ -331,7 +330,7 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
EventPSHeapSummary e; EventPSHeapSummary e;
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_gc_id.id()); e.set_gcId(GCId::current());
e.set_when((u1)_when); e.set_when((u1)_when);
e.set_oldSpace(to_trace_struct(ps_heap_summary->old())); e.set_oldSpace(to_trace_struct(ps_heap_summary->old()));
@ -346,7 +345,7 @@ class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
}; };
void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const { void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
GCHeapSummaryEventSender visitor(_shared_gc_info.gc_id(), when); GCHeapSummaryEventSender visitor(when);
heap_summary.accept(&visitor); heap_summary.accept(&visitor);
} }
@ -363,7 +362,7 @@ static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const { void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
EventMetaspaceSummary e; EventMetaspaceSummary e;
if (e.should_commit()) { if (e.should_commit()) {
e.set_gcId(_shared_gc_info.gc_id().id()); e.set_gcId(GCId::current());
e.set_when((u1) when); e.set_when((u1) when);
e.set_gcThreshold(meta_space_summary.capacity_until_GC()); e.set_gcThreshold(meta_space_summary.capacity_until_GC());
e.set_metaspace(to_trace_struct(meta_space_summary.meta_space())); e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
@ -374,15 +373,12 @@ void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceS
} }
class PhaseSender : public PhaseVisitor { class PhaseSender : public PhaseVisitor {
GCId _gc_id;
public: public:
PhaseSender(GCId gc_id) : _gc_id(gc_id) {}
template<typename T> template<typename T>
void send_phase(PausePhase* pause) { void send_phase(PausePhase* pause) {
T event(UNTIMED); T event(UNTIMED);
if (event.should_commit()) { if (event.should_commit()) {
event.set_gcId(_gc_id.id()); event.set_gcId(GCId::current());
event.set_name(pause->name()); event.set_name(pause->name());
event.set_starttime(pause->start()); event.set_starttime(pause->start());
event.set_endtime(pause->end()); event.set_endtime(pause->end());
@ -406,7 +402,7 @@ class PhaseSender : public PhaseVisitor {
}; };
void GCTracer::send_phase_events(TimePartitions* time_partitions) const { void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
PhaseSender phase_reporter(_shared_gc_info.gc_id()); PhaseSender phase_reporter;
TimePartitionPhasesIterator iter(time_partitions); TimePartitionPhasesIterator iter(time_partitions);
while (iter.has_next()) { while (iter.has_next()) {


@ -35,7 +35,7 @@
#include "utilities/ticks.inline.hpp" #include "utilities/ticks.inline.hpp"
GCTraceTimeImpl::GCTraceTimeImpl(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id) : GCTraceTimeImpl::GCTraceTimeImpl(const char* title, bool doit, bool print_cr, GCTimer* timer) :
_title(title), _doit(doit), _print_cr(print_cr), _timer(timer), _start_counter() { _title(title), _doit(doit), _print_cr(print_cr), _timer(timer), _start_counter() {
if (_doit || _timer != NULL) { if (_doit || _timer != NULL) {
_start_counter.stamp(); _start_counter.stamp();
@ -49,11 +49,7 @@ GCTraceTimeImpl::GCTraceTimeImpl(const char* title, bool doit, bool print_cr, GC
} }
if (_doit) { if (_doit) {
gclog_or_tty->date_stamp(PrintGCDateStamps); gclog_or_tty->gclog_stamp();
gclog_or_tty->stamp(PrintGCTimeStamps);
if (PrintGCID) {
gclog_or_tty->print("#%u: ", gc_id.id());
}
gclog_or_tty->print("[%s", title); gclog_or_tty->print("[%s", title);
gclog_or_tty->flush(); gclog_or_tty->flush();
} }


@ -40,7 +40,7 @@ class GCTraceTimeImpl VALUE_OBJ_CLASS_SPEC {
Ticks _start_counter; Ticks _start_counter;
public: public:
GCTraceTimeImpl(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id); GCTraceTimeImpl(const char* title, bool doit, bool print_cr, GCTimer* timer);
~GCTraceTimeImpl(); ~GCTraceTimeImpl();
}; };
@ -48,8 +48,8 @@ class GCTraceTime : public StackObj {
GCTraceTimeImpl _gc_trace_time_impl; GCTraceTimeImpl _gc_trace_time_impl;
public: public:
GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id) : GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) :
_gc_trace_time_impl(title, doit, print_cr, timer, gc_id) {}; _gc_trace_time_impl(title, doit, print_cr, timer) {};
}; };
#endif // SHARE_VM_GC_SHARED_GCTRACETIME_HPP #endif // SHARE_VM_GC_SHARED_GCTRACETIME_HPP
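
The GCTraceTime/GCTraceTimeImpl changes above are representative of most of this patch: helpers that used to be handed a GCId argument now ask the current thread for it, so one parameter disappears from every call site. A tiny before/after sketch of that API shape in plain C++ (illustrative names only, and assuming some mark has already set the thread-local id as in the earlier sketch):

#include <cstdint>
#include <cstdio>

thread_local uint32_t current_gc_id = 0;   // assume an enclosing mark set this

// Before: the id had to be threaded through every tracing helper.
void trace_phase_with_id(const char* title, uint32_t gc_id) {
  std::printf("#%u: [%s]\n", (unsigned)gc_id, title);
}

// After: the helper picks the id up from the thread, so callers lose an argument.
void trace_phase(const char* title) {
  std::printf("#%u: [%s]\n", (unsigned)current_gc_id, title);
}

int main() {
  current_gc_id = 3;
  trace_phase_with_id("phase 1", current_gc_id);  // old style
  trace_phase("phase 1");                         // new style
  return 0;
}
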


@ -30,6 +30,7 @@
#include "code/icBuffer.hpp" #include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp" #include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTrace.hpp" #include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp" #include "gc/shared/gcTraceTime.hpp"
@ -315,9 +316,7 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz
bool restore_marks_for_biased_locking) { bool restore_marks_for_biased_locking) {
// Timer for individual generations. Last argument is false: no CR // Timer for individual generations. Last argument is false: no CR
// FIXME: We should try to start the timing earlier to cover more of the GC pause // FIXME: We should try to start the timing earlier to cover more of the GC pause
// The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
// so we can assume here that the next GC id is what we want.
GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek()); GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL);
TraceCollectorStats tcs(gen->counters()); TraceCollectorStats tcs(gen->counters());
TraceMemoryManagerStats tmms(gen->kind(),gc_cause()); TraceMemoryManagerStats tmms(gen->kind(),gc_cause());
@ -434,6 +433,8 @@ void GenCollectedHeap::do_collection(bool full,
return; // GC is disabled (e.g. JNI GetXXXCritical operation) return; // GC is disabled (e.g. JNI GetXXXCritical operation)
} }
GCIdMark gc_id_mark;
const bool do_clear_all_soft_refs = clear_all_soft_refs || const bool do_clear_all_soft_refs = clear_all_soft_refs ||
collector_policy()->should_clear_all_soft_refs(); collector_policy()->should_clear_all_soft_refs();
@ -449,9 +450,7 @@ void GenCollectedHeap::do_collection(bool full,
bool complete = full && (max_generation == OldGen); bool complete = full && (max_generation == OldGen);
const char* gc_cause_prefix = complete ? "Full GC" : "GC"; const char* gc_cause_prefix = complete ? "Full GC" : "GC";
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
// The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
// so we can assume here that the next GC id is what we want.
GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek()); GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);
gc_prologue(complete); gc_prologue(complete);
increment_total_collections(complete); increment_total_collections(complete);
@ -489,6 +488,7 @@ void GenCollectedHeap::do_collection(bool full,
bool must_restore_marks_for_biased_locking = false; bool must_restore_marks_for_biased_locking = false;
if (max_generation == OldGen && _old_gen->should_collect(full, size, is_tlab)) { if (max_generation == OldGen && _old_gen->should_collect(full, size, is_tlab)) {
GCIdMark gc_id_mark;
if (!complete) { if (!complete) {
// The full_collections increment was missed above. // The full_collections increment was missed above.
increment_total_full_collections(); increment_total_full_collections();
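
Note the two GCIdMark instances in GenCollectedHeap::do_collection() above: the outer one gives the collection request its id up front (which is why the GCId::peek() workaround and its comment can go away), while the nested one gives the old-generation collection that may follow its own id. A compressed sketch of that control flow, using stand-in names (Mark, collect) that are not HotSpot code:

#include <cstdint>
#include <cstdio>

thread_local uint32_t current_gc_id = UINT32_MAX;
static uint32_t next_id = 0;

// Stand-in for GCIdMark: takes the next id on construction, clears it on destruction.
struct Mark {
  Mark() { current_gc_id = next_id++; }
  ~Mark() { current_gc_id = UINT32_MAX; }
};

void collect(const char* what) {
  std::printf("[%s] logged under GC #%u\n", what, (unsigned)current_gc_id);
}

// Rough shape of do_collection() after this change.
int main() {
  Mark outer;                     // id for this collection request
  collect("young gen");
  bool old_gen_collects = true;   // pretend the old gen decides to collect too
  if (old_gen_collects) {
    Mark inner;                   // the ensuing full collection runs under a fresh id
    collect("old gen");
  }
  return 0;
}
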


@ -33,13 +33,13 @@
#include "utilities/ticks.hpp" #include "utilities/ticks.hpp"
#if INCLUDE_SERVICES #if INCLUDE_SERVICES
void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) { void ObjectCountEventSender::send(const KlassInfoEntry* entry, const Ticks& timestamp) {
#if INCLUDE_TRACE #if INCLUDE_TRACE
assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId), assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId),
"Only call this method if the event is enabled"); "Only call this method if the event is enabled");
EventObjectCountAfterGC event(UNTIMED); EventObjectCountAfterGC event(UNTIMED);
event.set_gcId(gc_id.id()); event.set_gcId(GCId::current());
event.set_class(entry->klass()); event.set_class(entry->klass());
event.set_count(entry->count()); event.set_count(entry->count());
event.set_totalSize(entry->words() * BytesPerWord); event.set_totalSize(entry->words() * BytesPerWord);


@ -36,7 +36,7 @@ class Ticks;
class ObjectCountEventSender : public AllStatic { class ObjectCountEventSender : public AllStatic {
public: public:
static void send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp); static void send(const KlassInfoEntry* entry, const Ticks& timestamp);
static bool should_send_event(); static bool should_send_event();
}; };


@ -192,8 +192,8 @@ static void log_ref_count(size_t count, bool doit) {
class GCRefTraceTime : public StackObj { class GCRefTraceTime : public StackObj {
GCTraceTimeImpl _gc_trace_time; GCTraceTimeImpl _gc_trace_time;
public: public:
GCRefTraceTime(const char* title, bool doit, GCTimer* timer, GCId gc_id, size_t count) : GCRefTraceTime(const char* title, bool doit, GCTimer* timer, size_t count) :
_gc_trace_time(title, doit, false, timer, gc_id) { _gc_trace_time(title, doit, false, timer) {
log_ref_count(count, doit); log_ref_count(count, doit);
} }
}; };
@ -203,8 +203,7 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
OopClosure* keep_alive, OopClosure* keep_alive,
VoidClosure* complete_gc, VoidClosure* complete_gc,
AbstractRefProcTaskExecutor* task_executor, AbstractRefProcTaskExecutor* task_executor,
GCTimer* gc_timer, GCTimer* gc_timer) {
GCId gc_id) {
assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
// Stop treating discovered references specially. // Stop treating discovered references specially.
@ -233,7 +232,7 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
// Soft references // Soft references
{ {
GCRefTraceTime tt("SoftReference", trace_time, gc_timer, gc_id, stats.soft_count()); GCRefTraceTime tt("SoftReference", trace_time, gc_timer, stats.soft_count());
process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
is_alive, keep_alive, complete_gc, task_executor); is_alive, keep_alive, complete_gc, task_executor);
} }
@ -242,21 +241,21 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
// Weak references // Weak references
{ {
GCRefTraceTime tt("WeakReference", trace_time, gc_timer, gc_id, stats.weak_count()); GCRefTraceTime tt("WeakReference", trace_time, gc_timer, stats.weak_count());
process_discovered_reflist(_discoveredWeakRefs, NULL, true, process_discovered_reflist(_discoveredWeakRefs, NULL, true,
is_alive, keep_alive, complete_gc, task_executor); is_alive, keep_alive, complete_gc, task_executor);
} }
// Final references // Final references
{ {
GCRefTraceTime tt("FinalReference", trace_time, gc_timer, gc_id, stats.final_count()); GCRefTraceTime tt("FinalReference", trace_time, gc_timer, stats.final_count());
process_discovered_reflist(_discoveredFinalRefs, NULL, false, process_discovered_reflist(_discoveredFinalRefs, NULL, false,
is_alive, keep_alive, complete_gc, task_executor); is_alive, keep_alive, complete_gc, task_executor);
} }
// Phantom references // Phantom references
{ {
GCRefTraceTime tt("PhantomReference", trace_time, gc_timer, gc_id, stats.phantom_count()); GCRefTraceTime tt("PhantomReference", trace_time, gc_timer, stats.phantom_count());
process_discovered_reflist(_discoveredPhantomRefs, NULL, false, process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
is_alive, keep_alive, complete_gc, task_executor); is_alive, keep_alive, complete_gc, task_executor);
@ -273,7 +272,7 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
// thus use JNI weak references to circumvent the phantom references and // thus use JNI weak references to circumvent the phantom references and
// resurrect a "post-mortem" object. // resurrect a "post-mortem" object.
{ {
GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer, gc_id); GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer);
NOT_PRODUCT(log_ref_count(count_jni_refs(), trace_time);) NOT_PRODUCT(log_ref_count(count_jni_refs(), trace_time);)
if (task_executor != NULL) { if (task_executor != NULL) {
task_executor->set_single_threaded_mode(); task_executor->set_single_threaded_mode();
@ -1152,13 +1151,12 @@ void ReferenceProcessor::preclean_discovered_references(
OopClosure* keep_alive, OopClosure* keep_alive,
VoidClosure* complete_gc, VoidClosure* complete_gc,
YieldClosure* yield, YieldClosure* yield,
GCTimer* gc_timer, GCTimer* gc_timer) {
GCId gc_id) {
// Soft references // Soft references
{ {
GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC, GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
false, gc_timer, gc_id); false, gc_timer);
for (uint i = 0; i < _max_num_q; i++) { for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) { if (yield->should_return()) {
return; return;
@ -1171,7 +1169,7 @@ void ReferenceProcessor::preclean_discovered_references(
// Weak references // Weak references
{ {
GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
false, gc_timer, gc_id); false, gc_timer);
for (uint i = 0; i < _max_num_q; i++) { for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) { if (yield->should_return()) {
return; return;
@ -1184,7 +1182,7 @@ void ReferenceProcessor::preclean_discovered_references(
// Final references // Final references
{ {
GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
false, gc_timer, gc_id); false, gc_timer);
for (uint i = 0; i < _max_num_q; i++) { for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) { if (yield->should_return()) {
return; return;
@ -1197,7 +1195,7 @@ void ReferenceProcessor::preclean_discovered_references(
// Phantom references // Phantom references
{ {
GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
false, gc_timer, gc_id); false, gc_timer);
for (uint i = 0; i < _max_num_q; i++) { for (uint i = 0; i < _max_num_q; i++) {
if (yield->should_return()) { if (yield->should_return()) {
return; return;


@ -331,8 +331,7 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
OopClosure* keep_alive, OopClosure* keep_alive,
VoidClosure* complete_gc, VoidClosure* complete_gc,
YieldClosure* yield, YieldClosure* yield,
GCTimer* gc_timer, GCTimer* gc_timer);
GCId gc_id);
// Returns the name of the discovered reference list // Returns the name of the discovered reference list
// occupying the i / _num_q slot. // occupying the i / _num_q slot.
@ -441,8 +440,7 @@ class ReferenceProcessor : public CHeapObj<mtGC> {
OopClosure* keep_alive, OopClosure* keep_alive,
VoidClosure* complete_gc, VoidClosure* complete_gc,
AbstractRefProcTaskExecutor* task_executor, AbstractRefProcTaskExecutor* task_executor,
GCTimer *gc_timer, GCTimer *gc_timer);
GCId gc_id);
// Enqueue references at end of GC (called by the garbage collector) // Enqueue references at end of GC (called by the garbage collector)
bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL); bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);


@ -23,6 +23,7 @@
*/ */
#include "precompiled.hpp" #include "precompiled.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/workgroup.hpp" #include "gc/shared/workgroup.hpp"
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp" #include "memory/allocation.inline.hpp"
@ -328,6 +329,7 @@ void GangWorker::print_task_done(WorkData data) {
void GangWorker::run_task(WorkData data) { void GangWorker::run_task(WorkData data) {
print_task_started(data); print_task_started(data);
GCIdMark gc_id_mark(data._task->gc_id());
data._task->work(data._worker_id); data._task->work(data._worker_id);
print_task_done(data); print_task_done(data);
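GangWorker::run_task() now brackets the task's work() with a GCIdMark built from the id stored in the task, so anything the worker logs carries the right GC id. A standalone sketch of that RAII idiom is below; GCIdMarkLike, current_gc_id and the clear-to-undefined policy in the destructor are assumptions for illustration (the real gcId.hpp is not part of this hunk).

#include <cassert>
#include <cstdint>

namespace sketch {

constexpr uint32_t undefined_gc_id = UINT32_MAX;

// Stand-in for the per-thread slot (NamedThread::_gc_id in this change).
thread_local uint32_t current_gc_id = undefined_gc_id;

// RAII mark: install the given id for the enclosing scope, then drop it.
class GCIdMarkLike {
 public:
  explicit GCIdMarkLike(uint32_t gc_id) { current_gc_id = gc_id; }
  ~GCIdMarkLike() { current_gc_id = undefined_gc_id; }
};

void run_task(uint32_t task_gc_id) {
  GCIdMarkLike mark(task_gc_id);   // mirrors: GCIdMark gc_id_mark(data._task->gc_id());
  assert(current_gc_id == task_gc_id);
  // ... data._task->work(worker_id) would run here, logging with the right id ...
}

} // namespace sketch

int main() {
  sketch::run_task(42);
  assert(sketch::current_gc_id == sketch::undefined_gc_id);  // cleared once the task is done
  return 0;
}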


@ -28,6 +28,7 @@
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "runtime/globals.hpp" #include "runtime/globals.hpp"
#include "runtime/thread.hpp" #include "runtime/thread.hpp"
#include "gc/shared/gcId.hpp"
#include "utilities/debug.hpp" #include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp" #include "utilities/globalDefinitions.hpp"
@ -54,9 +55,13 @@ class WorkGang;
// You subclass this to supply your own work() method // You subclass this to supply your own work() method
class AbstractGangTask VALUE_OBJ_CLASS_SPEC { class AbstractGangTask VALUE_OBJ_CLASS_SPEC {
const char* _name; const char* _name;
const uint _gc_id;
public: public:
AbstractGangTask(const char* name) : _name(name) {} AbstractGangTask(const char* name) :
_name(name),
_gc_id(GCId::current_raw())
{}
// The abstract work method. // The abstract work method.
// The argument tells you which member of the gang you are. // The argument tells you which member of the gang you are.
@ -64,6 +69,7 @@ class AbstractGangTask VALUE_OBJ_CLASS_SPEC {
// Debugging accessor for the name. // Debugging accessor for the name.
const char* name() const { return _name; } const char* name() const { return _name; }
const uint gc_id() const { return _gc_id; }
}; };
struct WorkData { struct WorkData {
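AbstractGangTask now snapshots GCId::current_raw() in its constructor: the task is created on a thread that is already inside a collection (and therefore has a current id), while the gang workers that later execute work() do not. The standalone sketch below shows capture-at-construction plus adoption by a worker; GangTaskLike, worker_run and current_gc_id are illustrative names, and the demo runs on one thread only to stay short (in the VM the adoption happens on a separate worker thread).

#include <cstdint>
#include <cstdio>

namespace sketch {

constexpr uint32_t undefined_gc_id = UINT32_MAX;
thread_local uint32_t current_gc_id = undefined_gc_id;

class GangTaskLike {
  const char*    _name;
  const uint32_t _gc_id;   // captured once, on the creating thread
 public:
  explicit GangTaskLike(const char* name)
      : _name(name), _gc_id(current_gc_id) {}   // analogous to GCId::current_raw()
  const char* name()  const { return _name; }
  uint32_t    gc_id() const { return _gc_id; }
};

void worker_run(const GangTaskLike& task) {
  current_gc_id = task.gc_id();                 // what the mark in run_task() achieves
  std::printf("#%u: running %s\n", (unsigned)current_gc_id, task.name());
  current_gc_id = undefined_gc_id;
}

} // namespace sketch

int main() {
  sketch::current_gc_id = 3;                    // creating thread is inside GC #3
  sketch::GangTaskLike task("ParallelRefProcTask");
  sketch::worker_run(task);                     // prints "#3: running ParallelRefProcTask"
  return 0;
}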


@ -31,6 +31,7 @@
#include "code/codeCacheExtensions.hpp" #include "code/codeCacheExtensions.hpp"
#include "code/scopeDesc.hpp" #include "code/scopeDesc.hpp"
#include "compiler/compileBroker.hpp" #include "compiler/compileBroker.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/workgroup.hpp" #include "gc/shared/workgroup.hpp"
#include "interpreter/interpreter.hpp" #include "interpreter/interpreter.hpp"
@ -1149,6 +1150,7 @@ void JavaThread::allocate_threadObj(Handle thread_group, const char* thread_name
NamedThread::NamedThread() : Thread() { NamedThread::NamedThread() : Thread() {
_name = NULL; _name = NULL;
_processed_thread = NULL; _processed_thread = NULL;
_gc_id = GCId::undefined();
} }
NamedThread::~NamedThread() { NamedThread::~NamedThread() {


@ -678,6 +678,7 @@ class NamedThread: public Thread {
char* _name; char* _name;
// log JavaThread being processed by oops_do // log JavaThread being processed by oops_do
JavaThread* _processed_thread; JavaThread* _processed_thread;
uint _gc_id; // The current GC id when a thread takes part in GC
public: public:
NamedThread(); NamedThread();
@ -690,6 +691,9 @@ class NamedThread: public Thread {
JavaThread *processed_thread() { return _processed_thread; } JavaThread *processed_thread() { return _processed_thread; }
void set_processed_thread(JavaThread *thread) { _processed_thread = thread; } void set_processed_thread(JavaThread *thread) { _processed_thread = thread; }
virtual void print_on(outputStream* st) const; virtual void print_on(outputStream* st) const;
void set_gc_id(uint gc_id) { _gc_id = gc_id; }
uint gc_id() { return _gc_id; }
}; };
// Worker threads are named and have an id of an assigned work. // Worker threads are named and have an id of an assigned work.
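NamedThread now carries the _gc_id slot, initialized to GCId::undefined() in the previous hunk and set/cleared while the thread takes part in a collection. The static GCId accessors themselves are not shown in this section; a plausible standalone sketch of how a current()/current_raw() pair can sit on top of such a per-thread slot is given below, with NamedThreadLike, GCIdLike and this_named_thread as assumed names.

#include <cassert>
#include <cstdint>

namespace sketch {

class NamedThreadLike {
 public:
  static constexpr uint32_t undefined = UINT32_MAX;
 private:
  uint32_t _gc_id;
 public:
  NamedThreadLike() : _gc_id(undefined) {}           // like the NamedThread constructor above
  void     set_gc_id(uint32_t gc_id) { _gc_id = gc_id; }
  uint32_t gc_id() const             { return _gc_id; }
};

// Stand-in for "the current thread, narrowed to a named GC thread".
thread_local NamedThreadLike this_named_thread;

struct GCIdLike {
  // Raw read: may legitimately be undefined outside a collection.
  static uint32_t current_raw() { return this_named_thread.gc_id(); }
  // Checked read: callers are expected to be inside a collection.
  static uint32_t current() {
    assert(current_raw() != NamedThreadLike::undefined && "using undefined GC id");
    return current_raw();
  }
};

} // namespace sketch

int main() {
  sketch::this_named_thread.set_gc_id(11);
  return sketch::GCIdLike::current() == 11 ? 0 : 1;
}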


@ -25,6 +25,7 @@
#include "precompiled.hpp" #include "precompiled.hpp"
#include "compiler/compileLog.hpp" #include "compiler/compileLog.hpp"
#include "gc/shared/gcId.hpp" #include "gc/shared/gcId.hpp"
#include "gc/shared/gcId.hpp"
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp" #include "runtime/arguments.hpp"
#include "runtime/os.hpp" #include "runtime/os.hpp"
@ -238,11 +239,11 @@ void outputStream::date_stamp(bool guard,
return; return;
} }
void outputStream::gclog_stamp(const GCId& gc_id) { void outputStream::gclog_stamp() {
date_stamp(PrintGCDateStamps); date_stamp(PrintGCDateStamps);
stamp(PrintGCTimeStamps); stamp(PrintGCTimeStamps);
if (PrintGCID) { if (PrintGCID) {
print("#%u: ", gc_id.id()); print("#%u: ", GCId::current());
} }
} }
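gclog_stamp() loses its parameter and asks GCId::current() for the number it prints behind -XX:+PrintGCID. A standalone sketch of that stamping logic follows; the flag and the accessor are modelled as plain variables (PrintGCID here is a local bool, current_gc_id stands in for GCId::current()).

#include <cstdint>
#include <cstdio>

namespace sketch {

bool PrintGCID = true;                       // stands in for the -XX:+PrintGCID flag
thread_local uint32_t current_gc_id = 5;     // what GCId::current() would return

void gclog_stamp_like() {
  // date_stamp(PrintGCDateStamps) / stamp(PrintGCTimeStamps) elided
  if (PrintGCID) {
    std::printf("#%u: ", (unsigned)current_gc_id);   // was: print("#%u: ", gc_id.id())
  }
}

} // namespace sketch

int main() {
  sketch::gclog_stamp_like();
  std::printf("[GC concurrent-mark-start]\n");
  return 0;
}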


@ -108,7 +108,7 @@ class outputStream : public ResourceObj {
void date_stamp(bool guard) { void date_stamp(bool guard) {
date_stamp(guard, "", ": "); date_stamp(guard, "", ": ");
} }
void gclog_stamp(const GCId& gc_id); void gclog_stamp();
// portable printing of 64 bit integers // portable printing of 64 bit integers
void print_jlong(jlong value); void print_jlong(jlong value);
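Taken together, the shape at a collection's entry point becomes: establish the id once with an RAII mark, and let every timer, log stamp and worker task read it from the central place instead of receiving it as an argument. A closing standalone sketch of that call-site shape is below; it is simplified and uses assumed names (GCIdMarkLike, next_gc_id), and the way the real mark obtains a fresh id is not shown in this section.

#include <cstdint>
#include <cstdio>

namespace sketch {

constexpr uint32_t undefined_gc_id = UINT32_MAX;
thread_local uint32_t current_gc_id = undefined_gc_id;
uint32_t next_gc_id = 0;                        // assumed global counter

struct GCIdMarkLike {                           // one mark per collection
  GCIdMarkLike()  { current_gc_id = next_gc_id++; }
  ~GCIdMarkLike() { current_gc_id = undefined_gc_id; }
};

void gclog_stamp_like() { std::printf("#%u: ", (unsigned)current_gc_id); }

void process_references_like() {                // note: no gc_id parameter anymore
  gclog_stamp_like();
  std::printf("[Reference processing]\n");
}

void do_collection() {
  GCIdMarkLike mark;                            // id becomes visible to everything below
  process_references_like();
}

} // namespace sketch

int main() {
  sketch::do_collection();                      // "#0: [Reference processing]"
  sketch::do_collection();                      // "#1: [Reference processing]"
  return 0;
}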