8152100: Rework and unify the GC phase logging

Reviewed-by: sjohanss, jwilhelm
Stefan Karlsson 2016-03-18 08:59:07 +01:00
parent 9682b4de13
commit a15383123d
12 changed files with 117 additions and 117 deletions
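
The pattern repeated throughout the hunks below: the per-phase timers move onto the gc+phases tag set, with the level chosen by nesting (Info for top-level phases such as "Phase 1: Mark live objects", Debug for sub-phases such as reference processing, class unloading and the table scrubs, Trace for finer CMS detail), and a few phase names are unified along the way ("Weak Refs Processing" and "References" become "Reference Processing", "StringTable" becomes "Scrub String Table"). A representative before/after pair, copied from the PSMarkSweep hunk further down rather than written fresh:

  // Before: top-level mark-sweep phase logged at Trace level under the bare 'gc' tag.
  GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", _gc_timer);
  // After: the same phase logged at Info level under 'gc+phases'.
  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);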


@@ -1518,7 +1518,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
gch->pre_full_gc_dump(gc_timer);
- GCTraceTime(Trace, gc) t("CMS:MSC");
+ GCTraceTime(Trace, gc, phases) t("CMS:MSC");
// Temporarily widen the span of the weak reference processing to
// the entire heap.
@@ -2234,7 +2234,7 @@ class VerifyMarkedClosure: public BitMapClosure {
};
bool CMSCollector::verify_after_remark() {
- GCTraceTime(Info, gc, verify) tm("Verifying CMS Marking.");
+ GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
static bool init = false;
@@ -2818,7 +2818,7 @@ void CMSCollector::checkpointRootsInitialWork() {
// CMS collection cycle.
setup_cms_unloading_and_verification_state();
- GCTraceTime(Trace, gc) ts("checkpointRootsInitialWork", _gc_timer_cm);
+ GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
// Reset all the PLAB chunk arrays if necessary.
if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
@@ -4102,8 +4102,6 @@ void CMSCollector::checkpointRootsFinal() {
// expect it to be false and set to true
FlagSetting fl(gch->_is_gc_active, false);
- GCTraceTime(Trace, gc) tm("Pause Scavenge Before Remark", _gc_timer_cm);
gch->do_collection(true, // full (i.e. force, see below)
false, // !clear_all_soft_refs
0, // size
@@ -4121,7 +4119,7 @@ void CMSCollector::checkpointRootsFinal() {
}
void CMSCollector::checkpointRootsFinalWork() {
- GCTraceTime(Trace, gc) tm("checkpointRootsFinalWork", _gc_timer_cm);
+ GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
assert(haveFreelistLocks(), "must have free list locks");
assert_lock_strong(bitMapLock());
@@ -4171,10 +4169,10 @@ void CMSCollector::checkpointRootsFinalWork() {
// the most recent young generation GC, minus those cleaned up by the
// concurrent precleaning.
if (CMSParallelRemarkEnabled) {
- GCTraceTime(Debug, gc) t("Rescan (parallel)", _gc_timer_cm);
+ GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm);
do_remark_parallel();
} else {
- GCTraceTime(Debug, gc) t("Rescan (non-parallel)", _gc_timer_cm);
+ GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm);
do_remark_non_parallel();
}
}
@@ -4182,7 +4180,7 @@ void CMSCollector::checkpointRootsFinalWork() {
verify_overflow_empty();
{
- GCTraceTime(Trace, gc) ts("refProcessingWork", _gc_timer_cm);
+ GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm);
refProcessingWork();
}
verify_work_stacks_empty();
@@ -4905,7 +4903,7 @@ void CMSCollector::do_remark_non_parallel() {
NULL, // space is set further below
&_markBitMap, &_markStack, &mrias_cl);
{
- GCTraceTime(Trace, gc) t("Grey Object Rescan", _gc_timer_cm);
+ GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm);
// Iterate over the dirty cards, setting the corresponding bits in the
// mod union table.
{
@@ -4939,7 +4937,7 @@ void CMSCollector::do_remark_non_parallel() {
Universe::verify();
}
{
- GCTraceTime(Trace, gc) t("Root Rescan", _gc_timer_cm);
+ GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm);
verify_work_stacks_empty();
@@ -4961,7 +4959,7 @@ void CMSCollector::do_remark_non_parallel() {
}
{
- GCTraceTime(Trace, gc) t("Visit Unhandled CLDs", _gc_timer_cm);
+ GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm);
verify_work_stacks_empty();
@@ -4980,7 +4978,7 @@ void CMSCollector::do_remark_non_parallel() {
}
{
- GCTraceTime(Trace, gc) t("Dirty Klass Scan", _gc_timer_cm);
+ GCTraceTime(Trace, gc, phases) t("Dirty Klass Scan", _gc_timer_cm);
verify_work_stacks_empty();
@@ -5184,7 +5182,7 @@ void CMSCollector::refProcessingWork() {
_span, &_markBitMap, &_markStack,
&cmsKeepAliveClosure, false /* !preclean */);
{
- GCTraceTime(Debug, gc) t("Weak Refs Processing", _gc_timer_cm);
+ GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
ReferenceProcessorStats stats;
if (rp->processing_is_mt()) {
@@ -5226,7 +5224,7 @@ void CMSCollector::refProcessingWork() {
if (should_unload_classes()) {
{
- GCTraceTime(Debug, gc) t("Class Unloading", _gc_timer_cm);
+ GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
@@ -5239,13 +5237,13 @@ void CMSCollector::refProcessingWork() {
}
{
- GCTraceTime(Debug, gc) t("Scrub Symbol Table", _gc_timer_cm);
+ GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer_cm);
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
}
{
- GCTraceTime(Debug, gc) t("Scrub String Table", _gc_timer_cm);
+ GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer_cm);
// Delete entries for dead interned strings.
StringTable::unlink(&_is_alive_closure);
}
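
For readers who have not used it: GCTraceTime is HotSpot's RAII phase timer, and the arguments before the message are the log level and tag set the timing is reported under, which is exactly what this change reworks. Below is a minimal standalone C++ sketch of that idea only, not the HotSpot implementation; every name in it (ScopedPhase, Level, is_enabled, g_threshold) is invented for illustration:

  // Standalone sketch, not HotSpot code: an RAII timer that reports the
  // enclosed scope under a level and tag string, mirroring the shape of the
  // GCTraceTime(<level>, <tags...>) call sites in the hunks above.
  #include <chrono>
  #include <cstdio>

  enum class Level { Trace = 0, Debug = 1, Info = 2 };
  static const Level g_threshold = Level::Debug;   // stand-in for the log configuration

  static bool is_enabled(Level l) {
    return static_cast<int>(l) >= static_cast<int>(g_threshold);
  }

  class ScopedPhase {
   public:
    ScopedPhase(Level level, const char* tags, const char* name)
        : _level(level), _tags(tags), _name(name),
          _start(std::chrono::steady_clock::now()) {}
    ~ScopedPhase() {
      if (!is_enabled(_level)) {
        return;                                    // below threshold: report nothing
      }
      double ms = std::chrono::duration<double, std::milli>(
                      std::chrono::steady_clock::now() - _start).count();
      std::printf("[%s] %s %.3fms\n", _tags, _name, ms);
    }
   private:
    Level _level;
    const char* _tags;
    const char* _name;
    std::chrono::steady_clock::time_point _start;
  };

  int main() {
    { ScopedPhase t(Level::Info,  "gc,phases", "Phase 1: Mark live objects"); }  // reported
    { ScopedPhase t(Level::Trace, "gc,phases", "Grey Object Rescan"); }          // filtered out
    return 0;
  }

In HotSpot the level/tag check and the output go through unified logging rather than printf, but the call sites keep this shape, which is why the hunks above only touch the level and tag arguments.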


@@ -901,7 +901,7 @@ void ParNewGeneration::collect(bool full,
size_policy->minor_collection_begin();
}
- GCTraceTime(Trace, gc) t1("ParNew", NULL, gch->gc_cause());
+ GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause());
age_table()->clear();
to()->clear(SpaceDecorator::Mangle);


@@ -48,7 +48,7 @@ void VM_CMS_Operation::release_and_notify_pending_list_lock() {
void VM_CMS_Operation::verify_before_gc() {
if (VerifyBeforeGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
- GCTraceTime(Info, gc, verify) tm("Verify Before", _collector->_gc_timer_cm);
+ GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@@ -60,7 +60,7 @@ void VM_CMS_Operation::verify_before_gc() {
void VM_CMS_Operation::verify_after_gc() {
if (VerifyAfterGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
- GCTraceTime(Info, gc, verify) tm("Verify After", _collector->_gc_timer_cm);
+ GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
HandleMark hm;
FreelistLocker x(_collector);
MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);


@@ -1108,7 +1108,7 @@ void G1ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
reset_marking_state();
} else {
{
- GCTraceTime(Debug, gc) trace("Aggregate Data", _gc_timer_cm);
+ GCTraceTime(Debug, gc, phases) trace("Aggregate Data", _gc_timer_cm);
// Aggregate the per-task counting data that we have accumulated
// while marking.
@@ -2027,7 +2027,7 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
// Inner scope to exclude the cleaning of the string and symbol
// tables from the displayed time.
{
- GCTraceTime(Debug, gc) trace("Reference Processing", _gc_timer_cm);
+ GCTraceTime(Debug, gc, phases) trace("Reference Processing", _gc_timer_cm);
ReferenceProcessor* rp = g1h->ref_processor_cm();
@@ -2116,28 +2116,24 @@ void G1ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
assert(_markStack.isEmpty(), "Marking should have completed");
// Unload Klasses, String, Symbols, Code Cache, etc.
{
- GCTraceTime(Debug, gc) trace("Unloading", _gc_timer_cm);
if (ClassUnloadingWithConcurrentMark) {
bool purged_classes;
{
- GCTraceTime(Trace, gc) trace("System Dictionary Unloading", _gc_timer_cm);
+ GCTraceTime(Debug, gc, phases) trace("System Dictionary Unloading", _gc_timer_cm);
purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
}
{
- GCTraceTime(Trace, gc) trace("Parallel Unloading", _gc_timer_cm);
+ GCTraceTime(Debug, gc, phases) trace("Parallel Unloading", _gc_timer_cm);
weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
}
}
if (G1StringDedup::is_enabled()) {
- GCTraceTime(Trace, gc) trace("String Deduplication Unlink", _gc_timer_cm);
+ GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", _gc_timer_cm);
G1StringDedup::unlink(&g1_is_alive);
}
}
}
void G1ConcurrentMark::swapMarkBitMaps() {
@@ -2255,7 +2251,7 @@ void G1ConcurrentMark::checkpointRootsFinalWork() {
HandleMark hm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- GCTraceTime(Debug, gc) trace("Finalize Marking", _gc_timer_cm);
+ GCTraceTime(Debug, gc, phases) trace("Finalize Marking", _gc_timer_cm);
g1h->ensure_parsability(false);


@@ -122,7 +122,7 @@ void G1MarkSweep::allocate_stacks() {
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
- GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", gc_timer());
+ GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", gc_timer());
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -137,6 +137,9 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
&follow_code_closure);
}
+ {
+ GCTraceTime(Debug, gc, phases) trace("Reference Processing", gc_timer());
// Process reference objects found during marking
ReferenceProcessor* rp = GenMarkSweep::ref_processor();
assert(rp == g1h->ref_processor_stw(), "Sanity");
@@ -149,13 +152,13 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
NULL,
gc_timer());
gc_tracer()->report_gc_reference_stats(stats);
+ }
// This is the point where the entire marking should have completed.
assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");
{
- GCTraceTime(Debug, gc) trace("Class Unloading", gc_timer());
+ GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer());
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
@@ -168,13 +171,13 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
}
{
- GCTraceTime(Debug, gc) trace("Scrub String and Symbol Tables", gc_timer());
+ GCTraceTime(Debug, gc, phases) trace("Scrub String and Symbol Tables", gc_timer());
// Delete entries for dead interned string and clean up unreferenced symbols in symbol table.
g1h->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
}
if (G1StringDedup::is_enabled()) {
- GCTraceTime(Debug, gc) trace("String Deduplication Unlink", gc_timer());
+ GCTraceTime(Debug, gc, phases) trace("String Deduplication Unlink", gc_timer());
G1StringDedup::unlink(&GenMarkSweep::is_alive);
}
@@ -209,7 +212,7 @@ void G1MarkSweep::mark_sweep_phase2() {
// phase2, phase3 and phase4, but the ValidateMarkSweep live oops
// tracking expects us to do so. See comment under phase4.
- GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", gc_timer());
+ GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", gc_timer());
prepare_compaction();
}
@@ -236,7 +239,7 @@ void G1MarkSweep::mark_sweep_phase3() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// Adjust the pointers to reflect the new locations
- GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", gc_timer());
+ GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
// Need cleared claim bits for the roots processing
ClassLoaderDataGraph::clear_claimed_marks();
@@ -297,7 +300,7 @@ void G1MarkSweep::mark_sweep_phase4() {
// to use a higher index (saved from phase2) when verifying perm_gen.
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- GCTraceTime(Trace, gc) tm("Phase 4: Move objects", gc_timer());
+ GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", gc_timer());
G1SpaceCompactClosure blk;
g1h->heap_region_iterate(&blk);


@@ -493,7 +493,7 @@ void PSMarkSweep::deallocate_stacks() {
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
- GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
@@ -523,6 +523,8 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Process reference objects found during marking
{
+ GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);
ref_processor()->setup_policy(clear_all_softrefs);
const ReferenceProcessorStats& stats =
ref_processor()->process_discovered_references(
@@ -534,7 +536,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
assert(_marking_stack.is_empty(), "Marking should have completed");
{
- GCTraceTime(Debug, gc) t("Class Unloading", _gc_timer);
+ GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
@@ -547,13 +549,13 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
}
{
- GCTraceTime(Debug, gc) t("Scrub String Table", _gc_timer);
+ GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer);
// Delete entries for dead interned strings.
StringTable::unlink(is_alive_closure());
}
{
- GCTraceTime(Debug, gc) t("Scrub Symbol Table", _gc_timer);
+ GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer);
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
}
@@ -563,7 +565,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
void PSMarkSweep::mark_sweep_phase2() {
- GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
// Now all live objects are marked, compute the new object addresses.
@@ -583,7 +585,7 @@ void PSMarkSweep::mark_sweep_phase2() {
void PSMarkSweep::mark_sweep_phase3() {
// Adjust the pointers to reflect the new locations
- GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSYoungGen* young_gen = heap->young_gen();
@@ -623,7 +625,7 @@ void PSMarkSweep::mark_sweep_phase3() {
void PSMarkSweep::mark_sweep_phase4() {
EventMark m("4 compact heap");
- GCTraceTime(Trace, gc) tm("Phase 4: Move objects", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
// All pointers are now adjusted, move objects accordingly


@@ -195,10 +195,10 @@ const char* PSParallelCompact::space_names[] = {
};
void PSParallelCompact::print_region_ranges() {
- if (!log_develop_is_enabled(Trace, gc, compaction, phases)) {
+ if (!log_develop_is_enabled(Trace, gc, compaction)) {
return;
}
- LogHandle(gc, compaction, phases) log;
+ LogHandle(gc, compaction) log;
ResourceMark rm;
Universe::print_on(log.trace_stream());
log.trace("space bottom top end new_top");
@@ -225,7 +225,7 @@ print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
ParallelCompactData& sd = PSParallelCompact::summary_data();
size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
REGION_IDX_FORMAT " " PTR_FORMAT " "
REGION_IDX_FORMAT " " PTR_FORMAT " "
REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
@@ -258,14 +258,14 @@ print_generic_summary_data(ParallelCompactData& summary_data,
++i;
}
- log_develop_trace(gc, compaction, phases)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
+ log_develop_trace(gc, compaction)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
}
void
print_generic_summary_data(ParallelCompactData& summary_data,
SpaceInfo* space_info)
{
- if (!log_develop_is_enabled(Trace, gc, compaction, phases)) {
+ if (!log_develop_is_enabled(Trace, gc, compaction)) {
return;
}
@@ -296,7 +296,7 @@ print_initial_summary_data(ParallelCompactData& summary_data,
size_t i = summary_data.addr_to_region_idx(space->bottom());
while (i < end_region && summary_data.region(i)->data_size() == region_size) {
ParallelCompactData::RegionData* c = summary_data.region(i);
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
i, p2i(c->destination()),
c->partial_obj_size(), c->live_obj_size(),
@@ -330,7 +330,7 @@ print_initial_summary_data(ParallelCompactData& summary_data,
}
ParallelCompactData::RegionData* c = summary_data.region(i);
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d"
"%12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
i, p2i(c->destination()),
@@ -346,21 +346,21 @@ print_initial_summary_data(ParallelCompactData& summary_data,
// Any remaining regions are empty. Print one more if there is one.
if (i < end_region) {
ParallelCompactData::RegionData* c = summary_data.region(i);
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
i, p2i(c->destination()),
c->partial_obj_size(), c->live_obj_size(),
c->data_size(), c->source_region(), c->destination_count());
}
- log_develop_trace(gc, compaction, phases)("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
+ log_develop_trace(gc, compaction)("max: " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
max_reclaimed_ratio_region, max_dead_to_right, max_live_to_right, max_reclaimed_ratio);
}
void
print_initial_summary_data(ParallelCompactData& summary_data,
SpaceInfo* space_info) {
- if (!log_develop_is_enabled(Trace, gc, compaction, phases)) {
+ if (!log_develop_is_enabled(Trace, gc, compaction)) {
return;
}
@@ -621,7 +621,7 @@ ParallelCompactData::summarize_split_space(size_t src_region,
sr->partial_obj_size()));
const size_t end_idx = addr_to_region_idx(target_end);
- log_develop_trace(gc, compaction, phases)("split: clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
+ log_develop_trace(gc, compaction)("split: clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
for (size_t idx = beg_idx; idx < end_idx; ++idx) {
_region_data[idx].set_source_region(0);
}
@@ -641,11 +641,11 @@ ParallelCompactData::summarize_split_space(size_t src_region,
*target_next = split_destination + partial_obj_size;
HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
- if (log_develop_is_enabled(Trace, gc, compaction, phases)) {
+ if (log_develop_is_enabled(Trace, gc, compaction)) {
const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
- log_develop_trace(gc, compaction, phases)("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
+ log_develop_trace(gc, compaction)("%s split: src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
split_type, p2i(source_next), split_region, partial_obj_size);
- log_develop_trace(gc, compaction, phases)("%s split: dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
+ log_develop_trace(gc, compaction)("%s split: dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
split_type, p2i(split_destination),
addr_to_region_idx(split_destination),
p2i(*target_next));
@@ -653,7 +653,7 @@ ParallelCompactData::summarize_split_space(size_t src_region,
if (partial_obj_size != 0) {
HeapWord* const po_beg = split_info.destination();
HeapWord* const po_end = po_beg + split_info.partial_obj_size();
- log_develop_trace(gc, compaction, phases)("%s split: po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
+ log_develop_trace(gc, compaction)("%s split: po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
split_type,
p2i(po_beg), addr_to_region_idx(po_beg),
p2i(po_end), addr_to_region_idx(po_end));
@@ -670,7 +670,7 @@ bool ParallelCompactData::summarize(SplitInfo& split_info,
HeapWord** target_next)
{
HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
"sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
"tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
p2i(source_beg), p2i(source_end), p2i(source_next_val),
@@ -938,7 +938,7 @@ void PSParallelCompact::pre_compact()
// at each young gen gc. Do the update unconditionally (even though a
// promotion failure does not swap spaces) because an unknown number of young
// collections will have swapped the spaces an unknown number of times.
- GCTraceTime(Trace, gc, phases) tm("Pre Compact", &_gc_timer);
+ GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_space_info[from_space_id].set_space(heap->young_gen()->from_space());
_space_info[to_space_id].set_space(heap->young_gen()->to_space());
@@ -981,7 +981,7 @@ void PSParallelCompact::pre_compact()
void PSParallelCompact::post_compact()
{
- GCTraceTime(Trace, gc, phases) tm("Post Compact", &_gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
for (unsigned int id = old_space_id; id < last_space_id; ++id) {
// Clear the marking bitmap, summary data and split info.
@@ -1524,7 +1524,7 @@ PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
}
}
- if (log_develop_is_enabled(Trace, gc, compaction, phases)) {
+ if (log_develop_is_enabled(Trace, gc, compaction)) {
const size_t region_size = ParallelCompactData::RegionSize;
HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
@@ -1532,7 +1532,7 @@ PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
HeapWord* const new_top = _space_info[id].new_top();
const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
"id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
"dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
"cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
@@ -1548,7 +1548,7 @@ void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
SpaceId src_space_id,
HeapWord* src_beg, HeapWord* src_end)
{
- log_develop_trace(gc, compaction, phases)(
+ log_develop_trace(gc, compaction)(
"Summarizing %d [%s] into %d [%s]: "
"src=" PTR_FORMAT "-" PTR_FORMAT " "
SIZE_FORMAT "-" SIZE_FORMAT " "
@@ -1568,7 +1568,7 @@ void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
void PSParallelCompact::summary_phase(ParCompactionManager* cm,
bool maximum_compaction)
{
- GCTraceTime(Trace, gc, phases) tm("Summary Phase", &_gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
#ifdef ASSERT
if (TraceParallelOldGCMarkingPhase) {
@@ -1584,7 +1584,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
// Quick summarization of each space into itself, to see how much is live.
summarize_spaces_quick();
- log_develop_trace(gc, compaction, phases)("summary phase: after summarizing each space to self");
+ log_develop_trace(gc, compaction)("summary phase: after summarizing each space to self");
NOT_PRODUCT(print_region_ranges());
NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
@@ -1660,7 +1660,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
}
}
- log_develop_trace(gc, compaction, phases)("Summary_phase: after final summarization");
+ log_develop_trace(gc, compaction)("Summary_phase: after final summarization");
NOT_PRODUCT(print_region_ranges());
NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
}
@@ -2042,7 +2042,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
bool maximum_heap_compaction,
ParallelOldTracer *gc_tracer) {
// Recursively traverse all live objects and mark them
- GCTraceTime(Trace, gc, phases) tm("Marking Phase", &_gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
uint parallel_gc_threads = heap->gc_task_manager()->workers();
@@ -2057,7 +2057,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
ClassLoaderDataGraph::clear_claimed_marks();
{
- GCTraceTime(Trace, gc, phases) tm("Par Mark", &_gc_timer);
+ GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
ParallelScavengeHeap::ParStrongRootsScope psrs;
@@ -2086,7 +2086,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
// Process reference objects found during marking
{
- GCTraceTime(Trace, gc, phases) tm("Reference Processing", &_gc_timer);
+ GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
ReferenceProcessorStats stats;
if (ref_processor()->processing_is_mt()) {
@@ -2107,7 +2107,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
assert(cm->marking_stacks_empty(), "Marking should have completed");
{
- GCTraceTime(Debug, gc) tm_m("Class Unloading", &_gc_timer);
+ GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);
// Follow system dictionary roots and unload classes.
bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
@@ -2120,13 +2120,13 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
}
{
- GCTraceTime(Debug, gc) t("Scrub String Table", &_gc_timer);
+ GCTraceTime(Debug, gc, phases) t("Scrub String Table", &_gc_timer);
// Delete entries for dead interned strings.
StringTable::unlink(is_alive_closure());
}
{
- GCTraceTime(Debug, gc) t("Scrub Symbol Table", &_gc_timer);
+ GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", &_gc_timer);
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
}
@@ -2136,7 +2136,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
// Adjust the pointers to reflect the new locations
- GCTraceTime(Trace, gc, phases) tm("Adjust Roots", &_gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Adjust Roots", &_gc_timer);
// Need new claim bits when tracing through and adjusting pointers.
ClassLoaderDataGraph::clear_claimed_marks();
@@ -2410,7 +2410,7 @@ void PSParallelCompact::write_block_fill_histogram()
#endif // #ifdef ASSERT
void PSParallelCompact::compact() {
- GCTraceTime(Trace, gc, phases) tm("Compaction Phase", &_gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSOldGen* old_gen = heap->old_gen();
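
The PSParallelCompact hunks above also drop the extra phases tag from the developer-only compaction tracing, so the verbose region and summary dumps stay under gc+compaction while gc+phases is reserved for the phase timers. The guarded-logging shape those call sites use, assembled from lines already present in the hunks (not new code), with log_develop_* being the develop-build-only variants:

  if (!log_develop_is_enabled(Trace, gc, compaction)) {
    return;                                   // skip the expensive dump when the tag set is off
  }
  LogHandle(gc, compaction) log;
  log.trace("space bottom top end new_top");  // header row of the region table
  log_develop_trace(gc, compaction)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);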


@@ -404,7 +404,7 @@ bool PSScavenge::invoke_no_policy() {
// Process reference objects discovered during scavenge
{
- GCTraceTime(Debug, gc, phases) tm("References", &_gc_timer);
+ GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
reference_processor()->setup_policy(false); // not always_clear
reference_processor()->set_active_mt_degree(active_workers);
@@ -433,7 +433,7 @@ bool PSScavenge::invoke_no_policy() {
}
{
- GCTraceTime(Debug, gc, phases) tm("StringTable", &_gc_timer);
+ GCTraceTime(Debug, gc, phases) tm("Scrub String Table", &_gc_timer);
// Unlink any dead interned Strings and process the remaining live ones.
PSScavengeRootsClosure root_closure(promotion_manager);
StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);


@@ -594,7 +594,7 @@ void DefNewGeneration::collect(bool full,
init_assuming_no_promotion_failure();
- GCTraceTime(Trace, gc) tm("DefNew", NULL, gch->gc_cause());
+ GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, gch->gc_cause());
gch->trace_heap_before_gc(&gc_tracer);


@@ -180,7 +180,7 @@ void GenMarkSweep::deallocate_stacks() {
void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Recursively traverse all live objects and mark them
- GCTraceTime(Trace, gc) tm("Phase 1: Mark live objects", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);
GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -208,6 +208,8 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
// Process reference objects found during marking
{
+ GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());
ref_processor()->setup_policy(clear_all_softrefs);
const ReferenceProcessorStats& stats =
ref_processor()->process_discovered_references(
@@ -219,7 +221,7 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
assert(_marking_stack.is_empty(), "Marking should have completed");
{
- GCTraceTime(Debug, gc) tm_m("Class Unloading", gc_timer());
+ GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer());
// Unload classes and purge the SystemDictionary.
bool purged_class = SystemDictionary::do_unloading(&is_alive);
@@ -232,13 +234,13 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
}
{
- GCTraceTime(Debug, gc) t("Scrub String Table", gc_timer());
+ GCTraceTime(Debug, gc, phases) t("Scrub String Table", gc_timer());
// Delete entries for dead interned strings.
StringTable::unlink(&is_alive);
}
{
- GCTraceTime(Debug, gc) t("Scrub Symbol Table", gc_timer());
+ GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", gc_timer());
// Clean up unreferenced symbols in symbol table.
SymbolTable::unlink();
}
@@ -263,7 +265,7 @@ void GenMarkSweep::mark_sweep_phase2() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
- GCTraceTime(Trace, gc) tm("Phase 2: Compute new object addresses", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
gch->prepare_for_compaction();
}
@@ -279,7 +281,7 @@ void GenMarkSweep::mark_sweep_phase3() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
// Adjust the pointers to reflect the new locations
- GCTraceTime(Trace, gc) tm("Phase 3: Adjust pointers", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
// Need new claim bits for the pointer adjustment tracing.
ClassLoaderDataGraph::clear_claimed_marks();
@@ -331,7 +333,7 @@ void GenMarkSweep::mark_sweep_phase4() {
// to use a higher index (saved from phase2) when verifying perm_gen.
GenCollectedHeap* gch = GenCollectedHeap::heap();
- GCTraceTime(Trace, gc) tm("Phase 4: Move objects", _gc_timer);
+ GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
GenCompactClosure blk;
gch->generation_iterate(&blk, true);


@@ -315,7 +315,7 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz
bool is_tlab, bool run_verification, bool clear_soft_refs,
bool restore_marks_for_biased_locking) {
FormatBuffer<> title("Collect gen: %s", gen->short_name());
- GCTraceTime(Debug, gc) t1(title);
+ GCTraceTime(Trace, gc, phases) t1(title);
TraceCollectorStats tcs(gen->counters());
TraceMemoryManagerStats tmms(gen->kind(),gc_cause());


@@ -45,7 +45,6 @@
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, barrier)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, classhisto)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, compaction)) \
- LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, compaction, phases)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, cpu)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, cset)) \