8134953: Make the GC ID available in a central place

Reviewed-by: pliden, jmasa
Bengt Rutisson 2015-09-30 09:07:21 +02:00
parent d516b42238
commit 003892f897
41 changed files with 253 additions and 291 deletions
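The pattern repeated throughout the hunks below: GCTraceTime and the reference-processing calls no longer take the GC ID as an explicit argument. Instead, a stack-allocated GCIdMark is placed at the top of invoke_no_policy(), making the current collection's ID available from a central place for the duration of the GC, so helpers can look it up rather than having it threaded through every call. Below is a minimal sketch of that RAII idiom, not the actual HotSpot code; GCIdSketch, ScopedGCId, and trace_phase are invented names for illustration, and the thread-local slot is an assumption about the mechanism (the real code lives in gc/shared/gcId.hpp):

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the central GC-id slot.
class GCIdSketch {
  static thread_local uint32_t _current;  // id of the GC in progress, per thread (assumed)
  static uint32_t _next;                  // monotonically increasing id source
public:
  static constexpr uint32_t undefined = UINT32_MAX;
  static uint32_t create()  { return _next++; }
  static uint32_t current() { return _current; }
  static void set_current(uint32_t id) { _current = id; }
};
thread_local uint32_t GCIdSketch::_current = GCIdSketch::undefined;
uint32_t GCIdSketch::_next = 0;

// RAII mark analogous to the GCIdMark added in invoke_no_policy() below:
// binds a fresh id for the duration of one collection and restores the
// previous value when the scope ends.
class ScopedGCId {
  uint32_t _previous;
public:
  ScopedGCId() : _previous(GCIdSketch::current()) {
    GCIdSketch::set_current(GCIdSketch::create());
  }
  ~ScopedGCId() { GCIdSketch::set_current(_previous); }
};

// A phase timer no longer needs the id as a parameter; it reads the slot.
void trace_phase(const char* name) {
  std::printf("GC#%u %s\n", (unsigned)GCIdSketch::current(), name);
}

int main() {
  ScopedGCId gc_id_mark;            // like: GCIdMark gc_id_mark;
  trace_phase("marking phase");     // like: GCTraceTime tm("marking phase", ...)
  trace_phase("compaction phase");
  return 0;
}

Saving and restoring the previous id in the destructor keeps nested or back-to-back scopes correct, which is presumably why a mark object is used rather than a bare setter call. The payoff is visible in every hunk: each call site shrinks by one parameter.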

src/share/vm/gc/parallel/psParallelCompact.cpp (one of the 41 changed files):

@@ -40,6 +40,7 @@
 #include "gc/parallel/psYoungGen.hpp"
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -960,7 +961,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
 // at each young gen gc. Do the update unconditionally (even though a
 // promotion failure does not swap spaces) because an unknown number of young
 // collections will have swapped the spaces an unknown number of times.
-GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 _space_info[from_space_id].set_space(heap->young_gen()->from_space());
 _space_info[to_space_id].set_space(heap->young_gen()->to_space());
@@ -1003,7 +1004,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
 void PSParallelCompact::post_compact()
 {
-GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
 // Clear the marking bitmap, summary data and split info.
@@ -1824,7 +1825,7 @@ void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
 bool maximum_compaction)
 {
-GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
 // trace("2");
 #ifdef ASSERT
@@ -1984,6 +1985,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+GCIdMark gc_id_mark;
 _gc_timer.register_gc_start();
 _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
@@ -2031,7 +2033,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
 gc_task_manager()->task_idle_workers();
 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
+GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
 TraceCollectorStats tcs(counters());
 TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
@@ -2331,7 +2333,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
 bool maximum_heap_compaction,
 ParallelOldTracer *gc_tracer) {
 // Recursively traverse all live objects and mark them
-GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 uint parallel_gc_threads = heap->gc_task_manager()->workers();
@@ -2346,7 +2348,7 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
 ClassLoaderDataGraph::clear_claimed_marks();
 {
-GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
 ParallelScavengeHeap::ParStrongRootsScope psrs;
@@ -2375,24 +2377,24 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
 // Process reference objects found during marking
 {
-GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
 ReferenceProcessorStats stats;
 if (ref_processor()->processing_is_mt()) {
 RefProcTaskExecutor task_executor;
 stats = ref_processor()->process_discovered_references(
 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
-&task_executor, &_gc_timer, _gc_tracer.gc_id());
+&task_executor, &_gc_timer);
 } else {
 stats = ref_processor()->process_discovered_references(
 is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
-&_gc_timer, _gc_tracer.gc_id());
+&_gc_timer);
 }
 gc_tracer->report_gc_reference_stats(stats);
 }
-GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
 // This is the point where the entire marking should have completed.
 assert(cm->marking_stacks_empty(), "Marking should have completed");
@@ -2423,7 +2425,7 @@ static PSAlwaysTrueClosure always_true;
 void PSParallelCompact::adjust_roots() {
 // Adjust the pointers to reflect the new locations
-GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
 // Need new claim bits when tracing through and adjusting pointers.
 ClassLoaderDataGraph::clear_claimed_marks();
@@ -2459,7 +2461,7 @@ void PSParallelCompact::adjust_roots() {
 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
 uint parallel_gc_threads)
 {
-GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
 // Find the threads that are active
 unsigned int which = 0;
@@ -2533,7 +2535,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
 uint parallel_gc_threads) {
-GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
 ParallelCompactData& sd = PSParallelCompact::summary_data();
@@ -2615,7 +2617,7 @@ void PSParallelCompact::enqueue_region_stealing_tasks(
 GCTaskQueue* q,
 ParallelTaskTerminator* terminator_ptr,
 uint parallel_gc_threads) {
-GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
 // Once a thread has drained it's stack, it should try to steal regions from
 // other threads.
@@ -2663,7 +2665,7 @@ void PSParallelCompact::write_block_fill_histogram(outputStream* const out)
 void PSParallelCompact::compact() {
 // trace("5");
-GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 PSOldGen* old_gen = heap->old_gen();
@@ -2679,7 +2681,7 @@ void PSParallelCompact::compact() {
 enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
 {
-GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
 gc_task_manager()->execute_and_wait(q);
@@ -2693,7 +2695,7 @@ void PSParallelCompact::compact() {
 {
 // Update the deferred objects, if any. Any compaction manager can be used.
-GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
+GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
 ParCompactionManager* cm = ParCompactionManager::manager_array(0);
 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
 update_deferred_objects(cm, SpaceId(id));