8129549: G1: Make sure the concurrent thread does not mix its logging with the STW pauses
Reviewed-by: pliden, tschatzl
commit 7f2ffe8c0e
parent 8a03d57f9b

5 changed files with 60 additions and 66 deletions
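Background for the change: the concurrent mark thread writes its "[GC concurrent-...]" lines to the same log stream (gclog_or_tty) as the stop-the-world (STW) pauses, so the two outputs could interleave. The fix routes the concurrent thread's logging through a helper that first joins the suspendible thread set (STS), which forces each log line to land strictly before or after any STW pause. The sketch below is a standalone analogue of that idea, not HotSpot code; the names (SuspendibleSet, concurrent_log) and the condition-variable implementation are illustrative assumptions, while the real patch uses HotSpot's SuspendibleThreadSetJoiner, as the diff below shows.

// Standalone analogue: a concurrent thread "joins" a set before logging; a thread
// that wants to run a stop-the-world phase waits until nobody is joined, and joined
// threads cannot enter while a stop-the-world phase is running.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

class SuspendibleSet {
  std::mutex              _m;
  std::condition_variable _cv;
  int                     _joined = 0;
  bool                    _stw    = false;
public:
  void join() {                    // concurrent thread enters a no-STW region
    std::unique_lock<std::mutex> l(_m);
    _cv.wait(l, [&] { return !_stw; });
    ++_joined;
  }
  void leave() {                   // concurrent thread leaves; may unblock the STW thread
    std::lock_guard<std::mutex> l(_m);
    --_joined;
    _cv.notify_all();
  }
  void stw_begin() {               // "VM thread": wait until no concurrent thread is joined
    std::unique_lock<std::mutex> l(_m);
    _cv.wait(l, [&] { return _joined == 0; });
    _stw = true;
  }
  void stw_end() {
    std::lock_guard<std::mutex> l(_m);
    _stw = false;
    _cv.notify_all();
  }
};

static SuspendibleSet sts;

// Analogue of cm_log(): the log line is bracketed by join()/leave(), so it can
// never appear in the middle of the STW output below.
static void concurrent_log(const char* msg) {
  sts.join();
  std::printf("[GC concurrent] %s\n", msg);
  sts.leave();
}

int main() {
  std::thread marker([] {
    for (int i = 0; i < 3; i++) {
      concurrent_log("concurrent-mark step");
    }
  });

  sts.stw_begin();
  std::printf("[GC pause] young collection\n");  // ordered before or after every concurrent line
  sts.stw_end();

  marker.join();
  return 0;
}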
@@ -1179,6 +1179,8 @@ public:
 };
 
 void ConcurrentMark::scanRootRegions() {
+  double scan_start = os::elapsedTime();
+
   // Start of concurrent marking.
   ClassLoaderDataGraph::clear_claimed_marks();
 
@@ -1186,6 +1188,11 @@ void ConcurrentMark::scanRootRegions() {
   // at least one root region to scan. So, if it's false, we
   // should not attempt to do any further work.
   if (root_regions()->scan_in_progress()) {
+    if (G1Log::fine()) {
+      gclog_or_tty->gclog_stamp(concurrent_gc_id());
+      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
+    }
+
     _parallel_marking_threads = calc_parallel_marking_threads();
     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
            "Maximum number of marking threads exceeded");
@@ -1195,6 +1202,11 @@ void ConcurrentMark::scanRootRegions() {
     _parallel_workers->set_active_workers(active_workers);
     _parallel_workers->run_task(&task);
 
+    if (G1Log::fine()) {
+      gclog_or_tty->gclog_stamp(concurrent_gc_id());
+      gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
+    }
+
     // It's possible that has_aborted() is true here without actually
     // aborting the survivor scan earlier. This is OK as it's
     // mainly used for sanity checking.
@@ -2993,6 +3005,11 @@ void ConcurrentMark::print_stats() {
 
 // abandon current marking iteration due to a Full GC
 void ConcurrentMark::abort() {
+  if (!cmThread()->during_cycle() || _has_aborted) {
+    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
+    return;
+  }
+
   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
   // concurrent bitmap clearing.
   _nextMarkBitMap->clearAll();
@@ -3010,12 +3027,8 @@ void ConcurrentMark::abort() {
   }
   _first_overflow_barrier_sync.abort();
   _second_overflow_barrier_sync.abort();
-  const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id();
-  if (!gc_id.is_undefined()) {
-    // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance
-    // to detect that it was aborted. Only keep track of the first GC id that we aborted.
-    _aborted_gc_id = gc_id;
-  }
+  _aborted_gc_id = _g1h->gc_tracer_cm()->gc_id();
+  assert(!_aborted_gc_id.is_undefined(), "ConcurrentMark::abort() executed more than once?");
   _has_aborted = true;
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
@@ -78,7 +78,19 @@ public:
   }
 };
 
+// We want to avoid that the logging from the concurrent thread is mixed
+// with the logging from a STW GC. So, if necessary join the STS to ensure
+// that the logging is done either before or after the STW logging.
+void ConcurrentMarkThread::cm_log(bool doit, bool join_sts, const char* fmt, ...) {
+  if (doit) {
+    SuspendibleThreadSetJoiner sts_joiner(join_sts);
+    va_list args;
+    va_start(args, fmt);
+    gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
+    gclog_or_tty->vprint_cr(fmt, args);
+    va_end(args);
+  }
+}
+
 void ConcurrentMarkThread::run() {
   initialize_in_thread();
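A note on the helper added above: doit lets call sites fold their existing G1Log::fine() / G1TraceMarkStackOverflow checks into the call, and join_sts controls whether the output is wrapped in a SuspendibleThreadSetJoiner. The call sites below pass true, except the concurrent-mark-abort message, which passes false because that code already runs under a SuspendibleThreadSetJoiner (see the sts_join hunk near the end of this file's changes). The ATTRIBUTE_PRINTF(4, 5) annotation in the header change lets the compiler check the format string against the variadic arguments.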
@@ -110,28 +122,12 @@ void ConcurrentMarkThread::run() {
       // without the root regions have been scanned which would be a
       // correctness issue.
 
-      double scan_start = os::elapsedTime();
       if (!cm()->has_aborted()) {
-        if (G1Log::fine()) {
-          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
-        }
-
         _cm->scanRootRegions();
-
-        double scan_end = os::elapsedTime();
-        if (G1Log::fine()) {
-          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]",
-                                 scan_end - scan_start);
-        }
       }
 
       double mark_start_sec = os::elapsedTime();
-      if (G1Log::fine()) {
-        gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-        gclog_or_tty->print_cr("[GC concurrent-mark-start]");
-      }
+      cm_log(G1Log::fine(), true, "[GC concurrent-mark-start]");
 
       int iter = 0;
       do {
@@ -151,25 +147,15 @@ void ConcurrentMarkThread::run() {
           os::sleep(current_thread, sleep_time_ms, false);
         }
 
-        if (G1Log::fine()) {
-          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-          gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf secs]",
-                                 mark_end_sec - mark_start_sec);
-        }
+        cm_log(G1Log::fine(), true, "[GC concurrent-mark-end, %1.7lf secs]", mark_end_sec - mark_start_sec);
 
         CMCheckpointRootsFinalClosure final_cl(_cm);
         VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
         VMThread::execute(&op);
       }
       if (cm()->restart_for_overflow()) {
-        if (G1TraceMarkStackOverflow) {
-          gclog_or_tty->print_cr("Restarting conc marking because of MS overflow "
-                                 "in remark (restart #%d).", iter);
-        }
-        if (G1Log::fine()) {
-          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-          gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
-        }
+        cm_log(G1TraceMarkStackOverflow, true, "Restarting conc marking because of MS overflow in remark (restart #%d).", iter);
+        cm_log(G1Log::fine(), true, "[GC concurrent-mark-restart-for-overflow]");
       }
     } while (cm()->restart_for_overflow());
 
@@ -209,10 +195,7 @@ void ConcurrentMarkThread::run() {
       // reclaimed by cleanup.
 
       double cleanup_start_sec = os::elapsedTime();
-      if (G1Log::fine()) {
-        gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-        gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
-      }
+      cm_log(G1Log::fine(), true, "[GC concurrent-cleanup-start]");
 
       // Now do the concurrent cleanup operation.
       _cm->completeCleanup();
@@ -229,11 +212,7 @@ void ConcurrentMarkThread::run() {
       g1h->reset_free_regions_coming();
 
       double cleanup_end_sec = os::elapsedTime();
-      if (G1Log::fine()) {
-        gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-        gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf secs]",
-                               cleanup_end_sec - cleanup_start_sec);
-      }
+      cm_log(G1Log::fine(), true, "[GC concurrent-cleanup-end, %1.7lf secs]", cleanup_end_sec - cleanup_start_sec);
     }
     guarantee(cm()->cleanup_list_is_empty(),
               "at this point there should be no regions on the cleanup list");
@@ -266,13 +245,8 @@ void ConcurrentMarkThread::run() {
       SuspendibleThreadSetJoiner sts_join;
       if (!cm()->has_aborted()) {
         g1_policy->record_concurrent_mark_cleanup_completed();
-      }
-    }
-
-    if (cm()->has_aborted()) {
-      if (G1Log::fine()) {
-        gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
-        gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
+      } else {
+        cm_log(G1Log::fine(), false, "[GC concurrent-mark-abort]");
       }
     }
 
@@ -40,6 +40,7 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
   double _vtime_accum;  // Accumulated virtual time.
 
   double _vtime_mark_accum;
+  void cm_log(bool doit, bool join_sts, const char* fmt, ...) ATTRIBUTE_PRINTF(4, 5);
 
  public:
   virtual void run();
@@ -3867,6 +3867,21 @@ void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
   gclog_or_tty->flush();
 }
 
+void G1CollectedHeap::wait_for_root_region_scanning() {
+  double scan_wait_start = os::elapsedTime();
+  // We have to wait until the CM threads finish scanning the
+  // root regions as it's the only way to ensure that all the
+  // objects on them have been correctly scanned before we start
+  // moving them during the GC.
+  bool waited = _cm->root_regions()->wait_until_scan_finished();
+  double wait_time_ms = 0.0;
+  if (waited) {
+    double scan_wait_end = os::elapsedTime();
+    wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
+  }
+  g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
+}
+
 bool
 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   assert_at_safepoint(true /* should_be_vm_thread */);
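The root-region wait that previously sat in the middle of do_collection_pause_at_safepoint() (removed in a later hunk) is hoisted into this new helper and called at the very start of the pause, before G1Log::update_level() and print_heap_before_gc(); the wait time is still reported through record_root_region_scan_wait_time(). Together with the scan-start/scan-end messages moving into ConcurrentMark::scanRootRegions(), this keeps the pause's own log output from landing in the middle of the concurrent root-region-scan messages.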
@@ -3883,6 +3898,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ResourceMark rm;
 
+  wait_for_root_region_scanning();
+
   G1Log::update_level();
   print_heap_before_gc();
   trace_heap_before_gc(_gc_tracer_stw);
@@ -4004,19 +4021,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
 
         g1_policy()->record_collection_pause_start(sample_start_time_sec);
 
-        double scan_wait_start = os::elapsedTime();
-        // We have to wait until the CM threads finish scanning the
-        // root regions as it's the only way to ensure that all the
-        // objects on them have been correctly scanned before we start
-        // moving them during the GC.
-        bool waited = _cm->root_regions()->wait_until_scan_finished();
-        double wait_time_ms = 0.0;
-        if (waited) {
-          double scan_wait_end = os::elapsedTime();
-          wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
-        }
-        g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
-
 #if YOUNG_LIST_VERBOSE
         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
         _young_list->print();
@@ -805,6 +805,8 @@ protected:
                                  bool* succeeded,
                                  GCCause::Cause gc_cause);
 
+  void wait_for_root_region_scanning();
+
   // The guts of the incremental collection pause, executed by the vm
   // thread. It returns false if it is unable to do the collection due
   // to the GC locker being active, true otherwise