8138920: Refactor the sampling thread from ConcurrentG1RefineThread
Helps enable running without concurrent refinement threads.
Reviewed-by: brutisso, pliden

commit 3133bbb7fa
parent 9a7829e5a6

9 changed files with 291 additions and 147 deletions
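The commit's point is a structural split: ConcurrentG1Refine keeps an array of refinement worker threads (possibly zero of them) and, separately, owns one G1YoungRemSetSamplingThread, instead of hiding the sampler as an extra "last worker". The following is a minimal standalone sketch of that ownership shape in plain C++11 threads with hypothetical names; it models the idea only and is not HotSpot code.

// Minimal sketch (plain C++ threads, hypothetical names, not HotSpot code) of the
// structure this commit moves to: an owner object holds zero or more refinement
// workers plus one independent sampling thread, so sampling keeps running even
// when no refinement workers are configured.
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>
#include <vector>

class SamplingThread {                  // stands in for G1YoungRemSetSamplingThread
  std::atomic<bool> _stop{false};
  std::thread _thread;
public:
  SamplingThread() : _thread([this] {
    while (!_stop) {                    // periodically re-sample young gen rset lengths
      std::puts("sample young list rset lengths");
      std::this_thread::sleep_for(std::chrono::milliseconds(300));
    }
  }) {}
  void stop() { _stop = true; _thread.join(); }
};

class Refinement {                      // stands in for ConcurrentG1Refine
  std::vector<std::thread> _workers;
  SamplingThread _sampler;              // always present, independent of worker count
  std::atomic<bool> _stop{false};
public:
  explicit Refinement(unsigned n_workers) {
    for (unsigned i = 0; i < n_workers; i++) {
      _workers.emplace_back([this] {
        while (!_stop) {                // refine dirty card buffers (elided)
          std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
      });
    }
  }
  void stop() {
    _stop = true;
    for (std::thread& t : _workers) t.join();
    _sampler.stop();
  }
};

int main() {
  Refinement r(0);                      // zero refinement workers is now a valid setup
  std::this_thread::sleep_for(std::chrono::seconds(1));
  r.stop();
}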
@@ -30,7 +30,8 @@
#include "runtime/java.hpp"
ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
_threads(NULL), _n_threads(0),
_threads(NULL),
_sample_thread(NULL),
_hot_card_cache(g1h)
{
// Ergonomically select initial concurrent refinement parameters

@@ -58,12 +59,10 @@ ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h, CardTableEn
return NULL;
}
cg1r->_n_worker_threads = thread_num();
// We need one extra thread to do the young gen rset size sampling.
cg1r->_n_threads = cg1r->_n_worker_threads + 1;
cg1r->reset_threshold_step();
cg1r->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(ConcurrentG1RefineThread*, cg1r->_n_threads, mtGC);
cg1r->_threads = NEW_C_HEAP_ARRAY_RETURN_NULL(ConcurrentG1RefineThread*, cg1r->_n_worker_threads, mtGC);
if (cg1r->_threads == NULL) {
*ecode = JNI_ENOMEM;
vm_shutdown_during_initialization("Could not allocate an array for ConcurrentG1RefineThread");

@@ -73,7 +72,7 @@ ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h, CardTableEn
uint worker_id_offset = DirtyCardQueueSet::num_par_ids();
ConcurrentG1RefineThread *next = NULL;
for (uint i = cg1r->_n_threads - 1; i != UINT_MAX; i--) {
for (uint i = cg1r->_n_worker_threads - 1; i != UINT_MAX; i--) {
ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(cg1r, next, refine_closure, worker_id_offset, i);
assert(t != NULL, "Conc refine should have been created");
if (t->osthread() == NULL) {

@@ -86,6 +85,14 @@ ConcurrentG1Refine* ConcurrentG1Refine::create(G1CollectedHeap* g1h, CardTableEn
cg1r->_threads[i] = t;
next = t;
}
cg1r->_sample_thread = new G1YoungRemSetSamplingThread();
if (cg1r->_sample_thread->osthread() == NULL) {
*ecode = JNI_ENOMEM;
vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread");
return NULL;
}
*ecode = JNI_OK;
return cg1r;
}

@@ -103,58 +110,48 @@ void ConcurrentG1Refine::init(G1RegionToSpaceMapper* card_counts_storage) {
}
void ConcurrentG1Refine::stop() {
if (_threads != NULL) {
for (uint i = 0; i < _n_threads; i++) {
for (uint i = 0; i < _n_worker_threads; i++) {
_threads[i]->stop();
}
}
_sample_thread->stop();
}
void ConcurrentG1Refine::reinitialize_threads() {
reset_threshold_step();
if (_threads != NULL) {
for (uint i = 0; i < _n_threads; i++) {
for (uint i = 0; i < _n_worker_threads; i++) {
_threads[i]->initialize();
}
}
}
ConcurrentG1Refine::~ConcurrentG1Refine() {
if (_threads != NULL) {
for (uint i = 0; i < _n_threads; i++) {
for (uint i = 0; i < _n_worker_threads; i++) {
delete _threads[i];
}
FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
}
delete _sample_thread;
}
void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
if (_threads != NULL) {
for (uint i = 0; i < _n_threads; i++) {
tc->do_thread(_threads[i]);
}
}
worker_threads_do(tc);
tc->do_thread(_sample_thread);
}
void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
if (_threads != NULL) {
for (uint i = 0; i < worker_thread_num(); i++) {
tc->do_thread(_threads[i]);
}
}
}
uint ConcurrentG1Refine::thread_num() {
return G1ConcRefinementThreads;
}
void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
for (uint i = 0; i < _n_threads; ++i) {
for (uint i = 0; i < _n_worker_threads; ++i) {
_threads[i]->print_on(st);
st->cr();
}
}
ConcurrentG1RefineThread * ConcurrentG1Refine::sampling_thread() const {
return _threads[worker_thread_num()];
_sample_thread->print_on(st);
st->cr();
}
@@ -26,6 +26,7 @@
#define SHARE_VM_GC_G1_CONCURRENTG1REFINE_HPP
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
#include "memory/allocation.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"

@@ -39,8 +40,9 @@ class G1RemSet;
class DirtyCardQueue;
class ConcurrentG1Refine: public CHeapObj<mtGC> {
G1YoungRemSetSamplingThread* _sample_thread;
ConcurrentG1RefineThread** _threads;
uint _n_threads;
uint _n_worker_threads;
/*
* The value of the update buffer queue length falls into one of 3 zones:

@@ -91,8 +93,8 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
// Iterate over all worker refinement threads
void worker_threads_do(ThreadClosure * tc);
// The RS sampling thread
ConcurrentG1RefineThread * sampling_thread() const;
// The RS sampling thread has nothing to do with refinement, but is here for now.
G1YoungRemSetSamplingThread * sampling_thread() const { return _sample_thread; }
static uint thread_num();

@@ -106,7 +108,6 @@ class ConcurrentG1Refine: public CHeapObj<mtGC> {
int yellow_zone() const { return _yellow_zone; }
int red_zone() const { return _red_zone; }
uint total_thread_num() const { return _n_threads; }
uint worker_thread_num() const { return _n_worker_threads; }
int thread_threshold_step() const { return _thread_threshold_step; }
@@ -50,9 +50,8 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
// Each thread has its own monitor. The i-th thread is responsible for signaling
// to thread i+1 if the number of buffers in the queue exceeds a threshold for this
// thread. Monitors are also used to wake up the threads during termination.
// The 0th worker in notified by mutator threads and has a special monitor.
// The last worker is used for young gen rset size sampling.
if (worker_id > 0) {
// The 0th (primary) worker is notified by mutator threads and has a special monitor.
if (!is_primary()) {
_monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true,
Monitor::_safepoint_check_never);
} else {

@@ -66,61 +65,11 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
}
void ConcurrentG1RefineThread::initialize() {
if (_worker_id < cg1r()->worker_thread_num()) {
// Current thread activation threshold
_threshold = MIN2<int>(cg1r()->thread_threshold_step() * (_worker_id + 1) + cg1r()->green_zone(),
cg1r()->yellow_zone());
// A thread deactivates once the number of buffer reached a deactivation threshold
_deactivation_threshold = MAX2<int>(_threshold - cg1r()->thread_threshold_step(), cg1r()->green_zone());
} else {
set_active(true);
}
}
void ConcurrentG1RefineThread::sample_young_list_rs_lengths() {
SuspendibleThreadSetJoiner sts_join;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1p = g1h->g1_policy();
if (g1p->adaptive_young_list_length()) {
int regions_visited = 0;
g1h->young_list()->rs_length_sampling_init();
while (g1h->young_list()->rs_length_sampling_more()) {
g1h->young_list()->rs_length_sampling_next();
++regions_visited;
// we try to yield every time we visit 10 regions
if (regions_visited == 10) {
if (sts_join.should_yield()) {
sts_join.yield();
// we just abandon the iteration
break;
}
regions_visited = 0;
}
}
g1p->revise_young_list_target_length_if_necessary();
}
}
void ConcurrentG1RefineThread::run_young_rs_sampling() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
_vtime_start = os::elapsedVTime();
while(!_should_terminate) {
sample_young_list_rs_lengths();
if (os::supports_vtime()) {
_vtime_accum = (os::elapsedVTime() - _vtime_start);
} else {
_vtime_accum = 0.0;
}
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
if (_should_terminate) {
break;
}
_monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefinementServiceIntervalMillis);
}
}
void ConcurrentG1RefineThread::wait_for_completed_buffers() {
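The surviving part of initialize() computes each worker's activation and deactivation thresholds from the green and yellow zones and the per-thread threshold step, which is what lets worker i+1 be woken only when the buffer backlog outgrows worker i's capacity. The small standalone program below just evaluates the two formulas from the hunk above for a few worker ids; the zone and step values are assumed example numbers, not G1 defaults.

// Standalone illustration of the threshold ladder computed in
// ConcurrentG1RefineThread::initialize(); zone/step values are assumed examples.
#include <algorithm>
#include <cstdio>

int main() {
  const int green_zone  = 2;    // assumed
  const int yellow_zone = 14;   // assumed
  const int step        = 3;    // assumed thread_threshold_step()
  const int workers     = 4;

  for (int worker_id = 0; worker_id < workers; worker_id++) {
    // Same formulas as the diff: the activation threshold grows with worker id,
    // capped at the yellow zone; deactivation is one step lower, floored at green.
    int threshold    = std::min(step * (worker_id + 1) + green_zone, yellow_zone);
    int deactivation = std::max(threshold - step, green_zone);
    std::printf("worker %d: activated when buffers > %d, deactivates when buffers <= %d\n",
                worker_id, threshold, deactivation);
  }
}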
@@ -133,12 +82,12 @@ void ConcurrentG1RefineThread::wait_for_completed_buffers() {
bool ConcurrentG1RefineThread::is_active() {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
return _worker_id > 0 ? _active : dcqs.process_completed_buffers();
return is_primary() ? dcqs.process_completed_buffers() : _active;
}
void ConcurrentG1RefineThread::activate() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
if (_worker_id > 0) {
if (!is_primary()) {
if (G1TraceConcRefinement) {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d",

@@ -154,7 +103,7 @@ void ConcurrentG1RefineThread::activate() {
void ConcurrentG1RefineThread::deactivate() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
if (_worker_id > 0) {
if (!is_primary()) {
if (G1TraceConcRefinement) {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d",

@@ -171,25 +120,24 @@ void ConcurrentG1RefineThread::run() {
initialize_in_thread();
wait_for_universe_init();
if (_worker_id >= cg1r()->worker_thread_num()) {
run_young_rs_sampling();
run_service();
terminate();
return;
}
void ConcurrentG1RefineThread::run_service() {
_vtime_start = os::elapsedVTime();
while (!_should_terminate) {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
while (!_should_terminate) {
// Wait for work
wait_for_completed_buffers();
if (_should_terminate) {
break;
}
{
SuspendibleThreadSetJoiner sts_join;
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
do {
int curr_buffer_num = (int)dcqs.completed_buffers_num();

@@ -199,7 +147,7 @@ void ConcurrentG1RefineThread::run() {
dcqs.set_completed_queue_padding(0);
}
if (_worker_id > 0 && curr_buffer_num <= _deactivation_threshold) {
if (!is_primary() && curr_buffer_num <= _deactivation_threshold) {
// If the number of the buffer has fallen below our threshold
// we should deactivate. The predecessor will reactivate this
// thread should the number of the buffers cross the threshold again.
@@ -225,8 +173,10 @@ void ConcurrentG1RefineThread::run() {
_vtime_accum = 0.0;
}
}
assert(_should_terminate, "just checking");
terminate();
if (G1TraceConcRefinement) {
gclog_or_tty->print_cr("G1-Refine-stop");
}
}
void ConcurrentG1RefineThread::stop() {

@@ -236,10 +186,7 @@ void ConcurrentG1RefineThread::stop() {
_should_terminate = true;
}
{
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
_monitor->notify();
}
stop_service();
{
MutexLockerEx mu(Terminator_lock);

@@ -247,8 +194,9 @@ void ConcurrentG1RefineThread::stop() {
Terminator_lock->wait();
}
}
if (G1TraceConcRefinement) {
gclog_or_tty->print_cr("G1-Refine-stop");
}
}
void ConcurrentG1RefineThread::stop_service() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
_monitor->notify();
}
@@ -31,14 +31,14 @@
class CardTableEntryClosure;
class ConcurrentG1Refine;
// The G1 Concurrent Refinement Thread (could be several in the future).
// One or more G1 Concurrent Refinement Threads may be active if concurrent
// refinement is in progress.
class ConcurrentG1RefineThread: public ConcurrentGCThread {
friend class VMStructs;
friend class G1CollectedHeap;
double _vtime_start; // Initial virtual time.
double _vtime_accum; // Initial virtual time.
double _vtime_accum; // Accumulated virtual time.
uint _worker_id;
uint _worker_id_offset;

@@ -59,8 +59,6 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
// This thread deactivation threshold
int _deactivation_threshold;
void sample_young_list_rs_lengths();
void run_young_rs_sampling();
void wait_for_completed_buffers();
void set_active(bool x) { _active = x; }

@@ -68,6 +66,11 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
void activate();
void deactivate();
bool is_primary() { return (_worker_id == 0); }
void run_service();
void stop_service();
public:
virtual void run();
// Constructor
@@ -92,15 +92,31 @@ void ConcurrentMarkThread::cm_log(bool doit, bool join_sts, const char* fmt, ...
}
}
// Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
if (g1_policy->adaptive_young_list_length()) {
double now = os::elapsedTime();
double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
: g1_policy->predict_cleanup_time_ms();
G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
os::sleep(this, sleep_time_ms, false);
}
}
void ConcurrentMarkThread::run() {
initialize_in_thread();
_vtime_start = os::elapsedVTime();
wait_for_universe_init();
run_service();
terminate();
}
void ConcurrentMarkThread::run_service() {
_vtime_start = os::elapsedVTime();
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1_policy = g1h->g1_policy();
G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
Thread *current_thread = Thread::current();
while (!_should_terminate) {
// wait until started is set.
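The new delay_to_keep_mmu() asks the MMU tracker how long the remark or cleanup pause must be postponed so that pause time within a time slice stays inside the pause budget. The sketch below is a deliberately simplified model of that question, assuming a single previous pause and a predicted pause that fits the budget on its own; it is not the real G1MMUTracker bookkeeping, and all numbers are invented.

// Simplified model (assumed numbers, single previous pause, not G1MMUTracker code)
// of the question delay_to_keep_mmu() asks: how long to wait before starting a
// pause of predicted length so no window of one time slice exceeds the GC budget.
#include <algorithm>
#include <cstdio>

double when_ms(double now_ms, double predicted_pause_ms,
               double time_slice_ms, double max_gc_time_ms,
               double prev_end_ms, double prev_len_ms) {
  // Assumes predicted_pause_ms <= max_gc_time_ms and only one earlier pause.
  if (prev_len_ms + predicted_pause_ms <= max_gc_time_ms) {
    return 0.0;                       // both pauses fit in one slice's budget: start now
  }
  // Otherwise wait until enough of the previous pause has slid out of any
  // time slice that will contain the whole predicted pause.
  double earliest_start = prev_end_ms + time_slice_ms - max_gc_time_ms;
  return std::max(0.0, earliest_start - now_ms);
}

int main() {
  double now = 1000.0;                // ms, arbitrary clock origin
  // Assumed goal: at most 10 ms of pause per 20 ms slice; the last pause ended
  // 5 ms ago and lasted 8 ms; remark is predicted to take 6 ms.
  double wait = when_ms(now, 6.0, 20.0, 10.0, now - 5.0, 8.0);
  std::printf("delay remark by %.1f ms to keep the MMU goal\n", wait);  // 5.0 ms
}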
@@ -141,12 +157,7 @@ void ConcurrentMarkThread::run() {
double mark_end_sec = os::elapsedTime();
_vtime_mark_accum += (mark_end_time - cycle_start);
if (!cm()->has_aborted()) {
if (g1_policy->adaptive_young_list_length()) {
double now = os::elapsedTime();
double remark_prediction_ms = g1_policy->predict_remark_time_ms();
jlong sleep_time_ms = mmu_tracker->when_ms(now, remark_prediction_ms);
os::sleep(current_thread, sleep_time_ms, false);
}
delay_to_keep_mmu(g1_policy, true /* remark */);
cm_log(G1Log::fine(), true, "[GC concurrent-mark-end, %1.7lf secs]", mark_end_sec - mark_start_sec);

@@ -167,12 +178,7 @@ void ConcurrentMarkThread::run() {
_vtime_accum = (end_time - _vtime_start);
if (!cm()->has_aborted()) {
if (g1_policy->adaptive_young_list_length()) {
double now = os::elapsedTime();
double cleanup_prediction_ms = g1_policy->predict_cleanup_time_ms();
jlong sleep_time_ms = mmu_tracker->when_ms(now, cleanup_prediction_ms);
os::sleep(current_thread, sleep_time_ms, false);
}
delay_to_keep_mmu(g1_policy, false /* cleanup */);
CMCleanUp cl_cl(_cm);
VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);

@@ -272,9 +278,6 @@ void ConcurrentMarkThread::run() {
g1h->register_concurrent_cycle_end();
}
}
assert(_should_terminate, "just checking");
terminate();
}
void ConcurrentMarkThread::stop() {

@@ -283,10 +286,7 @@ void ConcurrentMarkThread::stop() {
_should_terminate = true;
}
{
MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
CGC_lock->notify_all();
}
stop_service();
{
MutexLockerEx ml(Terminator_lock);

@@ -296,6 +296,11 @@ void ConcurrentMarkThread::stop() {
}
}
void ConcurrentMarkThread::stop_service() {
MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
CGC_lock->notify_all();
}
void ConcurrentMarkThread::sleepBeforeNextCycle() {
// We join here because we don't want to do the "shouldConcurrentMark()"
// below while the world is otherwise stopped.
@@ -27,11 +27,11 @@
#include "gc/shared/concurrentGCThread.hpp"
// The Concurrent Mark GC Thread (could be several in the future).
// This is copied from the Concurrent Mark Sweep GC Thread
// Still under construction.
// The Concurrent Mark GC Thread triggers the parallel CMConcurrentMarkingTasks
// as well as handling various marking cleanup.
class ConcurrentMark;
class G1CollectorPolicy;
class ConcurrentMarkThread: public ConcurrentGCThread {
friend class VMStructs;

@@ -57,6 +57,10 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
volatile State _state;
void sleepBeforeNextCycle();
void delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark);
void run_service();
void stop_service();
static SurrogateLockerThread* _slt;

@@ -67,9 +71,9 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
static void makeSurrogateLockerThread(TRAPS);
static SurrogateLockerThread* slt() { return _slt; }
// Total virtual time so far.
// Total virtual time so far for this thread and concurrent marking tasks.
double vtime_accum();
// Marking virtual time so far
// Marking virtual time so far this thread and concurrent marking tasks.
double vtime_mark_accum();
ConcurrentMark* cm() { return _cm; }
hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp (new file, 122 lines)
@@ -0,0 +1,122 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "runtime/mutexLocker.hpp"
void G1YoungRemSetSamplingThread::run() {
initialize_in_thread();
wait_for_universe_init();
run_service();
terminate();
}
void G1YoungRemSetSamplingThread::stop() {
// it is ok to take late safepoints here, if needed
{
MutexLockerEx mu(Terminator_lock);
_should_terminate = true;
}
stop_service();
{
MutexLockerEx mu(Terminator_lock);
while (!_has_terminated) {
Terminator_lock->wait();
}
}
}
G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() : ConcurrentGCThread() {
_monitor = new Monitor(Mutex::nonleaf,
"G1YoungRemSetSamplingThread monitor",
true,
Monitor::_safepoint_check_never);
create_and_start();
set_name("G1 Young RemSet Sampling");
}
void G1YoungRemSetSamplingThread::sleep_before_next_cycle() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
if (!_should_terminate) {
intx waitms = G1ConcRefinementServiceIntervalMillis; // 300, really should be?
_monitor->wait(Mutex::_no_safepoint_check_flag, waitms);
}
}
void G1YoungRemSetSamplingThread::run_service() {
double vtime_start = os::elapsedVTime();
while (!_should_terminate) {
sample_young_list_rs_lengths();
if (os::supports_vtime()) {
_vtime_accum = (os::elapsedVTime() - vtime_start);
} else {
_vtime_accum = 0.0;
}
sleep_before_next_cycle();
}
}
void G1YoungRemSetSamplingThread::stop_service() {
MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
_monitor->notify();
}
void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
SuspendibleThreadSetJoiner sts;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1p = g1h->g1_policy();
if (g1p->adaptive_young_list_length()) {
int regions_visited = 0;
g1h->young_list()->rs_length_sampling_init();
while (g1h->young_list()->rs_length_sampling_more()) {
g1h->young_list()->rs_length_sampling_next();
++regions_visited;
// we try to yield every time we visit 10 regions
if (regions_visited == 10) {
if (sts.should_yield()) {
sts.yield();
// we just abandon the iteration
break;
}
regions_visited = 0;
}
}
g1p->revise_young_list_target_length_if_necessary();
}
}
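sample_young_list_rs_lengths() above keeps the sampler friendly to safepoints by polling should_yield() only every 10 regions and abandoning the walk if a yield is pending; the next cycle simply re-samples from scratch. Below is a hedged, standalone sketch of that chunked-polling pattern in plain C++ with hypothetical names; it models the control flow only, not the SuspendibleThreadSet API.

// Hedged sketch (plain C++, hypothetical names) of the yielding pattern used in
// sample_young_list_rs_lengths(): do bounded chunks of work, poll for a yield
// request every 10 regions, and abandon the iteration if one is pending.
#include <atomic>
#include <cstdio>

std::atomic<bool> yield_requested{false};   // set by the "safepoint" initiator

// Returns how many regions were actually visited before yielding or finishing.
int sample_regions(int total_regions) {
  int regions_visited = 0;
  int sampled = 0;
  for (int i = 0; i < total_regions; i++) {
    ++sampled;                              // stand-in for rs_length_sampling_next()
    ++regions_visited;
    if (regions_visited == 10) {            // poll only every 10 regions, as above
      if (yield_requested.load()) {
        std::puts("yield requested: abandoning the iteration");
        break;                              // the next cycle re-samples from scratch
      }
      regions_visited = 0;
    }
  }
  return sampled;
}

int main() {
  std::printf("sampled %d regions\n", sample_regions(100));   // 100
  yield_requested = true;
  std::printf("sampled %d regions\n", sample_regions(100));   // 10
}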
hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.hpp (new file, 63 lines)
@@ -0,0 +1,63 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1YOUNGREMSETSAMPLINGTHREAD_HPP
#define SHARE_VM_GC_G1_G1YOUNGREMSETSAMPLINGTHREAD_HPP
#include "gc/shared/concurrentGCThread.hpp"
// The G1YoungRemSetSamplingThread is used to re-assess the validity of
// the prediction for the remembered set lengths of the young generation.
//
// At the end of the GC G1 determines the length of the young gen based on
// how much time the next GC can take, and when the next GC may occur
// according to the MMU.
//
// The assumption is that a significant part of the GC is spent on scanning
// the remembered sets (and many other components), so this thread constantly
// reevaluates the prediction for the remembered set scanning costs, and potentially
// G1CollectorPolicy resizes the young gen. This may do a premature GC or even
// increase the young gen size to keep pause time length goal.
class G1YoungRemSetSamplingThread: public ConcurrentGCThread {
private:
Monitor* _monitor;
void sample_young_list_rs_lengths();
void run_service();
void stop_service();
void sleep_before_next_cycle();
double _vtime_accum; // Accumulated virtual time.
public:
G1YoungRemSetSamplingThread();
double vtime_accum() { return _vtime_accum; }
virtual void run();
void stop();
};
#endif /* SHARE_VM_GC_G1_G1YOUNGREMSETSAMPLINGTHREAD_HPP */
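The class comment above says the sampler exists so that the young list target length can be revised between GCs when the remembered set lengths, and therefore the predicted scan cost, drift. The sketch below is a loose, hypothetical illustration of that idea with made-up cost coefficients; it is not the real G1CollectorPolicy prediction code, only a worked example of why a larger sampled remembered set pushes the young gen target down.

// Loose, hypothetical illustration (made-up numbers, not G1CollectorPolicy code):
// the young list target is the largest region count whose predicted pause still
// fits the pause time goal, and that prediction grows with the sampled rset size.
#include <cstdio>

double predict_pause_ms(int young_regions, long sampled_rset_cards) {
  const double fixed_cost_ms    = 40.0;     // assumed constant per-pause overhead
  const double copy_cost_ms     = 1.5;      // assumed cost per young region evacuated
  const double cost_per_card_ms = 0.001;    // assumed cost per rset card scanned
  return fixed_cost_ms + copy_cost_ms * young_regions + cost_per_card_ms * sampled_rset_cards;
}

int young_target(double pause_goal_ms, long sampled_rset_cards) {
  int regions = 0;
  while (predict_pause_ms(regions + 1, sampled_rset_cards) <= pause_goal_ms) {
    regions++;                              // grow while the prediction still fits the goal
  }
  return regions;
}

int main() {
  const double goal = 200.0;                // assumed pause time goal, ms
  std::printf("target with 10k rset cards: %d regions\n", young_target(goal, 10000));  // 100
  std::printf("target with 80k rset cards: %d regions\n", young_target(goal, 80000));  // 53
}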
@@ -66,6 +66,7 @@ void ConcurrentGCThread::wait_for_universe_init() {
}
void ConcurrentGCThread::terminate() {
assert(_should_terminate, "Should only be called on terminate request.");
// Signal that it is terminated
{
MutexLockerEx mu(Terminator_lock,
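Across the touched thread classes the commit converges on one shape: run() does initialize_in_thread(), wait_for_universe_init(), run_service(), terminate(); stop() sets _should_terminate, calls stop_service() to wake the loop, then waits on Terminator_lock until terminate() signals completion. The standalone sketch below models that handshake with standard C++ primitives and invented names; it is an illustration of the protocol, not the ConcurrentGCThread implementation.

// Standalone model (std::thread primitives, invented names) of the shutdown
// handshake: stop() flips the termination flag, pokes the service loop via
// stop_service(), then blocks on a "terminator" monitor until the thread body
// signals that it has terminated.
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

class ServiceThread {
  std::mutex _terminator_lock;                 // plays the role of Terminator_lock
  std::condition_variable _terminator_cv;
  std::mutex _monitor_lock;                    // plays the role of the service monitor
  std::condition_variable _monitor_cv;
  std::atomic<bool> _should_terminate{false};
  bool _has_terminated = false;
  std::thread _thread;

  void run_service() {                         // the subclass-specific service loop
    std::unique_lock<std::mutex> lk(_monitor_lock);
    while (!_should_terminate) {
      std::puts("one round of service work");
      // Interruptible sleep: stop_service() cuts the wait short.
      _monitor_cv.wait_for(lk, std::chrono::milliseconds(300));
    }
  }

  void stop_service() {                        // wake the loop out of its wait
    std::lock_guard<std::mutex> lk(_monitor_lock);
    _monitor_cv.notify_one();
  }

  void run() {                                 // thread body: service loop, then "terminate()"
    run_service();
    std::lock_guard<std::mutex> lk(_terminator_lock);
    _has_terminated = true;
    _terminator_cv.notify_all();
  }

 public:
  ServiceThread() : _thread(&ServiceThread::run, this) {}

  void stop() {
    _should_terminate = true;                  // 1. request termination
    stop_service();                            // 2. poke the service loop
    {                                          // 3. wait for the termination signal
      std::unique_lock<std::mutex> lk(_terminator_lock);
      _terminator_cv.wait(lk, [this] { return _has_terminated; });
    }
    _thread.join();
  }
};

int main() {
  ServiceThread t;
  std::this_thread::sleep_for(std::chrono::seconds(1));
  t.stop();
}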