Mirror of https://github.com/openjdk/jdk.git (synced 2025-08-28 15:24:43 +02:00)
8151670: Unexpected concurrent refinement deactivation and reactivation

Refinement threads now use SuspendibleThreadSet::yield.

Reviewed-by: jmasa, mgerdin
parent 6bf9194f41
commit cb72aacabb

3 changed files with 66 additions and 40 deletions
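The heart of the fix is visible in the run_service() hunks below: the refinement drain loop now stays joined to the suspendible thread set and, when a safepoint is requested, yields in place via SuspendibleThreadSet::yield and then re-checks for termination, instead of deactivating and being reactivated around every pause. Deactivation happens once, only when the buffer backlog really drops below the threshold. The following is a minimal, self-contained sketch of that control-flow shape; the atomics, should_yield(), yield_at_safepoint() and process_one_buffer() are illustrative stand-ins, not the HotSpot SuspendibleThreadSet API.

// Standalone sketch of the yield-in-place loop shape introduced by this change.
// Everything here is an illustrative stand-in for the real HotSpot machinery.
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

std::atomic<bool> g_safepoint{false};  // stand-in for "a safepoint is requested"
std::atomic<bool> g_terminate{false};  // stand-in for should_terminate()
std::atomic<int>  g_buffers{100};      // stand-in for the completed-buffer count

static bool should_yield() { return g_safepoint.load(); }

static void yield_at_safepoint() {
  // Stand-in for SuspendibleThreadSetJoiner::yield(): wait until the safepoint
  // operation is over, without leaving the worker loop or deactivating.
  while (g_safepoint.load() && !g_terminate.load()) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
}

static bool process_one_buffer() {     // stand-in for processing one completed buffer
  std::this_thread::sleep_for(std::chrono::microseconds(200));
  return g_buffers.fetch_sub(1) > 1;   // false once the backlog is drained
}

static void run_service() {
  while (!g_terminate.load()) {
    if (should_yield()) {
      yield_at_safepoint();
      continue;                        // re-check for termination after the yield
    }
    if (!process_one_buffer()) {
      break;                           // deactivate: backlog fell below threshold
    }
  }
  std::puts("deactivated once, after the backlog was drained");
}

int main() {
  std::thread worker(run_service);
  std::this_thread::sleep_for(std::chrono::milliseconds(5));
  g_safepoint = true;                  // simulate a pause starting...
  std::this_thread::sleep_for(std::chrono::milliseconds(5));
  g_safepoint = false;                 // ...and ending; the worker resumes in place
  worker.join();
  return 0;
}

The point of the shape is that a safepoint only pauses the loop; it no longer changes the thread's activation state.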
concurrentG1RefineThread.cpp:

@@ -76,7 +76,6 @@ void ConcurrentG1RefineThread::initialize() {
 }
 
 void ConcurrentG1RefineThread::wait_for_completed_buffers() {
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
   while (!should_terminate() && !is_active()) {
     _monitor->wait(Mutex::_no_safepoint_check_flag);
@@ -126,7 +125,12 @@ void ConcurrentG1RefineThread::run_service() {
     {
       SuspendibleThreadSetJoiner sts_join;
 
-      do {
+      while (!should_terminate()) {
+        if (sts_join.should_yield()) {
+          sts_join.yield();
+          continue; // Re-check for termination after yield delay.
+        }
+
         size_t curr_buffer_num = dcqs.completed_buffers_num();
         // If the number of the buffers falls down into the yellow zone,
         // that means that the transition period after the evacuation pause has ended.
@@ -138,17 +142,23 @@ void ConcurrentG1RefineThread::run_service() {
         if (_next != NULL && !_next->is_active() && curr_buffer_num > _next->_threshold) {
           _next->activate();
         }
-      } while (dcqs.apply_closure_to_completed_buffer(_refine_closure,
-                                                      _worker_id + _worker_id_offset,
-                                                      _deactivation_threshold,
-                                                      false /* during_pause */));
 
-      deactivate();
-      log_debug(gc, refine)("Deactivated %d, off threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
-                            _worker_id, _deactivation_threshold,
-                            dcqs.completed_buffers_num());
+        // Process the next buffer, if there are enough left.
+        if (!dcqs.apply_closure_to_completed_buffer(_refine_closure,
+                                                    _worker_id + _worker_id_offset,
+                                                    _deactivation_threshold,
+                                                    false /* during_pause */)) {
+          break; // Deactivate, number of buffers fell below threshold.
+        }
+      }
     }
 
+    deactivate();
+    log_debug(gc, refine)("Deactivated %d, off threshold: " SIZE_FORMAT
+                          ", current: " SIZE_FORMAT,
+                          _worker_id, _deactivation_threshold,
+                          dcqs.completed_buffers_num());
+
     if (os::supports_vtime()) {
       _vtime_accum = (os::elapsedVTime() - _vtime_start);
     } else {
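A side effect of this restructuring: the thread is a member of the suspendible set only inside the block that holds the SuspendibleThreadSetJoiner, and deactivate() plus the log line now run after that block closes, so they execute exactly once per activation, after the thread has left the set. A minimal RAII sketch of that join-for-a-scope idea, with hypothetical ThreadSet and Joiner classes standing in for the real SuspendibleThreadSet types:

// Minimal RAII sketch of the join-for-a-scope pattern around the refinement loop.
// ThreadSet and Joiner are illustrative stand-ins, not the HotSpot classes.
#include <cstdio>

struct ThreadSet {
  int joined = 0;
  void join()  { ++joined; std::puts("joined suspendible set"); }
  void leave() { --joined; std::puts("left suspendible set"); }
};

class Joiner {
  ThreadSet& _set;
public:
  explicit Joiner(ThreadSet& s) : _set(s) { _set.join(); }
  ~Joiner() { _set.leave(); }                 // leave when the scope ends
  Joiner(const Joiner&) = delete;
  Joiner& operator=(const Joiner&) = delete;
};

int main() {
  ThreadSet set;
  {
    Joiner sts_join(set);                     // like the sts_join scope in run_service()
    // ... refinement loop runs here, yielding in place at safepoints ...
  }
  // Deactivation and logging now happen here, outside the joined scope,
  // exactly once per activation.
  std::puts("deactivate() + log_debug(...)");
  return 0;
}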
dirtyCardQueue.cpp:

@@ -155,42 +155,52 @@ bool DirtyCardQueueSet::apply_closure_to_buffer(CardTableEntryClosure* cl,
                                                 bool consume,
                                                 uint worker_i) {
   if (cl == NULL) return true;
+  bool result = true;
   void** buf = BufferNode::make_buffer_from_node(node);
   size_t limit = DirtyCardQueue::byte_index_to_index(buffer_size());
-  size_t start = DirtyCardQueue::byte_index_to_index(node->index());
-  for (size_t i = start; i < limit; ++i) {
+  size_t i = DirtyCardQueue::byte_index_to_index(node->index());
+  for ( ; i < limit; ++i) {
     jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
     assert(card_ptr != NULL, "invariant");
     if (!cl->do_card_ptr(card_ptr, worker_i)) {
-      if (consume) {
-        size_t new_index = DirtyCardQueue::index_to_byte_index(i + 1);
-        assert(new_index <= buffer_size(), "invariant");
-        node->set_index(new_index);
-      }
-      return false;
+      result = false; // Incomplete processing.
+      break;
     }
   }
   if (consume) {
-    node->set_index(buffer_size());
+    size_t new_index = DirtyCardQueue::index_to_byte_index(i);
+    assert(new_index <= buffer_size(), "invariant");
+    node->set_index(new_index);
   }
-  return true;
+  return result;
 }
 
+#ifndef ASSERT
+#define assert_fully_consumed(node, buffer_size)
+#else
+#define assert_fully_consumed(node, buffer_size)                \
+  do {                                                          \
+    size_t _afc_index = (node)->index();                        \
+    size_t _afc_size = (buffer_size);                           \
+    assert(_afc_index == _afc_size,                             \
+           "Buffer was not fully consumed as claimed: index: "  \
+           SIZE_FORMAT ", size: " SIZE_FORMAT,                  \
+           _afc_index, _afc_size);                              \
+  } while (0)
+#endif // ASSERT
+
 bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
   guarantee(_free_ids != NULL, "must be");
 
-  // claim a par id
-  uint worker_i = _free_ids->claim_par_id();
+  uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
+  bool result = apply_closure_to_buffer(_mut_process_closure, node, true, worker_i);
+  _free_ids->release_par_id(worker_i); // release the id
 
-  bool b = apply_closure_to_buffer(_mut_process_closure, node, true, worker_i);
-  if (b) {
+  if (result) {
+    assert_fully_consumed(node, buffer_size());
     Atomic::inc(&_processed_buffers_mut);
   }
 
-  // release the id
-  _free_ids->release_par_id(worker_i);
-
-  return b;
+  return result;
 }
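The reworked apply_closure_to_buffer keeps a single bookkeeping point: on an early stop it simply breaks out of the loop, and the one if (consume) block afterwards records the index of the first unprocessed element (the one the closure rejected), so a requeued buffer resumes, and retries, exactly there. A small standalone sketch of that pattern, with a plain std::vector and a function pointer standing in for the buffer node and the card closure; Buffer, Closure and process_buffer are illustrative names, not HotSpot code.

// Standalone sketch of "resume from a saved index, stop early, record progress".
// Buffer, Closure and process_buffer are illustrative stand-ins, not HotSpot code.
#include <cstddef>
#include <cstdio>
#include <vector>

struct Buffer {
  std::vector<int> entries;
  size_t index = 0;                 // first element not yet processed
};

typedef bool (*Closure)(int entry);

// Returns true if every remaining entry was processed. On an early stop the
// index is left at the entry for which the closure returned false, so a later
// call resumes with (and retries) that entry.
static bool process_buffer(Buffer& buf, Closure cl) {
  bool result = true;
  size_t i = buf.index;
  for ( ; i < buf.entries.size(); ++i) {
    if (!cl(buf.entries[i])) {
      result = false;               // incomplete processing
      break;
    }
  }
  buf.index = i;                    // i == entries.size() when fully consumed
  return result;
}

static bool reject_negative(int entry) { return entry >= 0; }

int main() {
  Buffer buf;
  buf.entries = { 1, 2, -3, 4 };
  bool done = process_buffer(buf, reject_negative);
  std::printf("done=%d index=%zu\n", done, buf.index);   // prints done=0 index=2
  return 0;
}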
@@ -227,15 +237,16 @@ bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure*
     return false;
   } else {
     if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
+      assert_fully_consumed(nd, buffer_size());
       // Done with fully processed buffer.
       deallocate_buffer(nd);
       Atomic::inc(&_processed_buffers_rs_thread);
-      return true;
     } else {
       // Return partially processed buffer to the queue.
+      guarantee(!during_pause, "Should never stop early");
       enqueue_complete_buffer(nd);
-      return false;
     }
+    return true;
   }
 }
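assert_fully_consumed, defined in the previous hunk and used here on the fully-processed path, follows the usual pattern for statement-like macros: the debug body is wrapped in do { ... } while (0) so it expands to a single statement that composes safely with if/else, and the non-ASSERT (product) build gets an empty expansion. A tiny self-contained illustration of the same pattern, using the standard <cassert> and NDEBUG rather than HotSpot's assert and ASSERT:

// Illustration of the do { ... } while (0) statement-macro pattern used by
// assert_fully_consumed; standard <cassert>/NDEBUG stand in for HotSpot's assert/ASSERT.
#include <cassert>
#include <cstddef>

#ifdef NDEBUG
#define assert_fully_consumed(index, size)
#else
#define assert_fully_consumed(index, size)          \
  do {                                              \
    size_t _afc_index = (index);                    \
    size_t _afc_size = (size);                      \
    assert(_afc_index == _afc_size &&               \
           "buffer was not fully consumed");        \
  } while (0)
#endif

int main() {
  size_t index = 8, size = 8;
  // Expands to a single statement, so it composes safely with if/else:
  if (index <= size)
    assert_fully_consumed(index, size);
  else
    index = size;
  return 0;
}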
dirtyCardQueue.hpp:

@@ -82,7 +82,8 @@ class DirtyCardQueueSet: public PtrQueueSet {
   // returns true. Stops processing after the first closure
   // application that returns false, and returns false from this
   // function. If "consume" is true, the node's index is updated to
-  // follow the last processed element.
+  // exclude the processed elements, e.g. up to the element for which
+  // the closure returned false.
   bool apply_closure_to_buffer(CardTableEntryClosure* cl,
                                BufferNode* node,
                                bool consume,
@@ -121,14 +122,18 @@ public:
 
   static void handle_zero_index_for_thread(JavaThread* t);
 
-  // If there exists some completed buffer, pop it, then apply the
-  // specified closure to its active elements. If all active elements
-  // are processed, returns "true". If no completed buffers exist,
-  // returns false. If a completed buffer exists, but is only
-  // partially completed before a "yield" happens, the partially
-  // completed buffer (with its index updated to exclude the processed
-  // elements) is returned to the completed buffer set, and this call
-  // returns false.
+  // If there are more than stop_at completed buffers, pop one, apply
+  // the specified closure to its active elements, and return true.
+  // Otherwise return false.
+  //
+  // A completely processed buffer is freed. However, if a closure
+  // invocation returns false, processing is stopped and the partially
+  // processed buffer (with its index updated to exclude the processed
+  // elements, e.g. up to the element for which the closure returned
+  // false) is returned to the completed buffer set.
+  //
+  // If during_pause is true, stop_at must be zero, and the closure
+  // must never return false.
   bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
                                          uint worker_i,
                                          size_t stop_at,
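The rewritten comment changes the contract: the call now returns true whenever a buffer was popped, even if the closure stopped early and the partially processed buffer went back on the queue, and false only when no more than stop_at buffers remain. That is what lets run_service() drive refinement with a plain while loop. A standalone model of that contract and of the matching drain loop; BufferQueue and the other names below are illustrative, not the HotSpot API.

// Standalone model of the documented contract: pop a completed buffer when more
// than stop_at are pending, apply the closure to its remaining entries, and
// return true whether or not the buffer was fully consumed; a partially
// consumed buffer goes back on the queue. Names are illustrative, not HotSpot's.
#include <cstddef>
#include <cstdio>
#include <deque>
#include <utility>
#include <vector>

struct Buffer {
  std::vector<int> entries;
  size_t index;                                        // first unprocessed element
};

class BufferQueue {
  std::deque<Buffer> _completed;
public:
  void enqueue(Buffer b) { _completed.push_back(std::move(b)); }

  // Mirrors the new apply_closure_to_completed_buffer() contract.
  template <typename Closure>
  bool apply_to_completed_buffer(Closure cl, size_t stop_at) {
    if (_completed.size() <= stop_at) return false;    // nothing (left) to do
    Buffer b = std::move(_completed.front());
    _completed.pop_front();
    bool complete = true;
    size_t i = b.index;
    for ( ; i < b.entries.size(); ++i) {
      if (!cl(b.entries[i])) { complete = false; break; }
    }
    b.index = i;
    if (!complete) {
      enqueue(std::move(b));                           // requeue the partial buffer
    }                                                  // complete buffers are dropped (freed)
    return true;                                       // a buffer was popped
  }
};

int main() {
  BufferQueue q;
  q.enqueue(Buffer{ { 1, 2, 3 }, 0 });
  q.enqueue(Buffer{ { 4, 5, 6 }, 0 });
  bool stop_once = true;                               // model a single early stop
  int processed = 0;
  auto cl = [&](int) {
    if (stop_once) { stop_once = false; return false; } // stop early exactly once
    ++processed;
    return true;
  };
  // Refinement-style drain loop: keep going while buffers remain above stop_at.
  while (q.apply_to_completed_buffer(cl, 0 /* stop_at */)) { }
  std::printf("entries processed: %d\n", processed);   // 6: the partial buffer finished later
  return 0;
}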