Mirror of https://github.com/openjdk/jdk.git (synced 2025-08-26 14:24:46 +02:00)
6690928: Use spinning in combination with yields for workstealing termination
Substitute a spin loop for most calls to yield() to reduce the stress on the system.
Reviewed-by: tonyp
This commit is contained in:
parent 61181babf1
commit 3b5d6a04dd

9 changed files with 116 additions and 4 deletions. Only the hunks for the task queue implementation (taskqueue.cpp) are reproduced below; the other changed files are not part of this excerpt.
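As the comments in the diff below describe, the change turns the termination wait into a three-stage backoff: hard-spin with a growing iteration count, call yield() after every WorkStealingSpinToYieldRatio rounds of spinning, and fall back to sleep() once the yield budget (WorkStealingYieldsBeforeSleep) is exhausted. The standalone C++ sketch below illustrates that shape outside of HotSpot; the k* constants, spin_pause(), and wait_with_backoff() are placeholder names chosen for this example, not HotSpot identifiers.

#include <algorithm>
#include <atomic>
#include <chrono>
#include <thread>

namespace {
  const unsigned kHardSpins         = 4096;  // plays the role of WorkStealingHardSpins
  const unsigned kSpinToYieldRatio  = 10;    // plays the role of WorkStealingSpinToYieldRatio
  const unsigned kYieldsBeforeSleep = 5000;  // plays the role of WorkStealingYieldsBeforeSleep
  const unsigned kSleepMillis       = 1;     // plays the role of WorkStealingSleepMillis

  inline void spin_pause() { /* ideally a CPU pause instruction */ }
}

// Wait for `done`, burning progressively less CPU the longer the wait lasts.
void wait_with_backoff(const std::atomic<bool>& done) {
  unsigned yield_count     = 0;
  unsigned hard_spin_count = 0;
  unsigned hard_spin_start = std::max(kHardSpins >> kSpinToYieldRatio, 1u);
  unsigned hard_spin_limit = hard_spin_start;

  while (!done.load(std::memory_order_acquire)) {
    if (yield_count <= kYieldsBeforeSleep) {
      // A hard spin counts as a yield for the sleep decision, as in the patch.
      yield_count++;
      if (hard_spin_count > kSpinToYieldRatio) {
        // Enough consecutive spin rounds: give the CPU away and restart the ramp.
        std::this_thread::yield();
        hard_spin_count = 0;
        hard_spin_limit = hard_spin_start;
      } else {
        // Hard spin, doubling the iteration count up to the cap.
        hard_spin_limit = std::min(2 * hard_spin_limit, kHardSpins);
        for (unsigned j = 0; j < hard_spin_limit; j++) {
          spin_pause();
        }
        hard_spin_count++;
      }
    } else {
      // Yield budget exhausted: sleep so threads with real work can run.
      std::this_thread::sleep_for(std::chrono::milliseconds(kSleepMillis));
    }
  }
}

The real loop in the patch also re-checks the task queues (peek_in_queue_set()) and an optional TerminatorTerminator each round before deciding whether to keep waiting; the sketch models only the backoff itself.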
@@ -25,6 +25,12 @@
 # include "incls/_precompiled.incl"
 # include "incls/_taskqueue.cpp.incl"

+#ifdef TRACESPINNING
+uint ParallelTaskTerminator::_total_yields = 0;
+uint ParallelTaskTerminator::_total_spins = 0;
+uint ParallelTaskTerminator::_total_peeks = 0;
+#endif
+
 bool TaskQueueSuper::peek() {
   return _bottom != _age.top();
 }
@@ -70,14 +76,61 @@ ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
   Atomic::inc(&_offered_termination);

   uint yield_count = 0;
+  // Number of hard spin loops done since last yield
+  uint hard_spin_count = 0;
+  // Number of iterations in the hard spin loop.
+  uint hard_spin_limit = WorkStealingHardSpins;
+
+  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
+  // If it is greater than 0, then start with a small number
+  // of spins and increase number with each turn at spinning until
+  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
+  // Then do a yield() call and start spinning afresh.
+  if (WorkStealingSpinToYieldRatio > 0) {
+    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
+    hard_spin_limit = MAX2(hard_spin_limit, 1U);
+  }
+  // Remember the initial spin limit.
+  uint hard_spin_start = hard_spin_limit;
+
+  // Loop waiting for all threads to offer termination or
+  // more work.
   while (true) {
+    // Are all threads offering termination?
     if (_offered_termination == _n_threads) {
-      //inner_termination_loop();
       return true;
     } else {
+      // Look for more work.
+      // Periodically sleep() instead of yield() to give threads
+      // waiting on the cores the chance to grab this code
       if (yield_count <= WorkStealingYieldsBeforeSleep) {
+        // Do a yield or hardspin.  For purposes of deciding whether
+        // to sleep, count this as a yield.
         yield_count++;
-        yield();
+
+        // Periodically call yield() instead spinning
+        // After WorkStealingSpinToYieldRatio spins, do a yield() call
+        // and reset the counts and starting limit.
+        if (hard_spin_count > WorkStealingSpinToYieldRatio) {
+          yield();
+          hard_spin_count = 0;
+          hard_spin_limit = hard_spin_start;
+#ifdef TRACESPINNING
+          _total_yields++;
+#endif
+        } else {
+          // Hard spin this time
+          // Increase the hard spinning period but only up to a limit.
+          hard_spin_limit = MIN2(2*hard_spin_limit,
+                                 (uint) WorkStealingHardSpins);
+          for (uint j = 0; j < hard_spin_limit; j++) {
+            SpinPause();
+          }
+          hard_spin_count++;
+#ifdef TRACESPINNING
+          _total_spins++;
+#endif
+        }
       } else {
         if (PrintGCDetails && Verbose) {
           gclog_or_tty->print_cr("ParallelTaskTerminator::offer_termination() "
@@ -92,6 +145,9 @@ ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
         sleep(WorkStealingSleepMillis);
       }

+#ifdef TRACESPINNING
+      _total_peeks++;
+#endif
       if (peek_in_queue_set() ||
           (terminator != NULL && terminator->should_exit_termination())) {
         Atomic::dec(&_offered_termination);
@@ -101,6 +157,16 @@ ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
   }
 }

+#ifdef TRACESPINNING
+void ParallelTaskTerminator::print_termination_counts() {
+  gclog_or_tty->print_cr("ParallelTaskTerminator Total yields: %lld "
+    "Total spins: %lld Total peeks: %lld",
+    total_yields(),
+    total_spins(),
+    total_peeks());
+}
+#endif
+
 void ParallelTaskTerminator::reset_for_reuse() {
   if (_offered_termination != 0) {
     assert(_offered_termination == _n_threads,