8276901: Implement UseHeavyMonitors consistently

Reviewed-by: coleenp, mdoerr, dcubed
Roman Kennke 2021-12-07 14:41:42 +00:00
parent 69d8669fb3
commit 5b81d5eeb4
19 changed files with 428 additions and 304 deletions


@@ -3906,6 +3906,7 @@ encode %{
// Check for existing monitor
__ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
if (!UseHeavyMonitors) {
// Set tmp to be (markWord of object | UNLOCK_VALUE).
__ orr(tmp, disp_hdr, markWord::unlocked_value);
@@ -3936,7 +3937,9 @@ encode %{
// displaced header in the box, which indicates that it is a recursive lock.
__ ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
__ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
} else {
__ tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
}
__ b(cont);
// Handle existing monitor.
@@ -3982,23 +3985,29 @@ encode %{
assert_different_registers(oop, box, tmp, disp_hdr);
if (!UseHeavyMonitors) {
// Find the lock address and load the displaced header from the stack.
__ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
// If the displaced header is 0, we have a recursive unlock.
__ cmp(disp_hdr, zr);
__ br(Assembler::EQ, cont);
}
// Handle existing monitor.
__ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
__ tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
if (!UseHeavyMonitors) {
// Check if it is still a light weight lock, this is true if we
// see the stack address of the basicLock in the markWord of the
// object.
__ cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
/*release*/ true, /*weak*/ false, tmp);
} else {
__ tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
}
__ b(cont);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
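
All of the compiled fast paths touched by this commit follow the pattern visible in this hunk: with UseHeavyMonitors set, the generated code skips the stack-locking attempt and deliberately reports "failure" in the condition flags, so execution falls through to the runtime slow path, which always inflates a full ObjectMonitor. Below is a minimal stand-alone C++ sketch of that decision. It is illustrative only; the names (try_fast_lock, kUnlockedValue, g_use_heavy_monitors) belong to this sketch, not to HotSpot.

    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kUnlockedValue = 0b01;  // cf. markWord::unlocked_value
    constexpr uintptr_t kLockMask      = 0b11;  // low two mark-word bits

    static bool g_use_heavy_monitors = true;    // models -XX:+UseHeavyMonitors

    // Returns true if the fast path stack-locked the object; false sends the
    // caller to the slow path, which always inflates an ObjectMonitor.
    bool try_fast_lock(std::atomic<uintptr_t>& mark, uintptr_t* basic_lock) {
      if (g_use_heavy_monitors) {
        return false;  // mirrors "__ tst(oop, oop)": report failure outright
      }
      uintptr_t expected = mark.load(std::memory_order_relaxed) | kUnlockedValue;
      *basic_lock = expected;                        // save displaced header
      uintptr_t locked = reinterpret_cast<uintptr_t>(basic_lock);
      assert((locked & kLockMask) == 0);             // stack slot is aligned
      return mark.compare_exchange_strong(expected, locked);  // stack-lock CAS
    }

    int main() {
      std::atomic<uintptr_t> mark{kUnlockedValue};   // neutral mark word
      uintptr_t box = 0;                             // stands in for BasicLock
      std::printf("fast path taken: %d\n", try_fast_lock(mark, &box));
    }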


@@ -438,7 +438,11 @@ int LIR_Assembler::emit_unwind_handler() {
if (method()->is_synchronized()) {
monitor_address(0, FrameMap::r0_opr);
stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
if (UseHeavyMonitors) {
__ b(*stub->entry());
} else {
__ unlock_object(r5, r4, r0, *stub->entry());
}
__ bind(*stub->continuation());
}
@@ -2562,7 +2566,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register obj = op->obj_opr()->as_register(); // may not be an oop
Register hdr = op->hdr_opr()->as_register();
Register lock = op->lock_opr()->as_register();
-if (!UseFastLocking) {
+if (UseHeavyMonitors) {
__ b(*op->stub()->entry());
} else if (op->code() == lir_lock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");


@@ -1642,6 +1642,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Load the oop from the handle
__ ldr(obj_reg, Address(oop_handle_reg, 0));
if (!UseHeavyMonitors) {
// Load (object->mark() | 1) into swap_reg %r0
__ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ orr(swap_reg, rscratch1, 1);
@@ -1672,9 +1673,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Save the test result, for recursive case, the result is zero
__ str(swap_reg, Address(lock_reg, mark_word_offset));
__ br(Assembler::NE, slow_path_lock);
} else {
__ b(slow_path_lock);
}
// Slow path will re-enter here
__ bind(lock_done);
}
@@ -1775,8 +1778,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ ldr(obj_reg, Address(oop_handle_reg, 0));
Label done;
-// Simple recursive lock?
if (!UseHeavyMonitors) {
// Simple recursive lock?
__ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
__ cbz(rscratch1, done);
@@ -1795,6 +1799,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
Label succeed;
__ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
__ bind(succeed);
} else {
__ b(slow_path_unlock);
}
// slow path re-enters here
__ bind(unlock_done);


@@ -2425,7 +2425,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register hdr = op->hdr_opr()->as_pointer_register();
Register lock = op->lock_opr()->as_pointer_register();
-if (!UseFastLocking) {
+if (UseHeavyMonitors) {
__ b(*op->stub()->entry());
} else if (op->code() == lir_lock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");


@@ -2689,7 +2689,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
// Obj may not be an oop.
if (op->code() == lir_lock) {
MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
-if (UseFastLocking) {
+if (!UseHeavyMonitors) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
// Add debug info for NullPointerException only if one is possible.
if (op->info() != NULL) {
@@ -2711,7 +2711,7 @@
}
} else {
assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
-if (UseFastLocking) {
+if (!UseHeavyMonitors) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
__ unlock_object(hdr, obj, lock, *op->stub()->entry());
} else {


@@ -2660,6 +2660,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
andi_(temp, displaced_header, markWord::monitor_value);
bne(CCR0, object_has_monitor);
if (!UseHeavyMonitors) {
// Set displaced_header to be (markWord of object | UNLOCK_VALUE).
ori(displaced_header, displaced_header, markWord::unlocked_value);
@@ -2681,6 +2682,10 @@
&cas_failed,
/*check without membar and ldarx first*/true);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
} else {
// Set NE to indicate 'failure' -> take slow-path.
crandc(flag, Assembler::equal, flag, Assembler::equal);
}
// If the compare-and-exchange succeeded, then we found an unlocked
// object and we have now locked it.
@@ -2768,12 +2773,14 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
}
#endif
if (!UseHeavyMonitors) {
// Find the lock address and load the displaced header from the stack.
ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
// If the displaced header is 0, we have a recursive unlock.
cmpdi(flag, displaced_header, 0);
beq(flag, cont);
}
// Handle existing monitor.
// The object has an existing monitor iff (mark & monitor_value) != 0.
@@ -2782,6 +2789,7 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
andi_(R0, current_header, markWord::monitor_value);
bne(CCR0, object_has_monitor);
if (!UseHeavyMonitors) {
// Check if it is still a light weight lock, this is true if we see
// the stack address of the basicLock in the markWord of the object.
// Cmpxchg sets flag to cmpd(current_header, box).
@@ -2794,8 +2802,11 @@ void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Registe
MacroAssembler::cmpxchgx_hint_release_lock(),
noreg,
&cont);
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
} else {
// Set NE to indicate 'failure' -> take slow-path.
crandc(flag, Assembler::equal, flag, Assembler::equal);
}
// Handle existing monitor.
b(cont);


@@ -2730,7 +2730,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register obj = op->obj_opr()->as_register(); // May not be an oop.
Register hdr = op->hdr_opr()->as_register();
Register lock = op->lock_opr()->as_register();
-if (!UseFastLocking) {
+if (UseHeavyMonitors) {
__ branch_optimized(Assembler::bcondAlways, *op->stub()->entry());
} else if (op->code() == lir_lock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");


@@ -461,7 +461,11 @@ int LIR_Assembler::emit_unwind_handler() {
if (method()->is_synchronized()) {
monitor_address(0, FrameMap::rax_opr);
stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
if (UseHeavyMonitors) {
__ jmp(*stub->entry());
} else {
__ unlock_object(rdi, rsi, rax, *stub->entry());
}
__ bind(*stub->continuation());
}
@@ -3498,7 +3502,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register obj = op->obj_opr()->as_register(); // may not be an oop
Register hdr = op->hdr_opr()->as_register();
Register lock = op->lock_opr()->as_register();
-if (!UseFastLocking) {
+if (UseHeavyMonitors) {
__ jmp(*op->stub()->entry());
} else if (op->code() == lir_lock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");


@@ -485,6 +485,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
#if INCLUDE_RTM_OPT
if (UseRTMForStackLocks && use_rtm) {
assert(!UseHeavyMonitors, "+UseHeavyMonitors and +UseRTMForStackLocks are mutually exclusive");
rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg,
stack_rtm_counters, method_data, profile_rtm,
DONE_LABEL, IsInflated);
@@ -495,6 +496,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral
jccb(Assembler::notZero, IsInflated);
if (!UseHeavyMonitors) {
// Attempt stack-locking ...
orptr (tmpReg, markWord::unlocked_value);
movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
@@ -509,6 +511,10 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
// Next instruction sets ZFlag == 1 (Success) if the difference is less than one page.
andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
movptr(Address(boxReg, 0), tmpReg);
} else {
// Clear ZF so that we take the slow path at the DONE label. objReg is known to be not 0.
testptr(objReg, objReg);
}
jmp(DONE_LABEL);
bind(IsInflated);
@@ -638,6 +644,7 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
#if INCLUDE_RTM_OPT
if (UseRTMForStackLocks && use_rtm) {
assert(!UseHeavyMonitors, "+UseHeavyMonitors and +UseRTMForStackLocks are mutually exclusive");
Label L_regular_unlock;
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
andptr(tmpReg, markWord::lock_mask_in_place); // look at 2 lock bits
@@ -649,11 +656,15 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
}
#endif
if (!UseHeavyMonitors) {
cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
}
movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Examine the object's markword
if (!UseHeavyMonitors) {
testptr(tmpReg, markWord::monitor_value); // Inflated?
jccb (Assembler::zero, Stacked);
}
// It's inflated.
#if INCLUDE_RTM_OPT
@@ -795,11 +806,12 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
testl (boxReg, 0); // set ICC.ZF=1 to indicate success
jmpb (DONE_LABEL);
if (!UseHeavyMonitors) {
bind (Stacked);
movptr(tmpReg, Address (boxReg, 0)); // re-fetch
lock();
cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
}
#endif
bind(DONE_LABEL);
}
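
Note that the two RTM asserts above only document the invariant in debug builds; the startup-time enforcement is the fatal() consistency check added to Arguments::check_vm_args_consistency() further down, which rejects -XX:+UseHeavyMonitors combined with -XX:+UseRTMForStackLocks outright.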


@@ -1705,6 +1705,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Load the oop from the handle
__ movptr(obj_reg, Address(oop_handle_reg, 0));
if (!UseHeavyMonitors) {
// Load immediate 1 into swap_reg %rax,
__ movptr(swap_reg, 1);
@@ -1735,6 +1736,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Save the test result, for recursive case, the result is zero
__ movptr(Address(lock_reg, mark_word_offset), swap_reg);
__ jcc(Assembler::notEqual, slow_path_lock);
} else {
__ jmp(slow_path_lock);
}
// Slow path will re-enter here
__ bind(lock_done);
}
@@ -1852,6 +1857,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Get locked oop from the handle we passed to jni
__ movptr(obj_reg, Address(oop_handle_reg, 0));
if (!UseHeavyMonitors) {
// Simple recursive lock?
__ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
@@ -1874,6 +1880,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ lock();
__ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ jcc(Assembler::notEqual, slow_path_unlock);
} else {
__ jmp(slow_path_unlock);
}
// slow path re-enters here
__ bind(unlock_done);


@@ -1918,6 +1918,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Load the oop from the handle
__ movptr(obj_reg, Address(oop_handle_reg, 0));
if (!UseHeavyMonitors) {
// Load immediate 1 into swap_reg %rax
__ movl(swap_reg, 1);
@@ -1949,6 +1950,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Save the test result, for recursive case, the result is zero
__ movptr(Address(lock_reg, mark_word_offset), swap_reg);
__ jcc(Assembler::notEqual, slow_path_lock);
} else {
__ jmp(slow_path_lock);
}
// Slow path will re-enter here
@@ -2055,8 +2059,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ movptr(obj_reg, Address(oop_handle_reg, 0));
Label done;
-// Simple recursive lock?
if (!UseHeavyMonitors) {
// Simple recursive lock?
__ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, done);
@@ -2075,6 +2080,9 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ lock();
__ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ jcc(Assembler::notEqual, slow_path_unlock);
} else {
__ jmp(slow_path_unlock);
}
// slow path re-enters here
__ bind(unlock_done);


@@ -620,7 +620,7 @@ void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, L
// setup registers
LIR_Opr hdr = lock;
lock = new_hdr;
-CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
+CodeStub* slow_path = new MonitorExitStub(lock, !UseHeavyMonitors, monitor_no);
__ load_stack_address_monitor(monitor_no, lock);
__ unlock_object(hdr, object, lock, scratch, slow_path);
}


@@ -736,7 +736,7 @@ JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj,
_monitorenter_slowcase_cnt++;
}
#endif
-if (!UseFastLocking) {
+if (UseHeavyMonitors) {
lock->set_obj(obj);
}
assert(obj == lock->obj(), "must match");


@@ -242,9 +242,6 @@
develop(bool, UseFastNewObjectArray, true, \
"Use fast inlined object array allocation") \
\
-develop(bool, UseFastLocking, true, \
-"Use fast inlined locking code") \
-\
develop(bool, UseSlowPath, false, \
"For debugging: test slow cases by always using them") \
\


@@ -532,6 +532,9 @@ static SpecialFlag const special_jvm_flags[] = {
{ "DynamicDumpSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() },
{ "RequireSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() },
{ "UseSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() },
#ifdef PRODUCT
{ "UseHeavyMonitors", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::jdk(20) },
#endif
// --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
{ "DefaultMaxRAMFraction", JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
@@ -2018,6 +2021,20 @@ bool Arguments::check_vm_args_consistency() {
}
#endif
#if !defined(X86) && !defined(AARCH64) && !defined(PPC64)
if (UseHeavyMonitors) {
warning("UseHeavyMonitors is not fully implemented on this architecture");
}
#endif
#if defined(X86) || defined(PPC64)
if (UseHeavyMonitors && UseRTMForStackLocks) {
fatal("-XX:+UseHeavyMonitors and -XX:+UseRTMForStackLocks are mutually exclusive");
}
#endif
if (VerifyHeavyMonitors && !UseHeavyMonitors) {
fatal("-XX:+VerifyHeavyMonitors requires -XX:+UseHeavyMonitors");
}
return status;
}
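
Net behavior of these checks: on architectures other than x86, AArch64 and PPC64 the flag is accepted but only draws the warning above, and VerifyHeavyMonitors (a develop flag, see the globals.hpp hunk below) requires UseHeavyMonitors and can only be set in debug builds in the first place.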


@@ -1066,7 +1066,12 @@ const intx ObjectAlignmentInBytes = 8;
"If true, error data is printed to stdout instead of a file") \
\
product(bool, UseHeavyMonitors, false, \
"use heavyweight instead of lightweight Java monitors") \
"(Deprecated) Use heavyweight instead of lightweight Java " \
"monitors") \
\
develop(bool, VerifyHeavyMonitors, false, \
"Checks that no stack locking happens when using " \
"+UseHeavyMonitors") \
\
product(bool, PrintStringTableStatistics, false, \
"print statistics about the StringTable and SymbolTable") \


@@ -417,6 +417,14 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread
}
}
static bool useHeavyMonitors() {
#if defined(X86) || defined(AARCH64) || defined(PPC64)
return UseHeavyMonitors;
#else
return false;
#endif
}
// -----------------------------------------------------------------------------
// Monitor Enter/Exit
// The interpreter and compiler assembly code tries to lock using the fast path
@@ -428,6 +436,7 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current)
handle_sync_on_value_based_class(obj, current);
}
if (!useHeavyMonitors()) {
markWord mark = obj->mark();
if (mark.is_neutral()) {
// Anticipate successful CAS -- the ST of the displaced mark must
@@ -450,6 +459,10 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current)
// must be non-zero to avoid looking like a re-entrant lock,
// and must not look locked either.
lock->set_displaced_header(markWord::unused_mark());
} else if (VerifyHeavyMonitors) {
guarantee(!obj->mark().has_locker(), "must not be stack-locked");
}
// An async deflation can race after the inflate() call and before
// enter() can make the ObjectMonitor busy. enter() returns false if
// we have lost the race to async deflation and we simply try again.
@@ -462,6 +475,7 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current)
}
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
if (!useHeavyMonitors()) {
markWord mark = object->mark();
markWord dhw = lock->displaced_header();
@@ -502,6 +516,9 @@ void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current)
return;
}
}
} else if (VerifyHeavyMonitors) {
guarantee(!object->mark().has_locker(), "must not be stack-locked");
}
// We have to take the slow-path of possible inflation and then exit.
// The ObjectMonitor* can't be async deflated until ownership is
@@ -804,7 +821,10 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
markWord temp, test;
intptr_t hash;
markWord mark = read_stable_mark(obj);
if (VerifyHeavyMonitors) {
assert(UseHeavyMonitors, "+VerifyHeavyMonitors requires +UseHeavyMonitors");
guarantee(!mark.has_locker(), "must not be stack locked");
}
if (mark.is_neutral()) { // if this is a normal header
hash = mark.hash();
if (hash != 0) { // if it has a hash, just return it
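
The runtime side mirrors the compiled code: useHeavyMonitors() gates every mark-word fast path, and in debug builds VerifyHeavyMonitors checks that no object is ever stack-locked. A toy sketch of the enter()/exit() gating follows; it is a hypothetical model, not HotSpot code, with BasicLockModel and kUnusedMark standing in for BasicLock and markWord::unused_mark().

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kUnusedMark = 0b11;  // non-zero, and not a lock owner

    struct BasicLockModel { uintptr_t displaced_header = 0; };

    void enter(BasicLockModel& lock, bool use_heavy_monitors) {
      if (!use_heavy_monitors) {
        // ... neutral / recursive stack-lock fast paths elided ...
      } else {
        // Must be non-zero (zero would look like a recursive lock) and must
        // not look locked either, exactly as the comment in the hunk says.
        lock.displaced_header = kUnusedMark;
      }
      // Every case that falls through continues into inflate()->enter().
    }

    void exit_monitor(BasicLockModel& lock, bool use_heavy_monitors) {
      if (!use_heavy_monitors) {
        if (lock.displaced_header == 0) return;  // recursive unlock: done
        // ... CAS the displaced header back into the mark word, elided ...
      }
      // Fall through to the ObjectMonitor exit slow path.
    }

    int main() {
      BasicLockModel lock;
      enter(lock, /*use_heavy_monitors=*/true);
      assert(lock.displaced_header == kUnusedMark);
      exit_monitor(lock, true);
      std::puts("heavy-monitor path exercised");
    }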


@@ -21,6 +21,10 @@
* questions.
*/
import java.util.Arrays;
import java.util.ArrayList;
import jdk.test.lib.Platform;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.cli.*;
@@ -39,7 +43,10 @@ public class VMDeprecatedOptions {
* each entry is {[0]: option name, [1]: value to set
* (true/false/n/string)}.
*/
-public static final String[][] DEPRECATED_OPTIONS = {
+public static final String[][] DEPRECATED_OPTIONS;
static {
ArrayList<String[]> deprecated = new ArrayList(
Arrays.asList(new String[][] {
// deprecated non-alias flags:
{"MaxGCMinorPauseMillis", "1032"},
{"MaxRAMFraction", "8"},
@@ -55,6 +62,12 @@ public class VMDeprecatedOptions {
// deprecated alias flags (see also aliased_jvm_flags):
{"DefaultMaxRAMFraction", "4"},
{"CreateMinidumpOnCrash", "false"}
}
));
if (!Platform.isDebugBuild()) {
deprecated.add(new String[]{"UseHeavyMonitors", "false"});
}
DEPRECATED_OPTIONS = deprecated.toArray(new String[][]{});
};
static String getDeprecationString(String optionName) {
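
This guard mirrors the #ifdef PRODUCT in the arguments.cpp hunk above: the deprecation entry only exists in product builds, so the test expects a deprecation warning for UseHeavyMonitors only when the JVM under test is not a debug build.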


@@ -45,6 +45,14 @@
* @run main/timeout=1600 MapLoops
*/
/*
* @test
* @summary Exercise multithreaded maps, using only heavy monitors.
* @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64" | os.arch == "ppc64" | os.arch == "ppc64le"
* @library /test/lib
* @run main/othervm/timeout=1600 -XX:+IgnoreUnrecognizedVMOptions -XX:+UseHeavyMonitors -XX:+VerifyHeavyMonitors MapLoops
*/
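
The -XX:+IgnoreUnrecognizedVMOptions in the new run line is presumably what keeps this run usable on release builds, where VerifyHeavyMonitors, being a develop flag, cannot be set.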
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import java.util.List;