Mirror of https://github.com/openjdk/jdk.git

8272797: Mutex with rank safepoint_check_never imply allow_vm_block
Reviewed-by: dholmes, pchilanomate

parent f11e099a14
commit 98b9d98032

40 changed files with 125 additions and 109 deletions
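In short, the Mutex/Monitor constructors change from taking allow_vm_block before the safepoint-check mode (both with defaults) to taking the safepoint-check mode first, with an extra overload that derives allow_vm_block from it. A rough before/after sketch of the declarations, condensed from the mutex.hpp hunk further down (not a verbatim copy of the header):

    // Before:
    Mutex(int rank, const char* name, bool allow_vm_block = false,
          SafepointCheckRequired safepoint_check_required = _safepoint_check_always);

    // After:
    Mutex(int rank, const char* name, SafepointCheckRequired safepoint_check_required, bool allow_vm_block);
    Mutex(int rank, const char* name, SafepointCheckRequired safepoint_check_required);
    // the two-argument form picks allow_vm_block = true for _safepoint_check_never locks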
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -46,7 +46,7 @@ void OSThread::pd_initialize() {
 
   sigemptyset(&_caller_sigmask);
 
-  _startThread_lock = new Monitor(Mutex::event, "startThread_lock", true,
+  _startThread_lock = new Monitor(Mutex::event, "startThread_lock",
                                   Monitor::_safepoint_check_never);
   assert(_startThread_lock != NULL, "check");
 }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@ void OSThread::pd_initialize() {
 
   sigemptyset(&_caller_sigmask);
 
-  _startThread_lock = new Monitor(Mutex::event, "startThread_lock", true,
+  _startThread_lock = new Monitor(Mutex::event, "startThread_lock",
                                   Monitor::_safepoint_check_never);
   assert(_startThread_lock !=NULL, "check");
 }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@ void OSThread::pd_initialize() {
 
   sigemptyset(&_caller_sigmask);
 
-  _startThread_lock = new Monitor(Mutex::event, "startThread_lock", true,
+  _startThread_lock = new Monitor(Mutex::event, "startThread_lock",
                                   Monitor::_safepoint_check_never);
   assert(_startThread_lock !=NULL, "check");
 }
@@ -133,7 +133,7 @@ void ClassLoaderData::initialize_name(Handle class_loader) {
 
 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool has_class_mirror_holder) :
   _metaspace(NULL),
-  _metaspace_lock(new Mutex(Mutex::leaf+1, "Metaspace allocation lock", true,
+  _metaspace_lock(new Mutex(Mutex::leaf+1, "Metaspace allocation lock",
                             Mutex::_safepoint_check_never)),
   _unloading(false), _has_class_mirror_holder(has_class_mirror_holder),
   _modified_oops(true),

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -104,7 +104,7 @@ class CompileTask : public CHeapObj<mtCompiler> {
 
  public:
   CompileTask() : _failure_reason(NULL), _failure_reason_on_C_heap(false) {
-    _lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock");
+    _lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock", Mutex::_safepoint_check_always);
   }
 
   void initialize(int compile_id, const methodHandle& method, int osr_bci, int comp_level,

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -167,7 +167,7 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
                                        MEMFLAGS type) :
     G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
     _regions_per_page((page_size * commit_factor) / alloc_granularity),
-    _lock(Mutex::leaf, "G1 mapper lock", true, Mutex::_safepoint_check_never) {
+    _lock(Mutex::leaf, "G1 mapper lock", Mutex::_safepoint_check_never) {
 
     guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
   }

@@ -42,7 +42,6 @@ G1ServiceThread::G1ServiceThread() :
   ConcurrentGCThread(),
   _monitor(Mutex::leaf,
            "G1ServiceThread monitor",
-           true,
            Monitor::_safepoint_check_never),
   _task_queue() {
   set_name("G1 Service");

@@ -233,7 +233,7 @@ HeapRegion::HeapRegion(uint hrm_index,
   _top(NULL),
   _compaction_top(NULL),
   _bot_part(bot, this),
-  _par_alloc_lock(Mutex::leaf, "HeapRegion par alloc lock", true),
+  _par_alloc_lock(Mutex::leaf, "HeapRegion par alloc lock", Mutex::_safepoint_check_always, true),
   _pre_dummy_top(NULL),
   _rem_set(NULL),
   _hrm_index(hrm_index),
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@ const char* HeapRegionRemSet::_short_state_strings[] = {"UNTRA", "UPDAT", "CMPL
 
 HeapRegionRemSet::HeapRegionRemSet(HeapRegion* hr,
                                    G1CardSetConfiguration* config) :
-  _m(Mutex::leaf + 1, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
+  _m(Mutex::leaf + 1, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), Monitor::_safepoint_check_never),
   _code_roots(),
   _card_set_mm(config, G1CardSetFreePool::free_list_pool()),
   _card_set(config, &_card_set_mm),

@@ -93,7 +93,7 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
   _shadow_region_array = new (ResourceObj::C_HEAP, mtGC) GrowableArray<size_t >(10, mtGC);
 
   _shadow_region_monitor = new Monitor(Mutex::barrier, "CompactionManager monitor",
-                                       Mutex::_allow_vm_block_flag, Monitor::_safepoint_check_never);
+                                       Monitor::_safepoint_check_never);
 }
 
 void ParCompactionManager::reset_all_bitmap_query_caches() {

@@ -35,7 +35,6 @@ void GCLogPrecious::initialize() {
   _temp = new (ResourceObj::C_HEAP, mtGC) stringStream();
   _lock = new Mutex(Mutex::event, /* The lowest lock rank I could find */
                     "GCLogPrecious Lock",
-                    true,
                     Mutex::_safepoint_check_never);
 }
 

@@ -814,7 +814,7 @@ static Mutex* make_oopstorage_mutex(const char* storage_name,
                                     int rank) {
   char name[256];
   os::snprintf(name, sizeof(name), "%s %s lock", storage_name, kind);
-  return new PaddedMutex(rank, name, true, Mutex::_safepoint_check_never);
+  return new PaddedMutex(rank, name, Mutex::_safepoint_check_never);
 }
 
 void* OopStorage::operator new(size_t size, MEMFLAGS memflags) {

@@ -777,7 +777,8 @@ HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end
 OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                                MemRegion mr) :
   _offsets(sharedOffsetArray, mr),
-  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
+  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock",
+                  Mutex::_safepoint_check_always, true)
 {
   _offsets.set_contig_space(this);
   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);

@@ -72,7 +72,7 @@ TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
   _n_threads(n_threads),
   _queue_set(queue_set),
   _offered_termination(0),
-  _blocker(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never),
+  _blocker(Mutex::leaf, "TaskTerminator", Monitor::_safepoint_check_never),
   _spin_master(NULL) { }
 
 TaskTerminator::~TaskTerminator() {
@@ -266,13 +266,13 @@ void GangWorker::loop() {
 // *** WorkGangBarrierSync
 
 WorkGangBarrierSync::WorkGangBarrierSync()
-  : _monitor(Mutex::safepoint, "work gang barrier sync", true,
+  : _monitor(Mutex::safepoint, "work gang barrier sync",
              Monitor::_safepoint_check_never),
     _n_workers(0), _n_completed(0), _should_reset(false), _aborted(false) {
 }
 
 WorkGangBarrierSync::WorkGangBarrierSync(uint n_workers, const char* name)
-  : _monitor(Mutex::safepoint, name, true, Monitor::_safepoint_check_never),
+  : _monitor(Mutex::safepoint, name, Monitor::_safepoint_check_never),
     _n_workers(n_workers), _n_completed(0), _should_reset(false), _aborted(false) {
 }
 

@@ -47,8 +47,8 @@
 
 ShenandoahControlThread::ShenandoahControlThread() :
   ConcurrentGCThread(),
-  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
-  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
+  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", Monitor::_safepoint_check_always, true),
+  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", Monitor::_safepoint_check_always, true),
   _periodic_task(this),
   _requested_gc_cause(GCCause::_no_cause_specified),
   _degen_point(ShenandoahGC::_degenerated_outside_cycle),

@@ -67,7 +67,7 @@ public:
     _heap(heap),
     _last_time(os::elapsedTime()),
     _progress_history(new TruncatedSeq(5)),
-    _wait_monitor(new Monitor(Mutex::leaf, "_wait_monitor", true, Monitor::_safepoint_check_always)),
+    _wait_monitor(new Monitor(Mutex::leaf, "_wait_monitor", Monitor::_safepoint_check_always, true)),
     _epoch(0),
     _tax_rate(1),
     _budget(0),

@@ -68,7 +68,6 @@ template <typename T>
 inline ZMessagePort<T>::ZMessagePort() :
     _monitor(Monitor::leaf,
              "ZMessagePort",
-             Monitor::_allow_vm_block_flag,
              Monitor::_safepoint_check_never),
     _has_message(false),
     _seqnum(0),

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 #include "utilities/ticks.hpp"
 
 ZMetronome::ZMetronome(uint64_t hz) :
-    _monitor(Monitor::leaf, "ZMetronome", false, Monitor::_safepoint_check_never),
+    _monitor(Monitor::leaf, "ZMetronome", Monitor::_safepoint_check_never),
     _interval_ms(MILLIUNITS / hz),
     _start_ms(0),
     _nticks(0),
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -245,7 +245,7 @@ class ParHeapInspectTask : public AbstractGangTask {
     _filter(filter),
     _missed_count(0),
     _success(true),
-    _mutex(Mutex::leaf, "Parallel heap iteration data merge lock") {}
+    _mutex(Mutex::leaf, "Parallel heap iteration data merge lock", Mutex::_safepoint_check_always) {}
 
   uintx missed_count() const {
     return _missed_count;

@@ -92,7 +92,7 @@ MetaspaceTestContext::~MetaspaceTestContext() {
 // Create an arena, feeding off this area.
 MetaspaceTestArena* MetaspaceTestContext::create_arena(Metaspace::MetaspaceType type) {
   const ArenaGrowthPolicy* growth_policy = ArenaGrowthPolicy::policy_for_space_type(type, false);
-  Mutex* lock = new Mutex(Monitor::leaf, "MetaspaceTestArea-lock", false, Monitor::_safepoint_check_never);
+  Mutex* lock = new Mutex(Monitor::leaf, "MetaspaceTestArea-lock", Monitor::_safepoint_check_never);
   MetaspaceArena* arena = NULL;
   {
     MutexLocker ml(lock, Mutex::_no_safepoint_check_flag);

@@ -1206,7 +1206,7 @@ void MethodData::post_initialize(BytecodeStream* stream) {
 // Initialize the MethodData* corresponding to a given method.
 MethodData::MethodData(const methodHandle& method)
   : _method(method()),
-    _extra_data_lock(Mutex::leaf, "MDO extra data lock"),
+    _extra_data_lock(Mutex::leaf, "MDO extra data lock", Mutex::_safepoint_check_always),
     _compiler_counters(),
     _parameters_type_data_di(parameters_uninitialized) {
   initialize();

@@ -72,8 +72,7 @@ bool JvmtiTagMap::_has_object_free_events = false;
 // create a JvmtiTagMap
 JvmtiTagMap::JvmtiTagMap(JvmtiEnv* env) :
   _env(env),
-  _lock(Mutex::nonleaf+1, "JvmtiTagMap_lock", Mutex::_allow_vm_block_flag,
-        Mutex::_safepoint_check_never),
+  _lock(Mutex::nonleaf+1, "JvmtiTagMap_lock", Mutex::_safepoint_check_never),
   _needs_rehashing(false),
   _needs_cleaning(false) {
 

@@ -408,7 +408,7 @@ void Handshake::execute(AsyncHandshakeClosure* hs_cl, JavaThread* target) {
 HandshakeState::HandshakeState(JavaThread* target) :
   _handshakee(target),
   _queue(),
-  _lock(Monitor::leaf, "HandshakeState", Mutex::_allow_vm_block_flag, Monitor::_safepoint_check_never),
+  _lock(Monitor::leaf, "HandshakeState", Monitor::_safepoint_check_never),
   _active_handshaker(),
   _suspended(false),
   _async_suspend_handshake(false)
@@ -274,8 +274,8 @@ Mutex::~Mutex() {
   os::free(const_cast<char*>(_name));
 }
 
-Mutex::Mutex(int Rank, const char * name, bool allow_vm_block,
-             SafepointCheckRequired safepoint_check_required) : _owner(NULL) {
+Mutex::Mutex(int Rank, const char * name, SafepointCheckRequired safepoint_check_required,
+             bool allow_vm_block) : _owner(NULL) {
   assert(os::mutex_init_done(), "Too early!");
   assert(name != NULL, "Mutex requires a name");
   _name = os::strdup(name, mtInternal);
@@ -288,14 +288,15 @@ Mutex::Mutex(int Rank, const char * name, bool allow_vm_block,
   assert(_rank > special || _safepoint_check_required == _safepoint_check_never,
          "Special locks or below should never safepoint");
 
+  // The allow_vm_block also includes allowing other non-Java threads to block or
+  // allowing Java threads to block in native.
+  assert(_safepoint_check_required == _safepoint_check_always || _allow_vm_block,
+         "Safepoint check never locks should always allow the vm to block");
+
   assert(_rank >= 0, "Bad lock rank");
 #endif
 }
 
-Monitor::Monitor(int Rank, const char * name, bool allow_vm_block,
-                 SafepointCheckRequired safepoint_check_required) :
-  Mutex(Rank, name, allow_vm_block, safepoint_check_required) {}
-
 bool Mutex::owned_by_self() const {
   return owner() == Thread::current();
 }
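The new assertion above is what the added monitor_special_vm_block gtest at the end of the patch exercises. A hedged sketch of the combinations it accepts and rejects (the lock names below are illustrative, not from the patch):

    // OK: a never-check lock that allows the VM to block.
    Mutex ok_lock(Mutex::leaf, "Example_never_lock", Mutex::_safepoint_check_never, true);

    // Trips the new assert in debug builds: a never-check lock with allow_vm_block = false
    // ("Safepoint check never locks should always allow the vm to block").
    Mutex bad_lock(Mutex::leaf, "Example_bad_lock", Mutex::_safepoint_check_never, false);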
@@ -143,8 +143,12 @@ class Mutex : public CHeapObj<mtSynchronizer> {
   NOT_PRODUCT(SafepointCheckRequired _safepoint_check_required;)
 
  public:
-  Mutex(int rank, const char *name, bool allow_vm_block = false,
-        SafepointCheckRequired safepoint_check_required = _safepoint_check_always);
+  Mutex(int rank, const char *name, SafepointCheckRequired safepoint_check_required, bool allow_vm_block);
+
+  Mutex(int rank, const char *name, SafepointCheckRequired safepoint_check_required) :
+    Mutex(rank, name, safepoint_check_required,
+          safepoint_check_required == _safepoint_check_never ? true : false) {}
+
   ~Mutex();
 
   void lock(); // prints out warning if VM thread blocks
@@ -184,9 +188,12 @@ class Mutex : public CHeapObj<mtSynchronizer> {
 
 class Monitor : public Mutex {
  public:
-  Monitor(int rank, const char *name, bool allow_vm_block = false,
-          SafepointCheckRequired safepoint_check_required = _safepoint_check_always);
-  // default destructor
+  Monitor(int rank, const char *name, SafepointCheckRequired safepoint_check_required, bool allow_vm_block) :
+    Mutex(rank, name, safepoint_check_required, allow_vm_block) {}
+
+  Monitor(int rank, const char *name, SafepointCheckRequired safepoint_check_required) :
+    Mutex(rank, name, safepoint_check_required) {}
+  // default destructor
 
   // Wait until monitor is notified (or times out).
   // Defaults are to make safepoint checks, wait time is forever (i.e.,
@@ -205,9 +212,10 @@ class PaddedMutex : public Mutex {
   };
   char _padding[PADDING_LEN];
  public:
-  PaddedMutex(int rank, const char *name, bool allow_vm_block = false,
-              SafepointCheckRequired safepoint_check_required = _safepoint_check_always) :
-    Mutex(rank, name, allow_vm_block, safepoint_check_required) {};
+  PaddedMutex(int rank, const char *name, SafepointCheckRequired safepoint_check_required, bool allow_vm_block) :
+    Mutex(rank, name, safepoint_check_required, allow_vm_block) {};
+  PaddedMutex(int rank, const char *name, SafepointCheckRequired safepoint_check_required) :
+    Mutex(rank, name, safepoint_check_required) {};
 };
 
 class PaddedMonitor : public Monitor {
@@ -217,9 +225,10 @@ class PaddedMonitor : public Monitor {
   };
   char _padding[PADDING_LEN];
  public:
-  PaddedMonitor(int rank, const char *name, bool allow_vm_block = false,
-                SafepointCheckRequired safepoint_check_required = _safepoint_check_always) :
-    Monitor(rank, name, allow_vm_block, safepoint_check_required) {};
+  PaddedMonitor(int rank, const char *name, SafepointCheckRequired safepoint_check_required, bool allow_vm_block) :
+    Monitor(rank, name, safepoint_check_required, allow_vm_block) {};
+  PaddedMonitor(int rank, const char *name, SafepointCheckRequired safepoint_check_required) :
+    Monitor(rank, name, safepoint_check_required) {};
 };
 
 #endif // SHARE_RUNTIME_MUTEX_HPP
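For call sites, the practical effect of the two overloads above is what most hunks in this patch do. A hedged usage sketch (lock names are made up; the defaults follow from the delegating constructor shown in the hunk):

    // Explicit form: a safepoint-check-always lock that still needs allow_vm_block = true
    // passes it as the trailing argument.
    Mutex explicit_lock(Mutex::leaf, "Example_alloc_lock", Mutex::_safepoint_check_always, true);

    // Shorthand form: allow_vm_block is derived from the check mode,
    // true for _safepoint_check_never, false for _safepoint_check_always.
    Mutex shorthand_lock(Mutex::leaf, "Example_never_lock", Mutex::_safepoint_check_never);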
@@ -200,10 +200,10 @@ void assert_locked_or_safepoint_or_handshake(const Mutex* lock, const JavaThread
 }
 #endif
 
 #define def(var, type, pri, vm_block, safepoint_check_allowed ) { \
-  var = new type(Mutex::pri, #var, vm_block, Mutex::safepoint_check_allowed); \
+  var = new type(Mutex::pri, #var, Mutex::safepoint_check_allowed, vm_block); \
   assert(_num_mutex < MAX_NUM_MUTEX, "increase MAX_NUM_MUTEX"); \
   _mutex_array[_num_mutex++] = var; \
 }
 
 // Using Padded subclasses to prevent false sharing of these global monitors and mutexes.
@@ -288,14 +288,14 @@ void mutex_init() {
   def(Heap_lock , PaddedMonitor, nonleaf+1, false, _safepoint_check_always); // Doesn't safepoint check during termination.
   def(JfieldIdCreation_lock , PaddedMutex , nonleaf+1, true, _safepoint_check_always); // jfieldID, Used in VM_Operation
 
-  def(CompiledIC_lock , PaddedMutex , nonleaf+2, false, _safepoint_check_never); // locks VtableStubs_lock, InlineCacheBuffer_lock
+  def(CompiledIC_lock , PaddedMutex , nonleaf+2, true, _safepoint_check_never); // locks VtableStubs_lock, InlineCacheBuffer_lock
   def(CompileTaskAlloc_lock , PaddedMutex , nonleaf+2, true, _safepoint_check_always);
   def(CompileStatistics_lock , PaddedMutex , nonleaf+2, false, _safepoint_check_always);
   def(DirectivesStack_lock , PaddedMutex , special, true, _safepoint_check_never);
   def(MultiArray_lock , PaddedMutex , nonleaf+2, false, _safepoint_check_always);
 
   def(JvmtiThreadState_lock , PaddedMutex , nonleaf+2, false, _safepoint_check_always); // Used by JvmtiThreadState/JvmtiEventController
-  def(EscapeBarrier_lock , PaddedMonitor, leaf, false, _safepoint_check_never); // Used to synchronize object reallocation/relocking triggered by JVMTI
+  def(EscapeBarrier_lock , PaddedMonitor, leaf, true, _safepoint_check_never); // Used to synchronize object reallocation/relocking triggered by JVMTI
   def(Management_lock , PaddedMutex , nonleaf+2, false, _safepoint_check_always); // used for JVM management
 
   def(ConcurrentGCBreakpoints_lock , PaddedMonitor, nonleaf, true, _safepoint_check_always);
@@ -313,19 +313,19 @@ void mutex_init() {
   def(Zip_lock , PaddedMonitor, leaf, true, _safepoint_check_never);
 
   if (WhiteBoxAPI) {
-    def(Compilation_lock , PaddedMonitor, leaf, false, _safepoint_check_never);
+    def(Compilation_lock , PaddedMonitor, leaf, true, _safepoint_check_never);
   }
 
 #if INCLUDE_JFR
   def(JfrMsg_lock , PaddedMonitor, leaf, true, _safepoint_check_always);
   def(JfrBuffer_lock , PaddedMutex , leaf, true, _safepoint_check_never);
-  def(JfrStream_lock , PaddedMutex , nonleaf + 1, false, _safepoint_check_never);
+  def(JfrStream_lock , PaddedMutex , nonleaf + 1, true, _safepoint_check_never);
   def(JfrStacktrace_lock , PaddedMutex , stackwatermark-1, true, _safepoint_check_never);
   def(JfrThreadSampler_lock , PaddedMonitor, leaf, true, _safepoint_check_never);
 #endif
 
 #ifndef SUPPORTS_NATIVE_CX8
-  def(UnsafeJlong_lock , PaddedMutex , special, false, _safepoint_check_never);
+  def(UnsafeJlong_lock , PaddedMutex , special, true, _safepoint_check_never);
 #endif
 
   def(CodeHeapStateAnalytics_lock , PaddedMutex , nonleaf+6, false, _safepoint_check_always);
@@ -347,7 +347,7 @@ void mutex_init() {
   def(ClassListFile_lock , PaddedMutex , leaf, true, _safepoint_check_never);
   def(LambdaFormInvokers_lock , PaddedMutex , nonleaf+2, false, _safepoint_check_always);
 #endif // INCLUDE_CDS
-  def(Bootclasspath_lock , PaddedMutex , leaf, false, _safepoint_check_never);
+  def(Bootclasspath_lock , PaddedMutex , leaf, true, _safepoint_check_never);
 
 #if INCLUDE_JVMCI
   def(JVMCI_lock , PaddedMonitor, nonleaf+2, true, _safepoint_check_always);
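For reference, the def macro keeps its five-argument shape; only the argument order forwarded to the constructor changes. A sketch of what one line now expands to roughly, using the Bootclasspath_lock entry from the hunk above (expansion written out by hand, not generated output):

    def(Bootclasspath_lock , PaddedMutex , leaf, true, _safepoint_check_never);
    // expands roughly to:
    Bootclasspath_lock = new PaddedMutex(Mutex::leaf, "Bootclasspath_lock", Mutex::_safepoint_check_never, true);
    assert(_num_mutex < MAX_NUM_MUTEX, "increase MAX_NUM_MUTEX");
    _mutex_array[_num_mutex++] = Bootclasspath_lock;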
@@ -164,7 +164,7 @@ StackWatermark::StackWatermark(JavaThread* jt, StackWatermarkKind kind, uint32_t
   _next(NULL),
   _jt(jt),
   _iterator(NULL),
-  _lock(Mutex::stackwatermark, "StackWatermark_lock", true, Mutex::_safepoint_check_never),
+  _lock(Mutex::stackwatermark, "StackWatermark_lock", Mutex::_safepoint_check_never),
   _kind(kind),
   _linked_watermark(NULL) {
 }

@@ -368,8 +368,7 @@ int VM_Exit::wait_for_threads_in_native_to_block() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint already");
 
   Thread * thr_cur = Thread::current();
-  Monitor timer(Mutex::leaf, "VM_Exit timer", true,
-                Monitor::_safepoint_check_never);
+  Monitor timer(Mutex::leaf, "VM_Exit timer", Monitor::_safepoint_check_never);
 
   // Compiler threads need longer wait because they can access VM data directly
   // while in native. If they are active and some structures being used are

@@ -128,7 +128,7 @@ void VMThread::create() {
     assert(_timeout_task == NULL, "sanity");
   }
 
-  _terminate_lock = new Monitor(Mutex::safepoint, "VMThread::_terminate_lock", true,
+  _terminate_lock = new Monitor(Mutex::safepoint, "VMThread::_terminate_lock",
                                 Monitor::_safepoint_check_never);
 
   if (UsePerfData) {

@@ -201,7 +201,7 @@ CompressionBackend::CompressionBackend(AbstractWriter* writer,
   _writer(writer),
   _compressor(compressor),
   _lock(new (std::nothrow) PaddedMonitor(Mutex::leaf, "HProf Compression Backend",
-    true, Mutex::_safepoint_check_never)) {
+    Mutex::_safepoint_check_never)) {
   if (_writer == NULL) {
     set_error("Could not allocate writer");
   } else if (_lock == NULL) {

@@ -174,7 +174,7 @@ GCMemoryManager::GCMemoryManager(const char* name, const char* gc_end_message) :
   MemoryManager(name), _gc_end_message(gc_end_message) {
   _num_collections = 0;
   _last_gc_stat = NULL;
-  _last_gc_lock = new Mutex(Mutex::leaf, "_last_gc_lock", true,
+  _last_gc_lock = new Mutex(Mutex::leaf, "_last_gc_lock",
                             Mutex::_safepoint_check_never);
   _current_gc_stat = NULL;
   _num_gc_threads = 1;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1014,7 +1014,7 @@ inline ConcurrentHashTable<CONFIG, F>::
 {
   _stats_rate = TableRateStatistics();
   _resize_lock =
-    new Mutex(Mutex::leaf, "ConcurrentHashTable", true,
+    new Mutex(Mutex::leaf, "ConcurrentHashTable",
               Mutex::_safepoint_check_never);
   _table = new InternalTable(log2size);
   assert(log2size_limit >= log2size, "bad ergo");

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -100,7 +100,7 @@ template <class T> class EventLogBase : public EventLog {
 
  public:
   EventLogBase<T>(const char* name, const char* handle, int length = LogEventsBufferEntries):
-    _mutex(Mutex::event, name, true, Mutex::_safepoint_check_never),
+    _mutex(Mutex::event, name, Mutex::_safepoint_check_never),
     _name(name),
    _handle(handle),
     _length(length),
@@ -49,7 +49,7 @@ public:
   }
 
   void do_test(Metaspace::MetadataType mdType) {
-    _lock = new Mutex(Monitor::leaf, "gtest-IsMetaspaceObjTest-lock", false, Monitor::_safepoint_check_never);
+    _lock = new Mutex(Monitor::leaf, "gtest-IsMetaspaceObjTest-lock", Monitor::_safepoint_check_never);
     {
       MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag);
       _ms = new ClassLoaderMetaspace(_lock, Metaspace::StandardMetaspaceType);

@@ -66,7 +66,7 @@ class MetaspaceArenaTestHelper {
 
   void initialize(const ArenaGrowthPolicy* growth_policy, const char* name = "gtest-MetaspaceArena") {
     _growth_policy = growth_policy;
-    _lock = new Mutex(Monitor::leaf, "gtest-MetaspaceArenaTest-lock", false, Monitor::_safepoint_check_never);
+    _lock = new Mutex(Monitor::leaf, "gtest-MetaspaceArenaTest-lock", Monitor::_safepoint_check_never);
     // Lock during space creation, since this is what happens in the VM too
     // (see ClassLoaderData::metaspace_non_null(), which we mimick here).
     {

@@ -139,7 +139,7 @@ public:
     _alloc_count(),
     _dealloc_count()
   {
-    _lock = new Mutex(Monitor::leaf, "gtest-MetaspaceArenaTestBed-lock", false, Monitor::_safepoint_check_never);
+    _lock = new Mutex(Monitor::leaf, "gtest-MetaspaceArenaTestBed-lock", Monitor::_safepoint_check_never);
     // Lock during space creation, since this is what happens in the VM too
     // (see ClassLoaderData::metaspace_non_null(), which we mimick here).
     MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag);
@@ -35,7 +35,7 @@ static Mutex* m[iterations];
 static int i = 0;
 
 static void create_mutex(Thread* thr) {
-  m[i] = new Mutex(Mutex::leaf, FormatBuffer<128>("MyLock lock #%u", i), true, Mutex::_safepoint_check_never);
+  m[i] = new Mutex(Mutex::leaf, FormatBuffer<128>("MyLock lock #%u", i), Mutex::_safepoint_check_never);
   i++;
 }
 
@@ -59,8 +59,8 @@ TEST_OTHER_VM(MutexRank, mutex_lock_rank_in_order) {
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Mutex* mutex_rankA = new Mutex(rankA, "mutex_rankA", false, Mutex::_safepoint_check_always);
-  Mutex* mutex_rankA_plus_one = new Mutex(rankA + 1, "mutex_rankA_plus_one", false, Mutex::_safepoint_check_always);
+  Mutex* mutex_rankA = new Mutex(rankA, "mutex_rankA", Mutex::_safepoint_check_always);
+  Mutex* mutex_rankA_plus_one = new Mutex(rankA + 1, "mutex_rankA_plus_one", Mutex::_safepoint_check_always);
 
   mutex_rankA_plus_one->lock();
   mutex_rankA->lock();
@@ -73,8 +73,8 @@ TEST_VM_ASSERT_MSG(MutexRank, mutex_lock_rank_out_of_orderA,
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Mutex* mutex_rankA = new Mutex(rankA, "mutex_rankA", false, Mutex::_safepoint_check_always);
-  Mutex* mutex_rankA_plus_one = new Mutex(rankA + 1, "mutex_rankA_plus_one", false, Mutex::_safepoint_check_always);
+  Mutex* mutex_rankA = new Mutex(rankA, "mutex_rankA", Mutex::_safepoint_check_always);
+  Mutex* mutex_rankA_plus_one = new Mutex(rankA + 1, "mutex_rankA_plus_one", Mutex::_safepoint_check_always);
 
   mutex_rankA->lock();
   mutex_rankA_plus_one->lock();
@@ -87,8 +87,8 @@ TEST_VM_ASSERT_MSG(MutexRank, mutex_lock_rank_out_of_orderB,
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Mutex* mutex_rankA = new Mutex(rankA, "mutex_rankA", false, Mutex::_safepoint_check_always);
-  Mutex* mutex_rankB = new Mutex(rankA, "mutex_rankB", false, Mutex::_safepoint_check_always);
+  Mutex* mutex_rankA = new Mutex(rankA, "mutex_rankA", Mutex::_safepoint_check_always);
+  Mutex* mutex_rankB = new Mutex(rankA, "mutex_rankB", Mutex::_safepoint_check_always);
 
   mutex_rankA->lock();
   mutex_rankB->lock();
@@ -100,9 +100,9 @@ TEST_OTHER_VM(MutexRank, mutex_trylock_rank_out_of_orderA) {
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Mutex* mutex_rankA = new Mutex(rankA, "mutex_rankA", false, Mutex::_safepoint_check_always);
-  Mutex* mutex_rankA_plus_one = new Mutex(rankA + 1, "mutex_rankA_plus_one", false, Mutex::_safepoint_check_always);
-  Mutex* mutex_rankA_plus_two = new Mutex(rankA + 2, "mutex_rankA_plus_two", false, Mutex::_safepoint_check_always);
+  Mutex* mutex_rankA = new Mutex(rankA, "mutex_rankA", Mutex::_safepoint_check_always);
+  Mutex* mutex_rankA_plus_one = new Mutex(rankA + 1, "mutex_rankA_plus_one", Mutex::_safepoint_check_always);
+  Mutex* mutex_rankA_plus_two = new Mutex(rankA + 2, "mutex_rankA_plus_two", Mutex::_safepoint_check_always);
 
   mutex_rankA_plus_one->lock();
   mutex_rankA_plus_two->try_lock_without_rank_check();
@@ -117,8 +117,8 @@ TEST_VM_ASSERT_MSG(MutexRank, mutex_trylock_rank_out_of_orderB,
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Mutex* mutex_rankA = new Mutex(rankA, "mutex_rankA", false, Mutex::_safepoint_check_always);
-  Mutex* mutex_rankA_plus_one = new Mutex(rankA + 1, "mutex_rankA_plus_one", false, Mutex::_safepoint_check_always);
+  Mutex* mutex_rankA = new Mutex(rankA, "mutex_rankA", Mutex::_safepoint_check_always);
+  Mutex* mutex_rankA_plus_one = new Mutex(rankA + 1, "mutex_rankA_plus_one", Mutex::_safepoint_check_always);
 
   mutex_rankA->lock();
   mutex_rankA_plus_one->try_lock_without_rank_check();
@@ -134,8 +134,8 @@ TEST_VM_ASSERT_MSG(MutexRank, mutex_lock_event_leaf,
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Mutex* mutex_rank_event = new Mutex(Mutex::event, "mutex_rank_event", false, Mutex::_safepoint_check_never);
-  Mutex* mutex_rank_leaf = new Mutex(Mutex::leaf, "mutex_rank_leaf", false, Mutex::_safepoint_check_never);
+  Mutex* mutex_rank_event = new Mutex(Mutex::event, "mutex_rank_event", Mutex::_safepoint_check_never);
+  Mutex* mutex_rank_leaf = new Mutex(Mutex::leaf, "mutex_rank_leaf", Mutex::_safepoint_check_never);
 
   mutex_rank_event->lock_without_safepoint_check();
   mutex_rank_leaf->lock_without_safepoint_check();
@@ -149,8 +149,8 @@ TEST_VM_ASSERT_MSG(MutexRank, mutex_lock_tty_special,
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Mutex* mutex_rank_tty = new Mutex(Mutex::tty, "mutex_rank_tty", false, Mutex::_safepoint_check_never);
-  Mutex* mutex_rank_special = new Mutex(Mutex::special, "mutex_rank_special", false, Mutex::_safepoint_check_never);
+  Mutex* mutex_rank_tty = new Mutex(Mutex::tty, "mutex_rank_tty", Mutex::_safepoint_check_never);
+  Mutex* mutex_rank_special = new Mutex(Mutex::special, "mutex_rank_special", Mutex::_safepoint_check_never);
 
   mutex_rank_tty->lock_without_safepoint_check();
   mutex_rank_special->lock_without_safepoint_check();
@@ -162,8 +162,8 @@ TEST_OTHER_VM(MutexRank, monitor_wait_rank_in_order) {
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Monitor* monitor_rankA = new Monitor(rankA, "monitor_rankA", false, Mutex::_safepoint_check_always);
-  Monitor* monitor_rankA_plus_one = new Monitor(rankA + 1, "monitor_rankA_plus_one", false, Mutex::_safepoint_check_always);
+  Monitor* monitor_rankA = new Monitor(rankA, "monitor_rankA", Mutex::_safepoint_check_always);
+  Monitor* monitor_rankA_plus_one = new Monitor(rankA + 1, "monitor_rankA_plus_one", Mutex::_safepoint_check_always);
 
   monitor_rankA_plus_one->lock();
   monitor_rankA->lock();
@@ -178,8 +178,8 @@ TEST_VM_ASSERT_MSG(MutexRank, monitor_wait_rank_out_of_order,
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Monitor* monitor_rankA = new Monitor(rankA, "monitor_rankA", false, Mutex::_safepoint_check_always);
-  Monitor* monitor_rankA_plus_one = new Monitor(rankA + 1, "monitor_rankA_plus_one", false, Mutex::_safepoint_check_always);
+  Monitor* monitor_rankA = new Monitor(rankA, "monitor_rankA", Mutex::_safepoint_check_always);
+  Monitor* monitor_rankA_plus_one = new Monitor(rankA + 1, "monitor_rankA_plus_one", Mutex::_safepoint_check_always);
 
   monitor_rankA_plus_one->lock();
   monitor_rankA->lock();
@@ -194,8 +194,8 @@ TEST_VM_ASSERT_MSG(MutexRank, monitor_wait_rank_out_of_order_trylock,
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Monitor* monitor_rankA = new Monitor(rankA, "monitor_rankA", false, Mutex::_safepoint_check_always);
-  Monitor* monitor_rankA_plus_one = new Monitor(rankA + 1, "monitor_rankA_plus_one", false, Mutex::_safepoint_check_always);
+  Monitor* monitor_rankA = new Monitor(rankA, "monitor_rankA", Mutex::_safepoint_check_always);
+  Monitor* monitor_rankA_plus_one = new Monitor(rankA + 1, "monitor_rankA_plus_one", Mutex::_safepoint_check_always);
 
   monitor_rankA->lock();
   monitor_rankA_plus_one->try_lock_without_rank_check();
@@ -210,8 +210,8 @@ TEST_VM_ASSERT_MSG(MutexRank, monitor_wait_rank_special,
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Monitor* monitor_rank_special = new Monitor(Mutex::special, "monitor_rank_special", false, Mutex::_safepoint_check_never);
-  Monitor* monitor_rank_special_minus_one = new Monitor(Mutex::special - 1, "monitor_rank_special_minus_one", false, Mutex::_safepoint_check_never);
+  Monitor* monitor_rank_special = new Monitor(Mutex::special, "monitor_rank_special", Mutex::_safepoint_check_never);
+  Monitor* monitor_rank_special_minus_one = new Monitor(Mutex::special - 1, "monitor_rank_special_minus_one", Mutex::_safepoint_check_never);
 
   monitor_rank_special->lock_without_safepoint_check();
   monitor_rank_special_minus_one->lock_without_safepoint_check();
@@ -226,8 +226,8 @@ TEST_VM_ASSERT_MSG(MutexRank, monitor_wait_event_tty,
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Monitor* monitor_rank_tty = new Monitor(Mutex::tty, "monitor_rank_tty", false, Mutex::_safepoint_check_never);
-  Monitor* monitor_rank_event = new Monitor(Mutex::event, "monitor_rank_event", false, Mutex::_safepoint_check_never);
+  Monitor* monitor_rank_tty = new Monitor(Mutex::tty, "monitor_rank_tty", Mutex::_safepoint_check_never);
+  Monitor* monitor_rank_event = new Monitor(Mutex::event, "monitor_rank_event", Mutex::_safepoint_check_never);
 
   monitor_rank_tty->lock_without_safepoint_check();
   monitor_rank_event->lock_without_safepoint_check();
@@ -242,8 +242,8 @@ TEST_VM_ASSERT_MSG(MutexRank, monitor_wait_tty_special,
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Monitor* monitor_rank_special = new Monitor(Mutex::special, "monitor_rank_special", false, Mutex::_safepoint_check_never);
-  Monitor* monitor_rank_tty = new Monitor(Mutex::tty, "monitor_rank_tty", false, Mutex::_safepoint_check_never);
+  Monitor* monitor_rank_special = new Monitor(Mutex::special, "monitor_rank_special", Mutex::_safepoint_check_never);
+  Monitor* monitor_rank_tty = new Monitor(Mutex::tty, "monitor_rank_tty", Mutex::_safepoint_check_never);
 
   monitor_rank_special->lock_without_safepoint_check();
   monitor_rank_tty->lock_without_safepoint_check();
@@ -252,12 +252,22 @@ TEST_VM_ASSERT_MSG(MutexRank, monitor_wait_tty_special,
   monitor_rank_special->unlock();
 }
 
+TEST_VM_ASSERT_MSG(MutexRank, monitor_special_vm_block,
+                   ".*Safepoint check never locks should always allow the vm to block") {
+  JavaThread* THREAD = JavaThread::current();
+  ThreadInVMfromNative invm(THREAD);
+
+  Monitor* monitor_rank_special = new Monitor(Mutex::special, "monitor_rank_special", Mutex::_safepoint_check_never, false);
+  monitor_rank_special->lock_without_safepoint_check();
+  monitor_rank_special->unlock();
+}
+
 TEST_VM_ASSERT_MSG(MutexRank, monitor_negative_rank,
                    ".*Bad lock rank") {
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
 
-  Monitor* monitor_rank_broken = new Monitor(Mutex::event-1, "monitor_rank_broken", false, Mutex::_safepoint_check_never);
+  Monitor* monitor_rank_broken = new Monitor(Mutex::event-1, "monitor_rank_broken", Mutex::_safepoint_check_never);
   monitor_rank_broken->lock_without_safepoint_check();
   monitor_rank_broken->unlock();
 }
@@ -32,19 +32,19 @@
 // Test mismatched safepoint check flag on lock declaration vs. lock acquisition.
 TEST_VM_ASSERT_MSG(SafepointLockAssertTest, always_check,
                    ".*This lock should always have a safepoint check for Java threads: SFPT_Test_lock") {
-  MutexLocker ml(new Mutex(Mutex::leaf, "SFPT_Test_lock", true, Mutex::_safepoint_check_always),
+  MutexLocker ml(new Mutex(Mutex::leaf, "SFPT_Test_lock", Mutex::_safepoint_check_always),
                  Mutex::_no_safepoint_check_flag);
 }
 
 TEST_VM_ASSERT_MSG(SafepointLockAssertTest, never_check,
                    ".*This lock should never have a safepoint check for Java threads: SFPT_Test_lock") {
-  MutexLocker ml(new Mutex(Mutex::leaf, "SFPT_Test_lock", true, Mutex::_safepoint_check_never),
+  MutexLocker ml(new Mutex(Mutex::leaf, "SFPT_Test_lock", Mutex::_safepoint_check_never),
                  Mutex::_safepoint_check_flag);
 }
 
 TEST_VM_ASSERT_MSG(SafepointLockAssertTest, special_locks,
                    ".*Special locks or below should never safepoint") {
-  MutexLocker ml(new Mutex(Mutex::special, "SpecialTest_lock", /*vm_block*/true, Mutex::_safepoint_check_always),
+  MutexLocker ml(new Mutex(Mutex::special, "SpecialTest_lock", Mutex::_safepoint_check_always),
                  Mutex::_safepoint_check_flag);
 }
 
@@ -52,7 +52,7 @@ TEST_VM_ASSERT_MSG(SafepointLockAssertTest, possible_safepoint_lock,
                    ".* Possible safepoint reached by thread that does not allow it") {
   JavaThread* thread = JavaThread::current();
   ThreadInVMfromNative in_native(thread);
-  MutexLocker ml(new Mutex(Mutex::special, "SpecialTest_lock", /*vm_block*/true, Mutex::_safepoint_check_never),
+  MutexLocker ml(new Mutex(Mutex::special, "SpecialTest_lock", Mutex::_safepoint_check_never),
                  Mutex::_no_safepoint_check_flag);
   thread->print_thread_state_on(tty);
   // If the lock above succeeds, try to safepoint to test the NSV implied with this special lock.
@@ -196,7 +196,7 @@ public:
 
 TEST_VM(FilterQueue, stress) {
   FilterQueue<uintptr_t> queue;
-  Mutex lock(Mutex::leaf, "Test Lock", true, Mutex::_safepoint_check_never);
+  Mutex lock(Mutex::leaf, "Test Lock", Mutex::_safepoint_check_never);
   static const int nthreads = 4;
   Semaphore post;
   FilterQueueTestThread* threads[nthreads] = {};