8272447: Remove 'native' ranked Mutex
Reviewed-by: stuefe, pchilanomate
parent 63e062fb78
commit 2ef6871118
8 changed files with 19 additions and 25 deletions
@@ -92,7 +92,7 @@ MetaspaceTestContext::~MetaspaceTestContext() {
 // Create an arena, feeding off this area.
 MetaspaceTestArena* MetaspaceTestContext::create_arena(Metaspace::MetaspaceType type) {
   const ArenaGrowthPolicy* growth_policy = ArenaGrowthPolicy::policy_for_space_type(type, false);
-  Mutex* lock = new Mutex(Monitor::native, "MetaspaceTestArea-lock", false, Monitor::_safepoint_check_never);
+  Mutex* lock = new Mutex(Monitor::leaf, "MetaspaceTestArea-lock", false, Monitor::_safepoint_check_never);
   MetaspaceArena* arena = NULL;
   {
     MutexLocker ml(lock, Mutex::_no_safepoint_check_flag);
@@ -369,18 +369,16 @@ void Mutex::check_rank(Thread* thread) {
 
   if (!SafepointSynchronize::is_at_safepoint()) {
     // We expect the locks already acquired to be in increasing rank order,
-    // modulo locks of native rank or acquired in try_lock_without_rank_check()
+    // modulo locks acquired in try_lock_without_rank_check()
     for (Mutex* tmp = locks_owned; tmp != NULL; tmp = tmp->next()) {
       if (tmp->next() != NULL) {
-        assert(tmp->rank() == Mutex::native || tmp->rank() < tmp->next()->rank()
+        assert(tmp->rank() < tmp->next()->rank()
               || tmp->skip_rank_check(), "mutex rank anomaly?");
       }
     }
   }
 
-  // Locks with rank native are an exception and are not
-  // subject to the verification rules.
-  bool check_can_be_skipped = this->rank() == Mutex::native || SafepointSynchronize::is_at_safepoint();
+  bool check_can_be_skipped = SafepointSynchronize::is_at_safepoint();
   if (owned_by_self()) {
     // wait() case
     Mutex* least = get_least_ranked_lock_besides_this(locks_owned);
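For reference, a minimal standalone sketch of the rank-ordering invariant that the loop above asserts, once the 'native' escape hatch is gone. This is plain C++ with made-up names (FakeMutex, check_rank_order), not HotSpot code, and it assumes the owned-locks list is walked from the most recently acquired lock outward, as the comment "in increasing rank order" suggests:

    // Simplified model of the check above: every adjacent pair of owned locks
    // must be strictly increasing in rank unless the rank check was skipped
    // via try_lock_without_rank_check().
    #include <cassert>

    struct FakeMutex {
      int        rank;             // numeric lock rank
      bool       skip_rank_check;  // set by a rank-check-free trylock
      FakeMutex* next;             // previously acquired lock in the list
    };

    void check_rank_order(const FakeMutex* locks_owned) {
      for (const FakeMutex* tmp = locks_owned; tmp != nullptr; tmp = tmp->next) {
        if (tmp->next != nullptr) {
          assert(tmp->rank < tmp->next->rank || tmp->skip_rank_check);
        }
      }
    }

    int main() {
      FakeMutex outer = { /*rank=*/7, /*skip=*/false, /*next=*/nullptr };
      FakeMutex inner = { /*rank=*/3, /*skip=*/false, /*next=*/&outer };
      check_rank_order(&inner);  // 3 < 7: ordering holds, no assert fires
      return 0;
    }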
@@ -54,10 +54,6 @@ class Mutex : public CHeapObj<mtSynchronizer> {
 // inherently a bit more special than even locks of the 'special' rank.
 // NOTE: It is critical that the rank 'special' be the lowest (earliest)
 // (except for "event" and "access") for the deadlock detection to work correctly.
-// The rank native was only for use in Mutexes created by JVM_RawMonitorCreate,
-// which being external to the VM are not subject to deadlock detection,
-// however it has now been used by other locks that don't fit into the
-// deadlock detection scheme.
 // While at a safepoint no mutexes of rank safepoint are held by any thread.
 // The rank named "leaf" is probably historical (and should
 // be changed) -- mutexes of this rank aren't really leaf mutexes
@@ -65,15 +61,15 @@ class Mutex : public CHeapObj<mtSynchronizer> {
   enum lock_types {
     event,
     access      = event      +   1,
-    tty         = access     +   2,
+    service     = access     +   3,
+    tty         = service    +   3,
     special     = tty        +   3,
     oopstorage  = special    +   3,
     leaf        = oopstorage +   2,
     safepoint   = leaf       +  10,
     barrier     = safepoint  +   1,
     nonleaf     = barrier    +   1,
-    max_nonleaf = nonleaf    + 900,
-    native      = max_nonleaf +  1
+    max_nonleaf = nonleaf    + 900
   };
 
  private:
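To see where the new 'service' rank lands, here is a small standalone sketch (plain C++, not the HotSpot header; the numbers simply follow the enum arithmetic shown above, and the old values are the ones hard-coded in the gtest messages further down):

    // Resolving the new enum arithmetic to concrete numbers shows why the
    // hard-coded ranks in the gtest regexes (access/1, tty/3, special/6,
    // leaf/11) no longer match and had to become ".*".
    #include <cstdio>

    enum lock_types {
      event,
      access      = event      +   1,  //   1 (unchanged)
      service     = access     +   3,  //   4 (new rank)
      tty         = service    +   3,  //   7 (was 3)
      special     = tty        +   3,  //  10 (was 6)
      oopstorage  = special    +   3,  //  13
      leaf        = oopstorage +   2,  //  15 (was 11)
      safepoint   = leaf       +  10,  //  25
      barrier     = safepoint  +   1,  //  26
      nonleaf     = barrier    +   1,  //  27
      max_nonleaf = nonleaf    + 900   // 927 -- 'native' (max_nonleaf + 1) is gone
    };

    int main() {
      std::printf("service=%d tty=%d special=%d leaf=%d max_nonleaf=%d\n",
                  service, tty, special, leaf, max_nonleaf);
      return 0;
    }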
@@ -226,7 +226,7 @@ void mutex_init() {
     def(MarkStackFreeList_lock        , PaddedMutex  , leaf,        true,  _safepoint_check_never);
     def(MarkStackChunkList_lock       , PaddedMutex  , leaf,        true,  _safepoint_check_never);
 
-    def(MonitoringSupport_lock        , PaddedMutex  , native,      true,  _safepoint_check_never);      // used for serviceability monitoring support
+    def(MonitoringSupport_lock        , PaddedMutex  , service-1,   true,  _safepoint_check_never);      // used for serviceability monitoring support
   }
   def(StringDedup_lock                , PaddedMonitor, leaf,        true,  _safepoint_check_never);
   def(StringDedupIntern_lock          , PaddedMutex  , leaf,        true,  _safepoint_check_never);
@@ -242,10 +242,10 @@ void mutex_init() {
   def(Patching_lock                   , PaddedMutex  , special,     true,  _safepoint_check_never);      // used for safepointing and code patching.
   def(CompiledMethod_lock             , PaddedMutex  , special-1,   true,  _safepoint_check_never);
   def(MonitorDeflation_lock           , PaddedMonitor, tty-2,       true,  _safepoint_check_never);      // used for monitor deflation thread operations
-  def(Service_lock                    , PaddedMonitor, tty-2,       true,  _safepoint_check_never);      // used for service thread operations
+  def(Service_lock                    , PaddedMonitor, service,     true,  _safepoint_check_never);      // used for service thread operations
 
   if (UseNotificationThread) {
-    def(Notification_lock             , PaddedMonitor, special,     true,  _safepoint_check_never);      // used for notification thread operations
+    def(Notification_lock             , PaddedMonitor, service,     true,  _safepoint_check_never);      // used for notification thread operations
   } else {
     Notification_lock = Service_lock;
   }
@@ -332,7 +332,7 @@ void mutex_init() {
   def(NMethodSweeperStats_lock        , PaddedMutex  , special,     true,  _safepoint_check_never);
   def(ThreadsSMRDelete_lock           , PaddedMonitor, special,     true,  _safepoint_check_never);
   def(ThreadIdTableCreate_lock        , PaddedMutex  , leaf,        false, _safepoint_check_always);
-  def(SharedDecoder_lock              , PaddedMutex  , native,      true,  _safepoint_check_never);
+  def(SharedDecoder_lock              , PaddedMutex  , tty-1,       true,  _safepoint_check_never);
   def(DCmdFactory_lock                , PaddedMutex  , leaf,        true,  _safepoint_check_never);
 #if INCLUDE_NMT
   def(NMTQuery_lock                   , PaddedMutex  , max_nonleaf, false, _safepoint_check_always);
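The def(...) lines above assign the former 'native' locks relative ranks (service-1, tty-1) instead. A standalone illustration of what those expressions resolve to, using the values derived from the new enum arithmetic rather than anything quoted from mutexLocker.cpp:

    // Illustration only: concrete ranks for the locks that lost 'native'.
    #include <cstdio>

    enum lock_types { event, access = event + 1, service = access + 3,
                      tty = service + 3, special = tty + 3,
                      oopstorage = special + 3, leaf = oopstorage + 2 };

    int main() {
      std::printf("MonitoringSupport_lock: service-1 = %d\n", service - 1);  // just below the service locks
      std::printf("SharedDecoder_lock:     tty-1     = %d\n", tty - 1);      // just below the tty lock
      std::printf("Service_lock:           service   = %d (was tty-2)\n", service);
      std::printf("Notification_lock:      service   = %d (was special)\n", service);
      return 0;
    }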
@@ -49,7 +49,7 @@ public:
   }
 
   void do_test(Metaspace::MetadataType mdType) {
-    _lock = new Mutex(Monitor::native, "gtest-IsMetaspaceObjTest-lock", false, Monitor::_safepoint_check_never);
+    _lock = new Mutex(Monitor::leaf, "gtest-IsMetaspaceObjTest-lock", false, Monitor::_safepoint_check_never);
     {
       MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag);
       _ms = new ClassLoaderMetaspace(_lock, Metaspace::StandardMetaspaceType);
@@ -66,7 +66,7 @@ class MetaspaceArenaTestHelper {
 
   void initialize(const ArenaGrowthPolicy* growth_policy, const char* name = "gtest-MetaspaceArena") {
     _growth_policy = growth_policy;
-    _lock = new Mutex(Monitor::native, "gtest-MetaspaceArenaTest-lock", false, Monitor::_safepoint_check_never);
+    _lock = new Mutex(Monitor::leaf, "gtest-MetaspaceArenaTest-lock", false, Monitor::_safepoint_check_never);
     // Lock during space creation, since this is what happens in the VM too
     // (see ClassLoaderData::metaspace_non_null(), which we mimick here).
     {
@@ -139,7 +139,7 @@ public:
     _alloc_count(),
     _dealloc_count()
   {
-    _lock = new Mutex(Monitor::native, "gtest-MetaspaceArenaTestBed-lock", false, Monitor::_safepoint_check_never);
+    _lock = new Mutex(Monitor::leaf, "gtest-MetaspaceArenaTestBed-lock", false, Monitor::_safepoint_check_never);
     // Lock during space creation, since this is what happens in the VM too
     // (see ClassLoaderData::metaspace_non_null(), which we mimick here).
     MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag);
@@ -129,7 +129,7 @@ TEST_VM_ASSERT_MSG(MutexRank, mutex_trylock_rank_out_of_orderB,
 }
 
 TEST_VM_ASSERT_MSG(MutexRank, mutex_lock_access_leaf,
-                   ".* Attempting to acquire lock mutex_rank_leaf/11 out of order with lock mutex_rank_access/1 "
+                   ".* Attempting to acquire lock mutex_rank_leaf/.* out of order with lock mutex_rank_access/1 "
                    "-- possible deadlock") {
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
@@ -144,7 +144,7 @@ TEST_VM_ASSERT_MSG(MutexRank, mutex_lock_access_leaf,
 }
 
 TEST_VM_ASSERT_MSG(MutexRank, mutex_lock_tty_special,
-                   ".* Attempting to acquire lock mutex_rank_special/6 out of order with lock mutex_rank_tty/3 "
+                   ".* Attempting to acquire lock mutex_rank_special/.* out of order with lock mutex_rank_tty/.*"
                    "-- possible deadlock") {
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
@@ -205,7 +205,7 @@ TEST_VM_ASSERT_MSG(MutexRank, monitor_wait_rank_out_of_order_trylock,
 }
 
 TEST_VM_ASSERT_MSG(MutexRank, monitor_wait_rank_special,
-                   ".* Attempting to wait on monitor monitor_rank_special_minus_one/5 while holding lock monitor_rank_special/6 "
+                   ".* Attempting to wait on monitor monitor_rank_special_minus_one/.* while holding lock monitor_rank_special/.*"
                    "-- possible deadlock. Should not block\\(wait\\) while holding a lock of rank special.") {
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
@@ -221,7 +221,7 @@ TEST_VM_ASSERT_MSG(MutexRank, monitor_wait_rank_special,
 }
 
 TEST_VM_ASSERT_MSG(MutexRank, monitor_wait_access_leaf,
-                   ".* Attempting to wait on monitor monitor_rank_access/1 while holding lock monitor_rank_tty/3 "
+                   ".* Attempting to wait on monitor monitor_rank_access/1 while holding lock monitor_rank_tty/.*"
                    "-- possible deadlock. Should not block\\(wait\\) while holding a lock of rank special.") {
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
@@ -237,7 +237,7 @@ TEST_VM_ASSERT_MSG(MutexRank, monitor_wait_access_leaf,
 }
 
 TEST_VM_ASSERT_MSG(MutexRank, monitor_wait_tty_special,
-                   ".* Attempting to wait on monitor monitor_rank_tty/3 while holding lock monitor_rank_special/6 "
+                   ".* Attempting to wait on monitor monitor_rank_tty/.* while holding lock monitor_rank_special/.*"
                    "-- possible deadlock. Should not block\\(wait\\) while holding a lock of rank special.") {
   JavaThread* THREAD = JavaThread::current();
   ThreadInVMfromNative invm(THREAD);
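The test changes above only relax the expected-death messages: hard-coded rank numbers become ".*" so the regexes no longer depend on the concrete values, which shifted when the 'native' rank was removed and 'service' was added. A standalone sketch of that effect (std::regex, not the gtest harness; the message text is made up for the example, following the format quoted in the assertions):

    // Illustration: the relaxed pattern matches whatever numeric rank the lock
    // ends up with, while the old pinned pattern does not.
    #include <cassert>
    #include <regex>
    #include <string>

    int main() {
      const std::string msg =
          "assert failed: Attempting to acquire lock mutex_rank_leaf/15 "
          "out of order with lock mutex_rank_access/1 -- possible deadlock";

      const std::regex old_pattern(".* Attempting to acquire lock mutex_rank_leaf/11 "
                                   "out of order with lock mutex_rank_access/1 "
                                   "-- possible deadlock");
      const std::regex new_pattern(".* Attempting to acquire lock mutex_rank_leaf/.* "
                                   "out of order with lock mutex_rank_access/1 "
                                   "-- possible deadlock");

      assert(!std::regex_search(msg, old_pattern));  // pinned rank 11 no longer matches
      assert(std::regex_search(msg, new_pattern));   // ".*" matches any rank number
      return 0;
    }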