8221734: Deoptimize with handshakes
Reviewed-by: dcubed, dholmes, pchilanomate, dlong, coleenp
This commit is contained in:
parent 4ea77d3e78
commit 9baafa55a6
28 changed files with 274 additions and 175 deletions
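Note: in outline, every call site that previously scheduled the stop-the-world VM operation VM_Deoptimize (and the Solaris dtrace VM_DeoptimizeTheWorld) now calls Deoptimization::deoptimize_all_marked(), which lets each JavaThread deoptimize its own marked frames in a handshake, and the Method/nmethod unlinking that used to rely on Patching_lock moves under a new CompiledMethod_lock. The recurring call-site change, quoted from the hunks below:

    // before: schedule a safepoint VM operation and wait for it
    VM_Deoptimize op;
    VMThread::execute(&op);

    // after: handshake-based; no global safepoint required
    Deoptimization::deoptimize_all_marked();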
@@ -38,6 +38,7 @@
 #include "memory/universe.hpp"
 #include "oops/compressedOops.hpp"
 #include "oops/method.inline.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointVerifiers.hpp"

@@ -733,8 +734,7 @@ void AOTCodeHeap::sweep_dependent_methods(int* indexes, int methods_cnt) {
     }
   }
   if (marked > 0) {
-    VM_Deoptimize op;
-    VMThread::execute(&op);
+    Deoptimization::deoptimize_all_marked();
   }
 }

@@ -165,7 +165,7 @@ bool AOTCompiledMethod::make_not_entrant_helper(int new_state) {

   {
     // Enter critical section. Does not block for safepoint.
-    MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);

     if (*_state_adr == new_state) {
       // another thread already performed this transition so nothing

@@ -188,12 +188,10 @@ bool AOTCompiledMethod::make_not_entrant_helper(int new_state) {
 #endif

     // Remove AOTCompiledMethod from method.
-    if (method() != NULL && (method()->code() == this ||
-        method()->from_compiled_entry() == verified_entry_point())) {
-      HandleMark hm;
-      method()->clear_code(false /* already owns Patching_lock */);
+    if (method() != NULL) {
+      method()->unlink_code(this);
     }
-  } // leave critical region under Patching_lock
+  } // leave critical region under CompiledMethod_lock


   if (TraceCreateZombies) {

@@ -216,7 +214,7 @@ bool AOTCompiledMethod::make_entrant() {

   {
     // Enter critical section. Does not block for safepoint.
-    MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);

     if (*_state_adr == in_use) {
       // another thread already performed this transition so nothing

@@ -230,7 +228,7 @@ bool AOTCompiledMethod::make_entrant() {

     // Log the transition once
     log_state_change();
-  } // leave critical region under Patching_lock
+  } // leave critical region under CompiledMethod_lock


   if (TraceCreateZombies) {

@@ -175,6 +175,7 @@ private:
                            state() == not_used; }
   virtual bool is_alive() const { return _is_alive(); }
   virtual bool is_in_use() const { return state() == in_use; }
+  virtual bool is_not_installed() const { return state() == not_installed; }

   virtual bool is_unloading() { return false; }

@@ -1142,28 +1142,25 @@ void CodeCache::flush_evol_dependents() {

     // At least one nmethod has been marked for deoptimization

-    // All this already happens inside a VM_Operation, so we'll do all the work here.
-    // Stuff copied from VM_Deoptimize and modified slightly.
-
-    // We do not want any GCs to happen while we are in the middle of this VM operation
-    ResourceMark rm;
-    DeoptimizationMarker dm;
-
-    // Deoptimize all activations depending on marked nmethods
-    Deoptimization::deoptimize_dependents();
-
-    // Make the dependent methods not entrant
-    make_marked_nmethods_not_entrant();
+    Deoptimization::deoptimize_all_marked();
   }
 }
 #endif // INCLUDE_JVMTI

-// Deoptimize all methods
+// Mark methods for deopt (if safe or possible).
 void CodeCache::mark_all_nmethods_for_deoptimization() {
   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
-    if (!nm->method()->is_method_handle_intrinsic()) {
+    if (!nm->method()->is_method_handle_intrinsic() &&
+        !nm->is_not_installed() &&
+        nm->is_in_use() &&
+        !nm->is_native_method()) {
+      // Intrinsics and native methods are never deopted. A method that is
+      // not installed yet or is not in use is not safe to deopt; the
+      // is_in_use() check covers the not_entrant and not zombie cases.
+      // Note: A not_entrant method can become a zombie at anytime if it was
+      // made not_entrant before the previous safepoint/handshake.
       nm->mark_for_deoptimization();
     }
   }

@@ -1191,7 +1188,12 @@ void CodeCache::make_marked_nmethods_not_entrant() {
   CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
-    if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) {
+    if (nm->is_marked_for_deoptimization() && nm->is_in_use()) {
+      // only_alive_and_not_unloading() can return not_entrant nmethods.
+      // A not_entrant method can become a zombie at anytime if it was
+      // made not_entrant before the previous safepoint/handshake. The
+      // is_in_use() check covers the not_entrant and not zombie cases
+      // that have become true after the method was marked for deopt.
       nm->make_not_entrant();
     }
   }
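Note: because marking and invalidation no longer happen inside a single safepoint operation, an nmethod's state can advance between the two steps; the added is_in_use()/is_not_installed() guards above make both steps tolerate that. A standalone C++ sketch of the state set and the guard (values illustrative; the real field is nmethod::_state, listed later in the nmethod.hpp hunk):

    // The states named in the comments above.
    enum nmethod_state { not_installed, in_use, not_entrant, zombie, unloaded };

    // Only an in_use nmethod is safe to mark or make not entrant:
    // not_installed is too early, and a not_entrant method may become
    // a zombie at any safepoint/handshake after the one that created it.
    static bool safe_to_deopt(nmethod_state s) { return s == in_use; }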
@@ -1203,17 +1205,12 @@ void CodeCache::flush_dependents_on(InstanceKlass* dependee) {

   if (number_of_nmethods_with_dependencies() == 0) return;

-  // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped during the safepoint so CodeCache will be safe to update without
-  // holding the CodeCache_lock.
-
   KlassDepChange changes(dependee);

   // Compute the dependent nmethods
   if (mark_for_deoptimization(changes) > 0) {
     // At least one nmethod has been marked for deoptimization
-    VM_Deoptimize op;
-    VMThread::execute(&op);
+    Deoptimization::deoptimize_all_marked();
   }
 }

@@ -1222,26 +1219,9 @@ void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
   // --- Compile_lock is not held. However we are at a safepoint.
   assert_locked_or_safepoint(Compile_lock);

-  // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped dring the safepoint so CodeCache will be safe to update without
-  // holding the CodeCache_lock.
-
   // Compute the dependent nmethods
   if (mark_for_deoptimization(m_h()) > 0) {
-    // At least one nmethod has been marked for deoptimization
-
-    // All this already happens inside a VM_Operation, so we'll do all the work here.
-    // Stuff copied from VM_Deoptimize and modified slightly.
-
-    // We do not want any GCs to happen while we are in the middle of this VM operation
-    ResourceMark rm;
-    DeoptimizationMarker dm;
-
-    // Deoptimize all activations depending on marked nmethods
-    Deoptimization::deoptimize_dependents();
-
-    // Make the dependent methods not entrant
-    make_marked_nmethods_not_entrant();
+    Deoptimization::deoptimize_all_marked();
   }
 }

@@ -214,6 +214,7 @@ public:
   };

   virtual bool is_in_use() const = 0;
+  virtual bool is_not_installed() const = 0;
   virtual int comp_level() const = 0;
   virtual int compile_id() const = 0;

@@ -50,6 +50,7 @@
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/flags/flagSetting.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"

@@ -1176,11 +1177,7 @@ void nmethod::make_unloaded() {
   // have the Method* live here, in case we unload the nmethod because
   // it is pointing to some oop (other than the Method*) being unloaded.
   if (_method != NULL) {
-    // OSR methods point to the Method*, but the Method* does not
-    // point back!
-    if (_method->code() == this) {
-      _method->clear_code(); // Break a cycle
-    }
+    _method->unlink_code(this);
   }

   // Make the class unloaded - i.e., change state and notify sweeper

@@ -1262,16 +1259,9 @@ void nmethod::log_state_change() const {
   }
 }

-void nmethod::unlink_from_method(bool acquire_lock) {
-  // We need to check if both the _code and _from_compiled_code_entry_point
-  // refer to this nmethod because there is a race in setting these two fields
-  // in Method* as seen in bugid 4947125.
-  // If the vep() points to the zombie nmethod, the memory for the nmethod
-  // could be flushed and the compiler and vtable stubs could still call
-  // through it.
-  if (method() != NULL && (method()->code() == this ||
-      method()->from_compiled_entry() == verified_entry_point())) {
-    method()->clear_code(acquire_lock);
+void nmethod::unlink_from_method() {
+  if (method() != NULL) {
+    method()->unlink_code();
   }
 }

@@ -1298,24 +1288,24 @@ bool nmethod::make_not_entrant_or_zombie(int state) {

   // during patching, depending on the nmethod state we must notify the GC that
   // code has been unloaded, unregistering it. We cannot do this right while
-  // holding the Patching_lock because we need to use the CodeCache_lock. This
+  // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
   // would be prone to deadlocks.
   // This flag is used to remember whether we need to later lock and unregister.
   bool nmethod_needs_unregister = false;

-  {
-    // invalidate osr nmethod before acquiring the patching lock since
-    // they both acquire leaf locks and we don't want a deadlock.
-    // This logic is equivalent to the logic below for patching the
-    // verified entry point of regular methods. We check that the
-    // nmethod is in use to ensure that it is invalidated only once.
-    if (is_osr_method() && is_in_use()) {
-      // this effectively makes the osr nmethod not entrant
-      invalidate_osr_method();
-    }
+  // invalidate osr nmethod before acquiring the patching lock since
+  // they both acquire leaf locks and we don't want a deadlock.
+  // This logic is equivalent to the logic below for patching the
+  // verified entry point of regular methods. We check that the
+  // nmethod is in use to ensure that it is invalidated only once.
+  if (is_osr_method() && is_in_use()) {
+    // this effectively makes the osr nmethod not entrant
+    invalidate_osr_method();
+  }

+  {
     // Enter critical section. Does not block for safepoint.
-    MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);

     if (_state == state) {
       // another thread already performed this transition so nothing

@@ -1359,8 +1349,9 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
     log_state_change();

     // Remove nmethod from method.
-    unlink_from_method(false /* already owns Patching_lock */);
-  } // leave critical region under Patching_lock
+    unlink_from_method();
+
+  } // leave critical region under CompiledMethod_lock

 #if INCLUDE_JVMCI
   // Invalidate can't occur while holding the Patching lock

@@ -119,7 +119,7 @@ class nmethod : public CompiledMethod {
   // used by jvmti to track if an unload event has been posted for this nmethod.
   bool _unload_reported;

-  // Protected by Patching_lock
+  // Protected by CompiledMethod_lock
   volatile signed char _state; // {not_installed, in_use, not_entrant, zombie, unloaded}

 #ifdef ASSERT

@@ -387,7 +387,7 @@ class nmethod : public CompiledMethod {

   int comp_level() const { return _comp_level; }

-  void unlink_from_method(bool acquire_lock);
+  void unlink_from_method();

   // Support for oops in scopes and relocs:
   // Note: index 0 is reserved for null.

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -45,7 +45,7 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
     // We don't need to take the lock when unlinking nmethods from
     // the Method, because it is only concurrently unlinked by
     // the entry barrier, which acquires the per nmethod lock.
-    nm->unlink_from_method(false /* acquire_lock */);
+    nm->unlink_from_method();

     // We can end up calling nmethods that are unloading
     // since we clear compiled ICs lazily. Returning false

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -285,7 +285,7 @@ public:
     // We don't need to take the lock when unlinking nmethods from
     // the Method, because it is only concurrently unlinked by
     // the entry barrier, which acquires the per nmethod lock.
-    nm->unlink_from_method(false /* acquire_lock */);
+    nm->unlink_from_method();
     return;
   }

@@ -31,6 +31,7 @@
 #include "memory/universe.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/typeArrayOop.inline.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "jvmci/jniAccessMark.inline.hpp"

@@ -1496,8 +1497,7 @@ void JVMCIEnv::invalidate_nmethod_mirror(JVMCIObject mirror, JVMCI_TRAPS) {
     // Invalidating the HotSpotNmethod means we want the nmethod
     // to be deoptimized.
     nm->mark_for_deoptimization();
-    VM_Deoptimize op;
-    VMThread::execute(&op);
+    Deoptimization::deoptimize_all_marked();
   }

   // A HotSpotNmethod instance can only reference a single nmethod

@@ -103,7 +103,7 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags) {
   // Fix and bury in Method*
   set_interpreter_entry(NULL); // sets i2i entry and from_int
   set_adapter_entry(NULL);
-  clear_code(false /* don't need a lock */); // from_c/from_i get set to c2i/i2i
+  Method::clear_code(); // from_c/from_i get set to c2i/i2i

   if (access_flags.is_native()) {
     clear_native_function();

@@ -815,7 +815,7 @@ void Method::clear_native_function() {
   set_native_function(
     SharedRuntime::native_method_throw_unsatisfied_link_error_entry(),
     !native_bind_event_is_interesting);
-  clear_code();
+  this->unlink_code();
 }

 address Method::critical_native_function() {

@@ -938,8 +938,7 @@ void Method::set_not_osr_compilable(int comp_level, bool report, const char* rea
 }

 // Revert to using the interpreter and clear out the nmethod
-void Method::clear_code(bool acquire_lock /* = true */) {
-  MutexLocker pl(acquire_lock ? Patching_lock : NULL, Mutex::_no_safepoint_check_flag);
+void Method::clear_code() {
   // this may be NULL if c2i adapters have not been made yet
   // Only should happen at allocate time.
   if (adapter() == NULL) {

@@ -953,6 +952,25 @@ void Method::clear_code(bool acquire_lock /* = true */) {
   _code = NULL;
 }

+void Method::unlink_code(CompiledMethod *compare) {
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+  // We need to check if either the _code or _from_compiled_code_entry_point
+  // refer to this nmethod because there is a race in setting these two fields
+  // in Method* as seen in bugid 4947125.
+  // If the vep() points to the zombie nmethod, the memory for the nmethod
+  // could be flushed and the compiler and vtable stubs could still call
+  // through it.
+  if (code() == compare ||
+      from_compiled_entry() == compare->verified_entry_point()) {
+    clear_code();
+  }
+}
+
+void Method::unlink_code() {
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+  clear_code();
+}
+
 #if INCLUDE_CDS
 // Called by class data sharing to remove any entry points (which are not shared)
 void Method::unlink_method() {
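Note: the owned_by_self() ? NULL : CompiledMethod_lock expression above is what lets unlink_code() be called both from paths that already hold CompiledMethod_lock and from paths that do not; HotSpot's MutexLocker treats a NULL mutex as "don't lock", so the function never re-acquires a lock its caller holds. A standalone C++ model of the idiom (invented names, not HotSpot code):

    #include <mutex>
    #include <thread>

    // Toy stand-in for HotSpot's Mutex: non-recursive, but it can tell
    // whether the calling thread currently owns it (owned_by_self()).
    class OwnedMutex {
      std::mutex _m;
      std::thread::id _owner;
     public:
      void lock()   { _m.lock(); _owner = std::this_thread::get_id(); }
      void unlock() { _owner = std::thread::id(); _m.unlock(); }
      bool owned_by_self() const { return _owner == std::this_thread::get_id(); }
    };

    // Toy stand-in for MutexLocker: a null mutex means "don't lock".
    class Locker {
      OwnedMutex* _m;
     public:
      explicit Locker(OwnedMutex* m) : _m(m) { if (_m != nullptr) _m->lock(); }
      ~Locker() { if (_m != nullptr) _m->unlock(); }
    };

    static OwnedMutex compiled_method_lock;   // models CompiledMethod_lock

    void unlink_code_model() {
      // Lock only if this thread does not already hold the lock.
      Locker l(compiled_method_lock.owned_by_self() ? nullptr : &compiled_method_lock);
      // ... the clear_code() equivalent runs here, lock guaranteed held ...
    }

    int main() {
      unlink_code_model();                  // unlocked caller: takes the lock
      Locker outer(&compiled_method_lock);  // caller that already holds it:
      unlink_code_model();                  // no second acquire, no self-deadlock
    }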
@@ -1179,7 +1197,7 @@ bool Method::check_code() const {

 // Install compiled code. Instantly it can execute.
 void Method::set_code(const methodHandle& mh, CompiledMethod *code) {
-  MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+  MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
   assert( code, "use clear_code to remove code" );
   assert( mh->check_code(), "" );

@@ -463,7 +463,17 @@ class Method : public Metadata {
   address verified_code_entry();
   bool check_code() const; // Not inline to avoid circular ref
   CompiledMethod* volatile code() const;
-  void clear_code(bool acquire_lock = true); // Clear out any compiled code
+
+  // Locks CompiledMethod_lock if not held.
+  void unlink_code(CompiledMethod *compare);
+  // Locks CompiledMethod_lock if not held.
+  void unlink_code();
+
+ private:
+  // Either called with CompiledMethod_lock held or from constructor.
+  void clear_code();
+
+ public:
   static void set_code(const methodHandle& mh, CompiledMethod* code);
   void set_adapter_entry(AdapterHandlerEntry* adapter) {
     constMethod()->set_adapter_entry(adapter);

@@ -32,6 +32,7 @@
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "prims/jvmtiThreadState.inline.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/frame.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"

@@ -239,8 +240,7 @@ void VM_EnterInterpOnlyMode::doit() {
       }
     }
     if (num_marked > 0) {
-      VM_Deoptimize op;
-      VMThread::execute(&op);
+      Deoptimization::deoptimize_all_marked();
     }
   }
 }

@@ -42,6 +42,7 @@
 #include "oops/typeArrayOop.inline.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/compilationPolicy.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"

@@ -1109,8 +1110,7 @@ void MethodHandles::flush_dependent_nmethods(Handle call_site, Handle target) {
   }
   if (marked > 0) {
     // At least one nmethod has been marked for deoptimization.
-    VM_Deoptimize op;
-    VMThread::execute(&op);
+    Deoptimization::deoptimize_all_marked();
   }
 }

@@ -1506,8 +1506,7 @@ JVM_ENTRY(void, MHN_clearCallSiteContext(JNIEnv* env, jobject igcls, jobject con
     }
     if (marked > 0) {
       // At least one nmethod has been marked for deoptimization
-      VM_Deoptimize op;
-      VMThread::execute(&op);
+      Deoptimization::deoptimize_all_marked();
     }
   }
 }

@@ -822,10 +822,8 @@ WB_ENTRY(jint, WB_DeoptimizeFrames(JNIEnv* env, jobject o, jboolean make_not_ent
 WB_END

 WB_ENTRY(void, WB_DeoptimizeAll(JNIEnv* env, jobject o))
-  MutexLocker mu(Compile_lock);
   CodeCache::mark_all_nmethods_for_deoptimization();
-  VM_Deoptimize op;
-  VMThread::execute(&op);
+  Deoptimization::deoptimize_all_marked();
 WB_END

 WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))

@@ -842,8 +840,7 @@ WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method, jbool
   }
   result += CodeCache::mark_for_deoptimization(mh());
   if (result > 0) {
-    VM_Deoptimize op;
-    VMThread::execute(&op);
+    Deoptimization::deoptimize_all_marked();
   }
   return result;
 WB_END

@@ -628,6 +628,29 @@ static void post_class_revocation_event(EventBiasedLockClassRevocation* event, K
   event->commit();
 }

+BiasedLocking::Condition BiasedLocking::revoke_own_locks_in_handshake(Handle obj, TRAPS) {
+  markOop mark = obj->mark();
+
+  if (!mark->has_bias_pattern()) {
+    return NOT_BIASED;
+  }
+
+  Klass *k = obj->klass();
+  markOop prototype_header = k->prototype_header();
+  assert(mark->biased_locker() == THREAD &&
+         prototype_header->bias_epoch() == mark->bias_epoch(), "Revoke failed, unhandled biased lock state");
+  ResourceMark rm;
+  log_info(biasedlocking)("Revoking bias by walking my own stack:");
+  EventBiasedLockSelfRevocation event;
+  BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
+  ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
+  assert(cond == BIAS_REVOKED, "why not?");
+  if (event.should_commit()) {
+    post_self_revocation_event(&event, k);
+  }
+  return cond;
+}
+
 BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
   assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

@@ -175,6 +175,7 @@ public:

   // This should be called by JavaThreads to revoke the bias of an object
   static Condition revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS);
+  static Condition revoke_own_locks_in_handshake(Handle obj, TRAPS);

   // These do not allow rebiasing; they are used by deoptimization to
   // ensure that monitors on the stack can be migrated

@@ -776,10 +776,35 @@ JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_m
   return bt;
 JRT_END

-int Deoptimization::deoptimize_dependents() {
-  Threads::deoptimized_wrt_marked_nmethods();
-  return 0;
+class DeoptimizeMarkedTC : public ThreadClosure {
+  bool _in_handshake;
+ public:
+  DeoptimizeMarkedTC(bool in_handshake) : _in_handshake(in_handshake) {}
+  virtual void do_thread(Thread* thread) {
+    assert(thread->is_Java_thread(), "must be");
+    JavaThread* jt = (JavaThread*)thread;
+    jt->deoptimize_marked_methods(_in_handshake);
+  }
+};
+
+void Deoptimization::deoptimize_all_marked() {
+  ResourceMark rm;
+  DeoptimizationMarker dm;
+
+  if (SafepointSynchronize::is_at_safepoint()) {
+    DeoptimizeMarkedTC deopt(false);
+    // Make the dependent methods not entrant
+    CodeCache::make_marked_nmethods_not_entrant();
+    Threads::java_threads_do(&deopt);
+  } else {
+    // Make the dependent methods not entrant
+    {
+      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      CodeCache::make_marked_nmethods_not_entrant();
+    }
+    DeoptimizeMarkedTC deopt(true);
+    Handshake::execute(&deopt);
+  }
 }

 Deoptimization::DeoptAction Deoptimization::_unloaded_action
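Note: deoptimize_all_marked() above is the core of the change. At a safepoint the VM thread simply iterates the already-stopped JavaThreads (Threads::java_threads_do); otherwise Handshake::execute gets each JavaThread to run the closure against its own stack at a poll, with no global stop-the-world pause. A minimal standalone C++ model of that handshake shape (invented names; std::thread workers in place of JavaThreads; build with -pthread):

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    std::atomic<bool> op_pending{false};  // models the published handshake operation
    std::atomic<int>  not_done{0};        // threads that have not yet run it

    void worker(int id) {
      bool ran = false;
      while (!ran) {
        // ... a real thread would be doing normal work here ...
        if (op_pending.load(std::memory_order_acquire)) {  // the "poll"
          std::printf("thread %d deoptimizes its own marked frames\n", id);
          not_done.fetch_sub(1, std::memory_order_release);
          ran = true;
        }
      }
    }

    int main() {
      const int n = 3;
      not_done.store(n);
      std::vector<std::thread> threads;
      for (int i = 0; i < n; ++i) threads.emplace_back(worker, i);

      op_pending.store(true, std::memory_order_release);   // "Handshake::execute"
      while (not_done.load(std::memory_order_acquire) > 0) {
        std::this_thread::yield();                         // wait for every thread
      }
      std::printf("every thread has run the operation\n");
      for (auto& t : threads) t.join();
    }

Unlike the old VM_Deoptimize, no point in time exists here where all threads are stopped at once; each thread passes its own poll independently.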
@@ -1243,14 +1268,7 @@ static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects
   }
 }

-void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
-  if (!UseBiasedLocking) {
-    return;
-  }
-
-  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
-
+static void get_monitors_from_stack(GrowableArray<Handle>* objects_to_revoke, JavaThread* thread, frame fr, RegisterMap* map) {
   // Unfortunately we don't have a RegisterMap available in most of
   // the places we want to call this routine so we need to walk the
   // stack again to update the register map.

@@ -1274,6 +1292,14 @@ void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, Reg
     cvf = compiledVFrame::cast(cvf->sender());
   }
   collect_monitors(cvf, objects_to_revoke);
+}
+
+void Deoptimization::revoke_using_safepoint(JavaThread* thread, frame fr, RegisterMap* map) {
+  if (!UseBiasedLocking) {
+    return;
+  }
+  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
+  get_monitors_from_stack(objects_to_revoke, thread, fr, map);

   if (SafepointSynchronize::is_at_safepoint()) {
     BiasedLocking::revoke_at_safepoint(objects_to_revoke);

@@ -1282,6 +1308,21 @@ void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, Reg
   }
 }

+void Deoptimization::revoke_using_handshake(JavaThread* thread, frame fr, RegisterMap* map) {
+  if (!UseBiasedLocking) {
+    return;
+  }
+  GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
+  get_monitors_from_stack(objects_to_revoke, thread, fr, map);
+
+  int len = objects_to_revoke->length();
+  for (int i = 0; i < len; i++) {
+    oop obj = (objects_to_revoke->at(i))();
+    BiasedLocking::revoke_own_locks_in_handshake(objects_to_revoke->at(i), thread);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+}
+

 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
   assert(fr.can_be_deoptimized(), "checking frame type");

@@ -1310,11 +1351,16 @@ void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deopt
   fr.deoptimize(thread);
 }

-void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
-  deoptimize(thread, fr, map, Reason_constraint);
+void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, bool in_handshake) {
+  deopt_thread(in_handshake, thread, fr, map, Reason_constraint);
 }

 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) {
+  deopt_thread(false, thread, fr, map, reason);
+}
+
+void Deoptimization::deopt_thread(bool in_handshake, JavaThread* thread,
+                                  frame fr, RegisterMap *map, DeoptReason reason) {
   // Deoptimize only if the frame comes from compile code.
   // Do not deoptimize the frame which is already patched
   // during the execution of the loops below.

@@ -1324,7 +1370,11 @@ void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map,
   ResourceMark rm;
   DeoptimizationMarker dm;
   if (UseBiasedLocking) {
-    revoke_biases_of_monitors(thread, fr, map);
+    if (in_handshake) {
+      revoke_using_handshake(thread, fr, map);
+    } else {
+      revoke_using_safepoint(thread, fr, map);
+    }
   }
   deoptimize_single_frame(thread, fr, reason);

@@ -135,12 +135,19 @@ class Deoptimization : AllStatic {
     Unpack_LIMIT = 4
   };

+  static void deoptimize_all_marked();
+
+ private:
   // Checks all compiled methods. Invalid methods are deleted and
   // corresponding activations are deoptimized.
   static int deoptimize_dependents();
+  static void revoke_using_handshake(JavaThread* thread, frame fr, RegisterMap* map);
+  static void revoke_using_safepoint(JavaThread* thread, frame fr, RegisterMap* map);
+  static void deopt_thread(bool in_handshake, JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason);
+
+ public:
   // Deoptimizes a frame lazily. nmethod gets patched deopt happens on return to the frame
-  static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map);
+  static void deoptimize(JavaThread* thread, frame fr, RegisterMap *map, bool in_handshake = false);
   static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map, DeoptReason reason);

 #if INCLUDE_JVMCI

@@ -153,7 +160,9 @@ class Deoptimization : AllStatic {

   // Helper function to revoke biases of all monitors in frame if UseBiasedLocking
   // is enabled
-  static void revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map);
+  static void revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
+    revoke_using_safepoint(thread, fr, map);
+  }

 #if COMPILER2_OR_JVMCI
 JVMCI_ONLY(public:)

@@ -62,7 +62,7 @@ class Monitor : public CHeapObj<mtSynchronizer> {
        event,
        access         = event          +   1,
        tty            = access         +   2,
-       special        = tty            +   1,
+       special        = tty            +   2,
        suspend_resume = special        +   1,
        vmweak         = suspend_resume +   2,
        leaf           = vmweak         +   2,

@@ -39,6 +39,7 @@
 // Consider using GCC's __read_mostly.

 Mutex* Patching_lock = NULL;
+Mutex* CompiledMethod_lock = NULL;
 Monitor* SystemDictionary_lock = NULL;
 Mutex* ProtectionDomainSet_lock = NULL;
 Mutex* SharedDictionary_lock = NULL;

@@ -261,6 +262,8 @@ void mutex_init() {
   def(ClassLoaderDataGraph_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_always);

   def(Patching_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never); // used for safepointing and code patching.
+  def(OsrList_lock , PaddedMutex , special-1, true, Monitor::_safepoint_check_never);
+  def(CompiledMethod_lock , PaddedMutex , special-1, true, Monitor::_safepoint_check_never);
   def(Service_lock , PaddedMonitor, special, true, Monitor::_safepoint_check_never); // used for service thread operations
   def(JmethodIdCreation_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always); // used for creating jmethodIDs.

@@ -276,7 +279,6 @@ void mutex_init() {
   def(SymbolArena_lock , PaddedMutex , leaf+2, true, Monitor::_safepoint_check_never);
   def(ProfilePrint_lock , PaddedMutex , leaf, false, Monitor::_safepoint_check_always); // serial profile printing
   def(ExceptionCache_lock , PaddedMutex , leaf, false, Monitor::_safepoint_check_always); // serial profile printing
-  def(OsrList_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
   def(Debug1_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
 #ifndef PRODUCT
   def(FullGCALot_lock , PaddedMutex , leaf, false, Monitor::_safepoint_check_always); // a lock to make FullGCALot MT safe
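Note on the rank change: lock ranks are consecutive small integers, and bumping special by one opens exactly one unused value, special-1, between tty and special; the two new def(...) lines above place OsrList_lock and CompiledMethod_lock in that slot, numerically below Patching_lock (rank special). The arithmetic, with the base of event assumed to be 0 only to make the numbers concrete:

    // before: event=0, access=1, tty=3, special=tty+1=4  (no free slot)
    // after:  event=0, access=1, tty=3, special=tty+2=5, special-1=4
    enum rank_after { event = 0, access = event + 1, tty = access + 2,
                      special = tty + 2 };
    static_assert(special - 1 > tty,
                  "special-1 is a new rank strictly between tty and special");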
@@ -32,6 +32,7 @@
 // Mutexes used in the VM.

 extern Mutex* Patching_lock; // a lock used to guard code patching of compiled code
+extern Mutex* CompiledMethod_lock; // a lock used to guard a compiled method
 extern Monitor* SystemDictionary_lock; // a lock on the system dictionary
 extern Mutex* ProtectionDomainSet_lock; // a lock on the pd_set list in the system dictionary
 extern Mutex* SharedDictionary_lock; // a lock on the CDS shared dictionary

@@ -2832,18 +2832,17 @@ void JavaThread::make_zombies() {
 #endif // PRODUCT


-void JavaThread::deoptimized_wrt_marked_nmethods() {
+void JavaThread::deoptimize_marked_methods(bool in_handshake) {
   if (!has_last_Java_frame()) return;
   // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
   StackFrameStream fst(this, UseBiasedLocking);
   for (; !fst.is_done(); fst.next()) {
     if (fst.current()->should_be_deoptimized()) {
-      Deoptimization::deoptimize(this, *fst.current(), fst.register_map());
+      Deoptimization::deoptimize(this, *fst.current(), fst.register_map(), in_handshake);
     }
   }
 }

 // If the caller is a NamedThread, then remember, in the current scope,
 // the given JavaThread in its _processed_thread field.
 class RememberProcessedThread: public StackObj {

@@ -4578,13 +4577,6 @@ void Threads::metadata_handles_do(void f(Metadata*)) {
   threads_do(&handles_closure);
 }

-void Threads::deoptimized_wrt_marked_nmethods() {
-  ALL_JAVA_THREADS(p) {
-    p->deoptimized_wrt_marked_nmethods();
-  }
-}
-
-
 // Get count Java threads that are waiting to enter the specified monitor.
 GrowableArray<JavaThread*>* Threads::get_pending_threads(ThreadsList * t_list,
                                                          int count,

@@ -1918,7 +1918,7 @@ class JavaThread: public Thread {
   void deoptimize();
   void make_zombies();

-  void deoptimized_wrt_marked_nmethods();
+  void deoptimize_marked_methods(bool in_handshake);

  public:
   // Returns the running thread as a JavaThread

@@ -118,18 +118,6 @@ void VM_ClearICs::doit() {
   }
 }

-void VM_Deoptimize::doit() {
-  // We do not want any GCs to happen while we are in the middle of this VM operation
-  ResourceMark rm;
-  DeoptimizationMarker dm;
-
-  // Deoptimize all activations depending on marked nmethods
-  Deoptimization::deoptimize_dependents();
-
-  // Make the dependent methods not entrant
-  CodeCache::make_marked_nmethods_not_entrant();
-}
-
 void VM_MarkActiveNMethods::doit() {
   NMethodSweeper::mark_active_nmethods();
 }

@@ -49,7 +49,6 @@
   template(ClearICs) \
   template(ForceSafepoint) \
   template(ForceAsyncSafepoint) \
-  template(Deoptimize) \
   template(DeoptimizeFrame) \
   template(DeoptimizeAll) \
   template(ZombieAll) \

@@ -318,14 +317,6 @@ class VM_GTestExecuteAtSafepoint: public VM_Operation {
   VM_GTestExecuteAtSafepoint() {}
 };

-class VM_Deoptimize: public VM_Operation {
- public:
-  VM_Deoptimize() {}
-  VMOp_Type type() const { return VMOp_Deoptimize; }
-  void doit();
-  bool allow_nested_vm_operations() const { return true; }
-};
-
 class VM_MarkActiveNMethods: public VM_Operation {
  public:
   VM_MarkActiveNMethods() {}

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -33,23 +33,6 @@

 #ifdef SOLARIS

-class VM_DeoptimizeTheWorld : public VM_Operation {
- public:
-  VMOp_Type type() const {
-    return VMOp_DeoptimizeTheWorld;
-  }
-  void doit() {
-    CodeCache::mark_all_nmethods_for_deoptimization();
-    ResourceMark rm;
-    DeoptimizationMarker dm;
-    // Deoptimize all activations depending on marked methods
-    Deoptimization::deoptimize_dependents();
-
-    // Mark the dependent methods non entrant
-    CodeCache::make_marked_nmethods_not_entrant();
-  }
-};
-
 static void set_bool_flag(const char* flag, bool value) {
   JVMFlag::boolAtPut((char*)flag, strlen(flag), &value,
                      JVMFlag::ATTACH_ON_DEMAND);

@@ -74,8 +57,8 @@ void DTrace::enable_dprobes(int probes) {

   if (changed) {
     // one or more flags changed, need to deoptimize
-    VM_DeoptimizeTheWorld op;
-    VMThread::execute(&op);
+    CodeCache::mark_all_nmethods_for_deoptimization();
+    Deoptimization::deoptimize_all_marked();
   }
 }

@@ -97,8 +80,8 @@ void DTrace::disable_dprobes(int probes) {
   }
   if (changed) {
     // one or more flags changed, need to deoptimize
-    VM_DeoptimizeTheWorld op;
-    VMThread::execute(&op);
+    CodeCache::mark_all_nmethods_for_deoptimization();
+    Deoptimization::deoptimize_all_marked();
   }
 }

@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test UnexpectedDeoptimizationAllTest
+ * @key stress
+ * @summary stressing code cache by forcing unexpected deoptimizations of all methods
+ * @library /test/lib /
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ *
+ * @build sun.hotspot.WhiteBox compiler.codecache.stress.Helper compiler.codecache.stress.TestCaseImpl
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ *                                sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:-DeoptimizeRandom
+ *                   -XX:CompileCommand=dontinline,compiler.codecache.stress.Helper$TestCase::method
+ *                   -XX:-SegmentedCodeCache
+ *                   compiler.codecache.stress.UnexpectedDeoptimizationAllTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:-DeoptimizeRandom
+ *                   -XX:CompileCommand=dontinline,compiler.codecache.stress.Helper$TestCase::method
+ *                   -XX:+SegmentedCodeCache
+ *                   compiler.codecache.stress.UnexpectedDeoptimizationAllTest
+ */
+
+package compiler.codecache.stress;
+
+public class UnexpectedDeoptimizationAllTest implements Runnable {
+
+    public static void main(String[] args) {
+        new CodeCacheStressRunner(new UnexpectedDeoptimizationAllTest()).runTest();
+    }
+
+    @Override
+    public void run() {
+        Helper.WHITE_BOX.deoptimizeAll();
+        try {
+            Thread.sleep(10);
+        } catch (Exception e) {
+        }
+    }
+
+}