8300926: Several startup regressions ~6-70% in 21-b6 all platforms

Reviewed-by: eosterlund, dcubed, coleenp
Author: Robbin Ehn
Date:   2023-03-13 09:34:00 +00:00
Parent: 31e1e3975b
Commit: c183fce954
24 changed files with 340 additions and 234 deletions

@@ -209,10 +209,8 @@ void LambdaFormInvokers::regenerate_class(char* class_name, ClassFileStream& st,
   assert(result->java_mirror() != nullptr, "must be");
   add_regenerated_class(result->java_mirror());
-  {
-    MutexLocker mu_r(THREAD, Compile_lock); // add_to_hierarchy asserts this.
-    SystemDictionary::add_to_hierarchy(result);
-  }
+  SystemDictionary::add_to_hierarchy(THREAD, result);

   // new class not linked yet.
   MetaspaceShared::try_link_class(THREAD, result);
   assert(!HAS_PENDING_EXCEPTION, "Invariant");

@@ -70,6 +70,7 @@
 #include "prims/jvmtiExport.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
@@ -899,12 +900,9 @@ InstanceKlass* SystemDictionary::resolve_hidden_class_from_stream(
     k->class_loader_data()->initialize_holder(Handle(THREAD, k->java_mirror()));
   }
-  {
-    MutexLocker mu_r(THREAD, Compile_lock);
-    // Add to class hierarchy, and do possible deoptimizations.
-    add_to_hierarchy(k);
-    // But, do not add to dictionary.
-  }
+  // Add to class hierarchy, and do possible deoptimizations.
+  add_to_hierarchy(THREAD, k);
+  // But, do not add to dictionary.

   k->link_class(CHECK_NULL);
@@ -1489,13 +1487,11 @@ void SystemDictionary::define_instance_class(InstanceKlass* k, Handle class_load
     JavaCalls::call(&result, m, &args, CHECK);
   }

-  // Add the new class. We need recompile lock during update of CHA.
+  // Add to class hierarchy, and do possible deoptimizations.
+  add_to_hierarchy(THREAD, k);
   {
     MutexLocker mu_r(THREAD, Compile_lock);
-    // Add to class hierarchy, and do possible deoptimizations.
-    add_to_hierarchy(k);

     // Add to systemDictionary - so other classes can see it.
     // Grabs and releases SystemDictionary_lock
     update_dictionary(THREAD, k, loader_data);
@@ -1612,15 +1608,24 @@ InstanceKlass* SystemDictionary::find_or_define_instance_class(Symbol* class_nam
 // ----------------------------------------------------------------------------
 // Update hierarchy. This is done before the new klass has been added to the SystemDictionary. The Compile_lock
-// is held, to ensure that the compiler is not using the class hierarchy, and that deoptimization will kick in
-// before a new class is used.
+// is grabbed, to ensure that the compiler is not using the class hierarchy.

-void SystemDictionary::add_to_hierarchy(InstanceKlass* k) {
+void SystemDictionary::add_to_hierarchy(JavaThread* current, InstanceKlass* k) {
   assert(k != nullptr, "just checking");
-  if (Universe::is_fully_initialized()) {
-    assert_locked_or_safepoint(Compile_lock);
+  assert(!SafepointSynchronize::is_at_safepoint(), "must NOT be at safepoint");
+
+  // In case we are not using CHA based vtables we need to make sure the loaded
+  // deopt is completed before anyone links this class.
+  // Linking is done with _init_monitor held, by loading and deopting with it
+  // held we make sure the deopt is completed before linking.
+  if (!UseVtableBasedCHA) {
+    k->init_monitor()->lock();
   }

+  DeoptimizationScope deopt_scope;
+  {
+    MutexLocker ml(current, Compile_lock);
   k->set_init_state(InstanceKlass::loaded);
   // make sure init_state store is already done.
   // The compiler reads the hierarchy outside of the Compile_lock.
@@ -1630,10 +1635,17 @@ void SystemDictionary::add_to_hierarchy(InstanceKlass* k) {
   k->append_to_sibling_list();    // add to superklass/sibling list
   k->process_interfaces();        // handle all "implements" declarations

-  // Now flush all code that depended on old class hierarchy.
+  // Now mark all code that depended on old class hierarchy.
   // Note: must be done *after* linking k into the hierarchy (was bug 12/9/97)
   if (Universe::is_fully_initialized()) {
-    CodeCache::flush_dependents_on(k);
+    CodeCache::mark_dependents_on(&deopt_scope, k);
+  }
+  }
+
+  // Perform the deopt handshake outside Compile_lock.
+  deopt_scope.deoptimize_marked();
+
+  if (!UseVtableBasedCHA) {
+    k->init_monitor()->unlock();
   }
 }
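
Taken together, this rewrite moves the deopt handshake out from under Compile_lock. A minimal sketch of the resulting protocol, using only names that appear in this commit (the wrapper function itself is hypothetical):

    void add_class_and_deopt(JavaThread* current, InstanceKlass* k) {
      DeoptimizationScope deopt_scope;
      {
        MutexLocker ml(current, Compile_lock);
        // Mutate the hierarchy and mark dependent nmethods; no handshake yet.
        CodeCache::mark_dependents_on(&deopt_scope, k);
      }
      // The expensive handshake runs after Compile_lock is released, so
      // concurrent class loaders are no longer serialized behind it.
      deopt_scope.deoptimize_marked();
    }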

@@ -366,8 +366,8 @@ public:
   // Return Symbol or throw exception if name given is can not be a valid Symbol.
   static Symbol* class_name_symbol(const char* name, Symbol* exception, TRAPS);

-  // Setup link to hierarchy
-  static void add_to_hierarchy(InstanceKlass* k);
+  // Setup link to hierarchy and deoptimize
+  static void add_to_hierarchy(JavaThread* current, InstanceKlass* k);

 protected:
   // Basic find on loaded classes

@@ -850,13 +850,11 @@ InstanceKlass* SystemDictionaryShared::prepare_shared_lambda_proxy_class(Instanc
   assert(nest_host == shared_nest_host, "mismatched nest host");

   EventClassLoad class_load_start_event;
-  {
-    MutexLocker mu_r(THREAD, Compile_lock);
-    // Add to class hierarchy, and do possible deoptimizations.
-    SystemDictionary::add_to_hierarchy(loaded_lambda);
-    // But, do not add to dictionary.
-  }
+  // Add to class hierarchy, and do possible deoptimizations.
+  SystemDictionary::add_to_hierarchy(THREAD, loaded_lambda);
+  // But, do not add to dictionary.
   loaded_lambda->link_class(CHECK_NULL);
   // notify jvmti
   if (JvmtiExport::should_post_class_load()) {

@@ -249,7 +249,7 @@ void vmClasses::resolve_shared_class(InstanceKlass* klass, ClassLoaderData* load
   SystemDictionary::load_shared_class_misc(klass, loader_data);
   Dictionary* dictionary = loader_data->dictionary();
   dictionary->add_klass(THREAD, klass->name(), klass);
-  SystemDictionary::add_to_hierarchy(klass);
+  SystemDictionary::add_to_hierarchy(THREAD, klass);
   assert(klass->is_loaded(), "Must be in at least loaded state");
 }

@@ -1243,9 +1243,8 @@ void CodeCache::cleanup_inline_caches_whitebox() {
 // Keeps track of time spent for checking dependencies
 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

-int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
+void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  int number_of_marked_CodeBlobs = 0;

   // search the hierarchy looking for nmethods which are affected by the loading of this class
@@ -1257,7 +1256,7 @@ int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
   NoSafepointVerifier nsv;
   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
     Klass* d = str.klass();
-    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
+    InstanceKlass::cast(d)->mark_dependent_nmethods(deopt_scope, changes);
   }

 #ifndef PRODUCT
@@ -1269,8 +1268,6 @@ int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
     dependentCheckTime.stop();
   }
 #endif
-
-  return number_of_marked_CodeBlobs;
 }

 CompiledMethod* CodeCache::find_compiled(void* start) {
@@ -1325,13 +1322,12 @@ void CodeCache::old_nmethods_do(MetadataClosure* f) {
 }

 // Walk compiled methods and mark dependent methods for deoptimization.
-int CodeCache::mark_dependents_for_evol_deoptimization() {
+void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
   // Each redefinition creates a new set of nmethods that have references to "old" Methods
   // So delete old method table and create a new one.
   reset_old_method_table();

-  int number_of_marked_CodeBlobs = 0;
   CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
@@ -1339,25 +1335,20 @@ int CodeCache::mark_dependents_for_evol_deoptimization() {
       // This includes methods whose inline caches point to old methods, so
       // inline cache clearing is unnecessary.
       if (nm->has_evol_metadata()) {
-        nm->mark_for_deoptimization();
+        deopt_scope->mark(nm);
         add_to_old_table(nm);
-        number_of_marked_CodeBlobs++;
       }
     }
   }
-  // return total count of nmethods marked for deoptimization, if zero the caller
-  // can skip deoptimization
-  return number_of_marked_CodeBlobs;
 }

-void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
+void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
   CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
     if (!nm->method()->is_method_handle_intrinsic()) {
       if (nm->can_be_deoptimized()) {
-        nm->mark_for_deoptimization();
+        deopt_scope->mark(nm);
       }
       if (nm->has_evol_metadata()) {
         add_to_old_table(nm);
@@ -1366,48 +1357,30 @@ void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
   }
 }

-// Flushes compiled methods dependent on redefined classes, that have already been
-// marked for deoptimization.
-void CodeCache::flush_evol_dependents() {
-  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
-
-  // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped during the safepoint so CodeCache will be safe to update without
-  // holding the CodeCache_lock.
-
-  // At least one nmethod has been marked for deoptimization
-  Deoptimization::deoptimize_all_marked();
-}
-
 #endif // INCLUDE_JVMTI

 // Mark methods for deopt (if safe or possible).
-void CodeCache::mark_all_nmethods_for_deoptimization() {
+void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
     if (!nm->is_native_method()) {
-      nm->mark_for_deoptimization();
+      deopt_scope->mark(nm);
     }
   }
 }

-int CodeCache::mark_for_deoptimization(Method* dependee) {
+void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  int number_of_marked_CodeBlobs = 0;
   CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
     if (nm->is_dependent_on_method(dependee)) {
-      ResourceMark rm;
-      nm->mark_for_deoptimization();
-      number_of_marked_CodeBlobs++;
+      deopt_scope->mark(nm);
     }
   }
-  return number_of_marked_CodeBlobs;
 }

 void CodeCache::make_marked_nmethods_deoptimized() {
@@ -1416,51 +1389,38 @@ void CodeCache::make_marked_nmethods_deoptimized() {
     CompiledMethod* nm = iter.method();
     if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
       nm->make_not_entrant();
-      make_nmethod_deoptimized(nm);
-    }
-  }
-}
-
-void CodeCache::make_nmethod_deoptimized(CompiledMethod* nm) {
-  if (nm->is_marked_for_deoptimization() && nm->can_be_deoptimized()) {
-    nm->make_deoptimized();
+      nm->make_deoptimized();
+    }
   }
 }

-// Flushes compiled methods dependent on dependee.
-void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
+// Marks compiled methods dependent on dependee.
+void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) {
   assert_lock_strong(Compile_lock);

   if (!has_nmethods_with_dependencies()) {
     return;
   }

-  int marked = 0;
   if (dependee->is_linked()) {
     // Class initialization state change.
     KlassInitDepChange changes(dependee);
-    marked = mark_for_deoptimization(changes);
+    mark_for_deoptimization(deopt_scope, changes);
   } else {
     // New class is loaded.
     NewKlassDepChange changes(dependee);
-    marked = mark_for_deoptimization(changes);
-  }
-
-  if (marked > 0) {
-    // At least one nmethod has been marked for deoptimization
-    Deoptimization::deoptimize_all_marked();
+    mark_for_deoptimization(deopt_scope, changes);
   }
 }

-// Flushes compiled methods dependent on dependee
-void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
-  // --- Compile_lock is not held. However we are at a safepoint.
-  assert_locked_or_safepoint(Compile_lock);
+// Marks compiled methods dependent on dependee
+void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");

+  DeoptimizationScope deopt_scope;
   // Compute the dependent nmethods
-  if (mark_for_deoptimization(m_h()) > 0) {
-    Deoptimization::deoptimize_all_marked();
-  }
+  mark_for_deoptimization(&deopt_scope, m_h());
+  deopt_scope.deoptimize_marked();
 }

 void CodeCache::verify() {
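
Note that the marking functions no longer return a count: the scope itself records, via its generation number, whether a handshake is still owed. A hedged before/after sketch of a typical caller (the surrounding setup is hypothetical):

    // Before: callers counted marks so they could skip the handshake.
    int marked = CodeCache::mark_for_deoptimization(dependee);
    if (marked > 0) {
      Deoptimization::deoptimize_all_marked();
    }

    // After: if nothing was marked, _required_gen still equals the committed
    // generation and deoptimize_marked() returns without any handshake.
    DeoptimizationScope deopt_scope;
    CodeCache::mark_for_deoptimization(&deopt_scope, dependee);
    deopt_scope.deoptimize_marked();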

@@ -78,6 +78,7 @@ class KlassDepChange;
 class OopClosure;
 class ShenandoahParallelCodeHeapIterator;
 class NativePostCallNop;
+class DeoptimizationScope;

 class CodeCache : AllStatic {
   friend class VMStructs;
@@ -301,27 +302,25 @@ class CodeCache : AllStatic {
   // Deoptimization
  private:
-  static int  mark_for_deoptimization(KlassDepChange& changes);
+  static void mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes);

  public:
-  static void mark_all_nmethods_for_deoptimization();
-  static int  mark_for_deoptimization(Method* dependee);
+  static void mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope);
+  static void mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee);
   static void make_marked_nmethods_deoptimized();
-  static void make_nmethod_deoptimized(CompiledMethod* nm);

-  // Flushing and deoptimization
-  static void flush_dependents_on(InstanceKlass* dependee);
+  // Marks dependents during classloading
+  static void mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee);

   // RedefineClasses support
-  // Flushing and deoptimization in case of evolution
-  static int  mark_dependents_for_evol_deoptimization();
-  static void mark_all_nmethods_for_evol_deoptimization();
-  static void flush_evol_dependents();
+  // Marks in case of evolution
+  static void mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope);
+  static void mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope);
   static void old_nmethods_do(MetadataClosure* f) NOT_JVMTI_RETURN;
   static void unregister_old_nmethod(CompiledMethod* c) NOT_JVMTI_RETURN;

   // Support for fullspeed debugging
-  static void flush_dependents_on_method(const methodHandle& dependee);
+  static void mark_dependents_on_method_for_breakpoint(const methodHandle& dependee);

   // tells if there are nmethods with dependencies
   static bool has_nmethods_with_dependencies();

@@ -54,7 +54,8 @@ CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType ty
                                int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                                bool caller_must_gc_arguments, bool compiled)
   : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
-    _mark_for_deoptimization_status(not_marked),
+    _deoptimization_status(not_marked),
+    _deoptimization_generation(0),
     _method(method),
     _gc_data(nullptr)
 {
@@ -66,7 +67,8 @@ CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType ty
                                OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled)
   : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
              frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
-    _mark_for_deoptimization_status(not_marked),
+    _deoptimization_status(not_marked),
+    _deoptimization_generation(0),
     _method(method),
     _gc_data(nullptr)
 {
@@ -113,12 +115,10 @@ const char* CompiledMethod::state() const {
 }

 //-----------------------------------------------------------------------------
-void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) {
-  // assert(can_be_deoptimized(), ""); // in some places we check before marking, in others not.
-  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
-                 Mutex::_no_safepoint_check_flag);
-  if (_mark_for_deoptimization_status != deoptimize_done) { // can't go backwards
-    _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
+void CompiledMethod::set_deoptimized_done() {
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+  if (_deoptimization_status != deoptimize_done) { // can't go backwards
+    Atomic::store(&_deoptimization_status, deoptimize_done);
   }
 }
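
The field is now written under CompiledMethod_lock but read lock-free, hence the volatile qualifier and the Atomic accessors. The "can't go backwards" check implies this transition order (a reading of the code above, not a comment from the source):

    // not_marked -> deoptimize | deoptimize_noupdate -> deoptimize_done
    // set_deoptimized_done() never leaves deoptimize_done, so an nmethod
    // already patched by an earlier generation is not processed twice.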

@@ -140,17 +140,19 @@ public:
 class CompiledMethod : public CodeBlob {
   friend class VMStructs;
+  friend class DeoptimizationScope;

   void init_defaults();

 protected:
-  enum MarkForDeoptimizationStatus : u1 {
+  enum DeoptimizationStatus : u1 {
     not_marked,
     deoptimize,
     deoptimize_noupdate,
     deoptimize_done
   };

-  MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization
+  volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
+  // Used to track in which deoptimize handshake this method will be deoptimized.
+  uint64_t _deoptimization_generation;

   // set during construction
   unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
@@ -174,6 +176,11 @@ protected:
   virtual void flush() = 0;

+ private:
+  DeoptimizationStatus deoptimization_status() const {
+    return Atomic::load(&_deoptimization_status);
+  }
+
  protected:
   CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);
   CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);
@@ -236,11 +243,9 @@ public:
   bool is_at_poll_return(address pc);
   bool is_at_poll_or_poll_return(address pc);

-  bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
-  void mark_for_deoptimization(bool inc_recompile_counts = true);
-
-  bool has_been_deoptimized() const { return _mark_for_deoptimization_status == deoptimize_done; }
-  void mark_deoptimized() { _mark_for_deoptimization_status = deoptimize_done; }
+  bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
+  bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
+  void set_deoptimized_done();

   virtual void make_deoptimized() { assert(false, "not supported"); };
@@ -248,8 +253,8 @@ public:
     // Update recompile counts when either the update is explicitly requested (deoptimize)
     // or the nmethod is not marked for deoptimization at all (not_marked).
     // The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
-    return _mark_for_deoptimization_status != deoptimize_noupdate &&
-           _mark_for_deoptimization_status != deoptimize_done;
+    DeoptimizationStatus status = deoptimization_status();
+    return status != deoptimize_noupdate && status != deoptimize_done;
   }

   // tells whether frames described by this nmethod can be deoptimized

@@ -682,8 +682,6 @@ class DepChange : public StackObj {
   virtual bool is_klass_init_change()  const { return false; }
   virtual bool is_call_site_change()   const { return false; }

-  virtual void mark_for_deoptimization(nmethod* nm) = 0;
-
   // Subclass casting with assertions.
   KlassDepChange* as_klass_change() {
     assert(is_klass_change(), "bad cast");
@@ -780,10 +778,6 @@ class KlassDepChange : public DepChange {
   // What kind of DepChange is this?
   virtual bool is_klass_change() const { return true; }

-  virtual void mark_for_deoptimization(nmethod* nm) {
-    nm->mark_for_deoptimization(/*inc_recompile_counts=*/true);
-  }
-
   InstanceKlass* type() { return _type; }

   // involves_context(k) is true if k == _type or any of its super types
@@ -822,10 +816,6 @@ class CallSiteDepChange : public DepChange {
   // What kind of DepChange is this?
   virtual bool is_call_site_change() const { return true; }

-  virtual void mark_for_deoptimization(nmethod* nm) {
-    nm->mark_for_deoptimization(/*inc_recompile_counts=*/false);
-  }
-
   oop call_site()     const { return _call_site();     }
   oop method_handle() const { return _method_handle(); }
 };

@@ -28,6 +28,7 @@
 #include "code/dependencyContext.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/perfData.hpp"
@@ -62,17 +63,14 @@ void DependencyContext::init() {
 //
 // Walk the list of dependent nmethods searching for nmethods which
 // are dependent on the changes that were passed in and mark them for
-// deoptimization.  Returns the number of nmethods found.
+// deoptimization.
 //
-int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
-  int found = 0;
+void DependencyContext::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes) {
   for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
     nmethod* nm = b->get_nmethod();
     if (b->count() > 0) {
       if (nm->is_marked_for_deoptimization()) {
-        // Also count already (concurrently) marked nmethods to make sure
-        // deoptimization is triggered before execution in this thread continues.
-        found++;
+        deopt_scope->dependent(nm);
       } else if (nm->check_dependency_on(changes)) {
         if (TraceDependencies) {
           ResourceMark rm;
@@ -81,12 +79,10 @@ int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
           nm->print();
           nm->print_dependencies();
         }
-        changes.mark_for_deoptimization(nm);
-        found++;
+        deopt_scope->mark(nm, !changes.is_call_site_change());
       }
     }
   }
-  return found;
 }

 //
@@ -189,21 +185,18 @@ void DependencyContext::remove_all_dependents() {
   assert(b == nullptr, "All dependents should be unloading");
 }

-int DependencyContext::remove_and_mark_for_deoptimization_all_dependents() {
+void DependencyContext::remove_and_mark_for_deoptimization_all_dependents(DeoptimizationScope* deopt_scope) {
   nmethodBucket* b = dependencies_not_unloading();
   set_dependencies(nullptr);
-  int marked = 0;
   while (b != nullptr) {
     nmethod* nm = b->get_nmethod();
     if (b->count() > 0) {
       // Also count already (concurrently) marked nmethods to make sure
       // deoptimization is triggered before execution in this thread continues.
-      nm->mark_for_deoptimization();
-      marked++;
+      deopt_scope->mark(nm);
     }
     b = release_and_get_next_not_unloading(b);
   }
-  return marked;
 }

 #ifndef PRODUCT
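
With the virtual DepChange::mark_for_deoptimization() gone (see dependencies.hpp above), the one behavioral difference between klass and call-site changes, whether recompile counts are bumped, is expressed at this single call site:

    // true for klass changes, false for call-site changes:
    deopt_scope->mark(nm, /*inc_recompile_counts=*/!changes.is_call_site_change());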

@@ -32,6 +32,7 @@
 #include "runtime/safepoint.hpp"

 class nmethod;
+class DeoptimizationScope;
 class DepChange;

 //
@@ -117,10 +118,10 @@ class DependencyContext : public StackObj {
   static void init();

-  int  mark_dependent_nmethods(DepChange& changes);
+  void mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes);
   void add_dependent_nmethod(nmethod* nm);
   void remove_all_dependents();
-  int  remove_and_mark_for_deoptimization_all_dependents();
+  void remove_and_mark_for_deoptimization_all_dependents(DeoptimizationScope* deopt_scope);
   void clean_unloading_dependents();
   static nmethodBucket* release_and_get_next_not_unloading(nmethodBucket* b);
   static void purge_dependency_contexts();

@@ -1160,6 +1160,8 @@ void nmethod::finalize_relocations() {

 void nmethod::make_deoptimized() {
   if (!Continuations::enabled()) {
+    // Don't deopt this again.
+    set_deoptimized_done();
     return;
   }
@@ -1167,6 +1169,12 @@
   CompiledICLocker ml(this);
   assert(CompiledICLocker::is_safe(this), "mt unsafe call");

+  // If post call nops have been already patched, we can just bail-out.
+  if (has_been_deoptimized()) {
+    return;
+  }
+
   ResourceMark rm;
   RelocIterator iter(this, oops_reloc_begin());
@@ -1202,7 +1210,7 @@
     }
   }

   // Don't deopt this again.
-  mark_deoptimized();
+  set_deoptimized_done();
 }

 void nmethod::verify_clean_inline_caches() {
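
Since an nmethod can now be marked by one scope and visited again by a later one, make_deoptimized() is made idempotent. A comment-level summary of the control flow above (paraphrased, not from the source):

    // make_deoptimized():
    //   !Continuations::enabled() -> set_deoptimized_done(); return;
    //   has_been_deoptimized()    -> return;  // post call nops already patched
    //   patch post call nops ...  -> set_deoptimized_done();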

@@ -1591,7 +1591,11 @@ void JVMCIEnv::invalidate_nmethod_mirror(JVMCIObject mirror, bool deoptimize, JV
       // the address field to still be pointing at the nmethod.
     } else {
       // Deoptimize the nmethod immediately.
-      Deoptimization::deoptimize_all_marked(nm);
+      DeoptimizationScope deopt_scope;
+      deopt_scope.mark(nm);
+      nm->make_not_entrant();
+      nm->make_deoptimized();
+      deopt_scope.deoptimize_marked();

       // A HotSpotNmethod instance can only reference a single nmethod
       // during its lifetime so simply clear it here.

@@ -74,6 +74,7 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodComparator.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
@@ -1178,15 +1179,20 @@ void InstanceKlass::initialize_impl(TRAPS) {
 void InstanceKlass::set_initialization_state_and_notify(ClassState state, JavaThread* current) {
   MonitorLocker ml(current, _init_monitor);

-  // Now flush all code that assume the class is not linked.
-  // Set state under the Compile_lock also.
   if (state == linked && UseVtableBasedCHA && Universe::is_fully_initialized()) {
+    DeoptimizationScope deopt_scope;
+    {
+      // Now mark all code that assumes the class is not linked.
+      // Set state under the Compile_lock also.
       MutexLocker ml(current, Compile_lock);
       set_init_thread(nullptr); // reset _init_thread before changing _init_state
       set_init_state(state);

-    CodeCache::flush_dependents_on(this);
+      CodeCache::mark_dependents_on(&deopt_scope, this);
+    }
+    // Perform the deopt handshake outside Compile_lock.
+    deopt_scope.deoptimize_marked();
   } else {
     set_init_thread(nullptr); // reset _init_thread before changing _init_state
     set_init_state(state);
@@ -2325,8 +2331,8 @@ inline DependencyContext InstanceKlass::dependencies() {
   return dep_context;
 }

-int InstanceKlass::mark_dependent_nmethods(KlassDepChange& changes) {
-  return dependencies().mark_dependent_nmethods(changes);
+void InstanceKlass::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
+  dependencies().mark_dependent_nmethods(deopt_scope, changes);
 }

 void InstanceKlass::add_dependent_nmethod(nmethod* nm) {
@@ -3272,7 +3278,7 @@ bool InstanceKlass::remove_osr_nmethod(nmethod* n) {
   return found;
 }

-int InstanceKlass::mark_osr_nmethods(const Method* m) {
+int InstanceKlass::mark_osr_nmethods(DeoptimizationScope* deopt_scope, const Method* m) {
   MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
                  Mutex::_no_safepoint_check_flag);
   nmethod* osr = osr_nmethods_head();
@@ -3280,7 +3286,7 @@ int InstanceKlass::mark_osr_nmethods(const Method* m) {
   while (osr != nullptr) {
     assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
     if (osr->method() == m) {
-      osr->mark_for_deoptimization();
+      deopt_scope->mark(osr);
       found++;
     }
     osr = osr->osr_link();

@@ -39,6 +39,7 @@
 #include "jfr/support/jfrKlassExtension.hpp"
 #endif

+class DeoptimizationScope;
 class klassItable;
 class RecordComponent;
@@ -861,7 +862,7 @@ public:
   // maintenance of deoptimization dependencies
   inline DependencyContext dependencies();
-  int  mark_dependent_nmethods(KlassDepChange& changes);
+  void mark_dependent_nmethods(DeoptimizationScope* deopt_scope, KlassDepChange& changes);
   void add_dependent_nmethod(nmethod* nm);
   void clean_dependency_context();
@@ -870,7 +871,7 @@ public:
   void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; };
   void add_osr_nmethod(nmethod* n);
   bool remove_osr_nmethod(nmethod* n);
-  int mark_osr_nmethods(const Method* m);
+  int mark_osr_nmethods(DeoptimizationScope* deopt_scope, const Method* m);
   nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const;

 #if INCLUDE_JVMTI

@@ -2008,7 +2008,7 @@ void BreakpointInfo::set(Method* method) {
     // Deoptimize all dependents on this method
     HandleMark hm(thread);
     methodHandle mh(thread, method);
-    CodeCache::flush_dependents_on_method(mh);
+    CodeCache::mark_dependents_on_method_for_breakpoint(mh);
   }
 }

@@ -905,10 +905,6 @@ public:
     return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != nullptr;
   }

-  int mark_osr_nmethods() {
-    return method_holder()->mark_osr_nmethods(this);
-  }
-
   nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
     return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
   }

@@ -4098,22 +4098,18 @@ void VM_RedefineClasses::transfer_old_native_function_registrations(InstanceKlas
 void VM_RedefineClasses::flush_dependent_code() {
   assert(SafepointSynchronize::is_at_safepoint(), "sanity check");

-  bool deopt_needed;
+  DeoptimizationScope deopt_scope;

   // This is the first redefinition, mark all the nmethods for deoptimization
   if (!JvmtiExport::all_dependencies_are_recorded()) {
+    CodeCache::mark_all_nmethods_for_evol_deoptimization(&deopt_scope);
     log_debug(redefine, class, nmethod)("Marked all nmethods for deopt");
-    CodeCache::mark_all_nmethods_for_evol_deoptimization();
-    deopt_needed = true;
   } else {
-    int deopt = CodeCache::mark_dependents_for_evol_deoptimization();
-    log_debug(redefine, class, nmethod)("Marked %d dependent nmethods for deopt", deopt);
-    deopt_needed = (deopt != 0);
+    CodeCache::mark_dependents_for_evol_deoptimization(&deopt_scope);
+    log_debug(redefine, class, nmethod)("Marked dependent nmethods for deopt");
   }

-  if (deopt_needed) {
-    CodeCache::flush_evol_dependents();
-  }
+  deopt_scope.deoptimize_marked();

   // From now on we know that the dependency information is complete
   JvmtiExport::set_all_dependencies_are_recorded(true);

@@ -950,22 +950,17 @@ void MethodHandles::clean_dependency_context(oop call_site) {
   deps.clean_unloading_dependents();
 }

-void MethodHandles::flush_dependent_nmethods(Handle call_site, Handle target) {
+void MethodHandles::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, Handle call_site, Handle target) {
   assert_lock_strong(Compile_lock);

-  int marked = 0;
   CallSiteDepChange changes(call_site, target);
   {
     NoSafepointVerifier nsv;
-    MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     oop context = java_lang_invoke_CallSite::context_no_keepalive(call_site());
     DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
-    marked = deps.mark_dependent_nmethods(changes);
-  }
-  if (marked > 0) {
-    // At least one nmethod has been marked for deoptimization.
-    Deoptimization::deoptimize_all_marked();
+    deps.mark_dependent_nmethods(deopt_scope, changes);
   }
 }
@@ -1218,11 +1213,15 @@ JVM_END
 JVM_ENTRY(void, MHN_setCallSiteTargetNormal(JNIEnv* env, jobject igcls, jobject call_site_jh, jobject target_jh)) {
   Handle call_site(THREAD, JNIHandles::resolve_non_null(call_site_jh));
   Handle target   (THREAD, JNIHandles::resolve_non_null(target_jh));
+  DeoptimizationScope deopt_scope;
   {
     // Walk all nmethods depending on this call site.
     MutexLocker mu(thread, Compile_lock);
-    MethodHandles::flush_dependent_nmethods(call_site, target);
+    MethodHandles::mark_dependent_nmethods(&deopt_scope, call_site, target);
     java_lang_invoke_CallSite::set_target(call_site(), target());
+    // This is assumed to be an 'atomic' operation by verification.
+    // So keep it under lock for now.
+    deopt_scope.deoptimize_marked();
   }
 }
 JVM_END
@@ -1230,11 +1229,15 @@ JVM_END
 JVM_ENTRY(void, MHN_setCallSiteTargetVolatile(JNIEnv* env, jobject igcls, jobject call_site_jh, jobject target_jh)) {
   Handle call_site(THREAD, JNIHandles::resolve_non_null(call_site_jh));
   Handle target   (THREAD, JNIHandles::resolve_non_null(target_jh));
+  DeoptimizationScope deopt_scope;
   {
     // Walk all nmethods depending on this call site.
     MutexLocker mu(thread, Compile_lock);
-    MethodHandles::flush_dependent_nmethods(call_site, target);
+    MethodHandles::mark_dependent_nmethods(&deopt_scope, call_site, target);
     java_lang_invoke_CallSite::set_target_volatile(call_site(), target());
+    // This is assumed to be an 'atomic' operation by verification.
+    // So keep it under lock for now.
+    deopt_scope.deoptimize_marked();
   }
 }
 JVM_END
@@ -1324,21 +1327,15 @@ JVM_END
 // deallocate their dependency information.
 JVM_ENTRY(void, MHN_clearCallSiteContext(JNIEnv* env, jobject igcls, jobject context_jh)) {
   Handle context(THREAD, JNIHandles::resolve_non_null(context_jh));
-  {
-    // Walk all nmethods depending on this call site.
-    MutexLocker mu1(thread, Compile_lock);
-    int marked = 0;
-    {
-      NoSafepointVerifier nsv;
-      MutexLocker mu2(THREAD, CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context());
-      marked = deps.remove_and_mark_for_deoptimization_all_dependents();
-    }
-    if (marked > 0) {
-      // At least one nmethod has been marked for deoptimization
-      Deoptimization::deoptimize_all_marked();
-    }
+  DeoptimizationScope deopt_scope;
+  {
+    NoSafepointVerifier nsv;
+    MutexLocker ml(THREAD, CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context());
+    deps.remove_and_mark_for_deoptimization_all_dependents(&deopt_scope);
+    // This is assumed to be an 'atomic' operation by verification.
+    // So keep it under lock for now.
+    deopt_scope.deoptimize_marked();
   }
 }
 JVM_END
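
Note the asymmetry with class loading: for call-site changes the handshake stays inside the locked region, since verification assumes the target update and the deopt form one atomic step (per the comment in the diff). The shared shape of the two setCallSiteTarget entry points, names as in this commit:

    DeoptimizationScope deopt_scope;
    {
      MutexLocker mu(thread, Compile_lock);
      MethodHandles::mark_dependent_nmethods(&deopt_scope, call_site, target);
      java_lang_invoke_CallSite::set_target(call_site(), target());
      deopt_scope.deoptimize_marked(); // still under Compile_lock, unlike class loading
    }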

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@ class MethodHandles: AllStatic {
   static void add_dependent_nmethod(oop call_site, nmethod* nm);
   static void clean_dependency_context(oop call_site);

-  static void flush_dependent_nmethods(Handle call_site, Handle target);
+  static void mark_dependent_nmethods(DeoptimizationScope* deopt_scope, Handle call_site, Handle target);

   // Generate MethodHandles adapters.
   static void generate_adapters();

@@ -778,26 +778,34 @@ WB_ENTRY(jboolean, WB_IsFrameDeoptimized(JNIEnv* env, jobject o, jint depth))
 WB_END

 WB_ENTRY(void, WB_DeoptimizeAll(JNIEnv* env, jobject o))
-  CodeCache::mark_all_nmethods_for_deoptimization();
-  Deoptimization::deoptimize_all_marked();
+  DeoptimizationScope deopt_scope;
+  CodeCache::mark_all_nmethods_for_deoptimization(&deopt_scope);
+  deopt_scope.deoptimize_marked();
 WB_END

 WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
   int result = 0;
   CHECK_JNI_EXCEPTION_(env, result);
+  DeoptimizationScope deopt_scope;
+  {
     MutexLocker mu(Compile_lock);
     methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
     if (is_osr) {
-      result += mh->mark_osr_nmethods();
-    } else if (mh->code() != nullptr) {
-      mh->code()->mark_for_deoptimization();
-      ++result;
+      result += mh->method_holder()->mark_osr_nmethods(&deopt_scope, mh());
+    } else {
+      MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+      if (mh->code() != nullptr) {
+        deopt_scope.mark(mh->code());
+        ++result;
+      }
     }
-    result += CodeCache::mark_for_deoptimization(mh());
-    if (result > 0) {
-      Deoptimization::deoptimize_all_marked();
-    }
+    CodeCache::mark_for_deoptimization(&deopt_scope, mh());
+  }
+  deopt_scope.deoptimize_marked();
   return result;
 WB_END

@@ -98,6 +98,121 @@
 #include "jfr/metadata/jfrSerializer.hpp"
 #endif

+uint64_t DeoptimizationScope::_committed_deopt_gen = 0;
+uint64_t DeoptimizationScope::_active_deopt_gen    = 1;
+bool     DeoptimizationScope::_committing_in_progress = false;
+
+DeoptimizationScope::DeoptimizationScope() : _required_gen(0) {
+  DEBUG_ONLY(_deopted = false;)
+  MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+  // If there is nothing to deopt _required_gen is the same as comitted.
+  _required_gen = DeoptimizationScope::_committed_deopt_gen;
+}
+
+DeoptimizationScope::~DeoptimizationScope() {
+  assert(_deopted, "Deopt not executed");
+}
+
+void DeoptimizationScope::mark(CompiledMethod* cm, bool inc_recompile_counts) {
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
+                 Mutex::_no_safepoint_check_flag);
+
+  // If it's already marked but we still need it to be deopted.
+  if (cm->is_marked_for_deoptimization()) {
+    dependent(cm);
+    return;
+  }
+
+  CompiledMethod::DeoptimizationStatus status =
+    inc_recompile_counts ? CompiledMethod::deoptimize : CompiledMethod::deoptimize_noupdate;
+  Atomic::store(&cm->_deoptimization_status, status);
+
+  // Make sure active is not committed
+  assert(DeoptimizationScope::_committed_deopt_gen < DeoptimizationScope::_active_deopt_gen, "Must be");
+  assert(cm->_deoptimization_generation == 0, "Is already marked");
+
+  cm->_deoptimization_generation = DeoptimizationScope::_active_deopt_gen;
+  _required_gen                  = DeoptimizationScope::_active_deopt_gen;
+}
+
+void DeoptimizationScope::dependent(CompiledMethod* cm) {
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
+                 Mutex::_no_safepoint_check_flag);
+  // A method marked by someone else may have a _required_gen lower than what we marked with.
+  // Therefore only store it if it's higher than _required_gen.
+  if (_required_gen < cm->_deoptimization_generation) {
+    _required_gen = cm->_deoptimization_generation;
+  }
+}
+
+void DeoptimizationScope::deoptimize_marked() {
+  assert(!_deopted, "Already deopted");
+
+  // We are not alive yet.
+  if (!Universe::is_fully_initialized()) {
+    DEBUG_ONLY(_deopted = true;)
+    return;
+  }
+
+  // Safepoints are a special case, handled here.
+  if (SafepointSynchronize::is_at_safepoint()) {
+    DeoptimizationScope::_committed_deopt_gen = DeoptimizationScope::_active_deopt_gen;
+    DeoptimizationScope::_active_deopt_gen++;
+    Deoptimization::deoptimize_all_marked();
+    DEBUG_ONLY(_deopted = true;)
+    return;
+  }
+
+  uint64_t comitting = 0;
+  bool wait = false;
+  while (true) {
+    {
+      MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
+                     Mutex::_no_safepoint_check_flag);
+      // First we check if we or someone else already deopted the gen we want.
+      if (DeoptimizationScope::_committed_deopt_gen >= _required_gen) {
+        DEBUG_ONLY(_deopted = true;)
+        return;
+      }
+      if (!_committing_in_progress) {
+        // The version we are about to commit.
+        comitting = DeoptimizationScope::_active_deopt_gen;
+        // Make sure new marks use a higher gen.
+        DeoptimizationScope::_active_deopt_gen++;
+        _committing_in_progress = true;
+        wait = false;
+      } else {
+        // Another thread is handshaking and committing a gen.
+        wait = true;
+      }
+    }
+    if (wait) {
+      // Wait and let the concurrent handshake be performed.
+      ThreadBlockInVM tbivm(JavaThread::current());
+      os::naked_yield();
+    } else {
+      // Performs the handshake.
+      Deoptimization::deoptimize_all_marked(); // May safepoint and an additional deopt may have occurred.
+      DEBUG_ONLY(_deopted = true;)
+      {
+        MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
+                       Mutex::_no_safepoint_check_flag);
+        // Make sure that committed doesn't go backwards.
+        // Should only happen if we did a deopt during a safepoint above.
+        if (DeoptimizationScope::_committed_deopt_gen < comitting) {
+          DeoptimizationScope::_committed_deopt_gen = comitting;
+        }
+        _committing_in_progress = false;
+
+        assert(DeoptimizationScope::_committed_deopt_gen >= _required_gen, "Must be");
+        return;
+      }
+    }
+  }
+}
+
 Deoptimization::UnrollBlock::UnrollBlock(int size_of_deoptimized_frame,
                                          int caller_adjustment,
                                          int caller_actual_parameters,
@@ -915,17 +1030,11 @@ class DeoptimizeMarkedClosure : public HandshakeClosure {
   }
 };

-void Deoptimization::deoptimize_all_marked(nmethod* nmethod_only) {
+void Deoptimization::deoptimize_all_marked() {
   ResourceMark rm;

   // Make the dependent methods not entrant
-  if (nmethod_only != nullptr) {
-    nmethod_only->mark_for_deoptimization();
-    nmethod_only->make_not_entrant();
-    CodeCache::make_nmethod_deoptimized(nmethod_only);
-  } else {
-    CodeCache::make_marked_nmethods_deoptimized();
-  }
+  CodeCache::make_marked_nmethods_deoptimized();

   DeoptimizeMarkedClosure deopt;
   if (SafepointSynchronize::is_at_safepoint()) {
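
The generation counters are easiest to follow with a worked example (values illustrative):

    //   _committed_deopt_gen = 5, _active_deopt_gen = 6      (steady state)
    // T1: scope A marks nm1         -> nm1.gen = 6, A._required_gen = 6
    // T2: scope B (which also marked nm2 in gen 6) calls deoptimize_marked():
    //       takes the commit, comitting = 6, _active_deopt_gen -> 7,
    //       runs the handshake.
    // T1: scope A calls deoptimize_marked():
    //       committed (5) < required (6) and a commit is in progress,
    //       so A blocks in ThreadBlockInVM and yields.
    // T2: handshake completes       -> _committed_deopt_gen = 6
    // T1: re-checks: committed (6) >= required (6), returns without a second
    //       handshake; B's one handshake served both scopes.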

@@ -40,6 +40,32 @@ class compiledVFrame;

 template<class E> class GrowableArray;

+class DeoptimizationScope {
+ private:
+  // What gen we have done the deopt handshake for.
+  static uint64_t _committed_deopt_gen;
+  // What gen to mark a method with, hence larger than _committed_deopt_gen.
+  static uint64_t _active_deopt_gen;
+  // Indicate an in-progress deopt handshake.
+  static bool     _committing_in_progress;
+
+  // The required gen we need to execute/wait for
+  uint64_t _required_gen;
+  DEBUG_ONLY(bool _deopted;)
+
+ public:
+  DeoptimizationScope();
+  ~DeoptimizationScope();
+  // Mark a method, if already marked as dependent.
+  void mark(CompiledMethod* cm, bool inc_recompile_counts = true);
+  // Record this as a dependent method.
+  void dependent(CompiledMethod* cm);
+
+  // Execute the deoptimization.
+  // Make the nmethods not entrant, stackwalks and patch return pcs and sets post call nops.
+  void deoptimize_marked();
+};
+
 class Deoptimization : AllStatic {
   friend class VMStructs;
   friend class EscapeBarrier;
@@ -149,10 +175,9 @@ class Deoptimization : AllStatic {
 #endif

   // Make all nmethods that are marked_for_deoptimization not_entrant and deoptimize any live
-  // activations using those nmethods.  If an nmethod is passed as an argument then it is
-  // marked_for_deoptimization and made not_entrant.  Otherwise a scan of the code cache is done to
+  // activations using those nmethods. Scan of the code cache is done to
   // find all marked nmethods and they are made not_entrant.
-  static void deoptimize_all_marked(nmethod* nmethod_only = nullptr);
+  static void deoptimize_all_marked();

  public:
   // Deoptimizes a frame lazily. Deopt happens on return to the frame.