From c183fce9543ca15f5db632babecdb7797d0745e4 Mon Sep 17 00:00:00 2001 From: Robbin Ehn Date: Mon, 13 Mar 2023 09:34:00 +0000 Subject: [PATCH] 8300926: Several startup regressions ~6-70% in 21-b6 all platforms Reviewed-by: eosterlund, dcubed, coleenp --- src/hotspot/share/cds/lambdaFormInvokers.cpp | 6 +- .../share/classfile/systemDictionary.cpp | 66 +++++---- .../share/classfile/systemDictionary.hpp | 4 +- .../classfile/systemDictionaryShared.cpp | 10 +- src/hotspot/share/classfile/vmClasses.cpp | 2 +- src/hotspot/share/code/codeCache.cpp | 82 +++--------- src/hotspot/share/code/codeCache.hpp | 21 ++- src/hotspot/share/code/compiledMethod.cpp | 16 +-- src/hotspot/share/code/compiledMethod.hpp | 25 ++-- src/hotspot/share/code/dependencies.hpp | 10 -- src/hotspot/share/code/dependencyContext.cpp | 21 +-- src/hotspot/share/code/dependencyContext.hpp | 5 +- src/hotspot/share/code/nmethod.cpp | 10 +- src/hotspot/share/jvmci/jvmciEnv.cpp | 6 +- src/hotspot/share/oops/instanceKlass.cpp | 26 ++-- src/hotspot/share/oops/instanceKlass.hpp | 5 +- src/hotspot/share/oops/method.cpp | 2 +- src/hotspot/share/oops/method.hpp | 4 - .../share/prims/jvmtiRedefineClasses.cpp | 14 +- src/hotspot/share/prims/methodHandles.cpp | 45 +++---- src/hotspot/share/prims/methodHandles.hpp | 4 +- src/hotspot/share/prims/whitebox.cpp | 34 +++-- src/hotspot/share/runtime/deoptimization.cpp | 125 ++++++++++++++++-- src/hotspot/share/runtime/deoptimization.hpp | 31 ++++- 24 files changed, 340 insertions(+), 234 deletions(-) diff --git a/src/hotspot/share/cds/lambdaFormInvokers.cpp b/src/hotspot/share/cds/lambdaFormInvokers.cpp index e841fbb891e..fc1e5a73ec7 100644 --- a/src/hotspot/share/cds/lambdaFormInvokers.cpp +++ b/src/hotspot/share/cds/lambdaFormInvokers.cpp @@ -209,10 +209,8 @@ void LambdaFormInvokers::regenerate_class(char* class_name, ClassFileStream& st, assert(result->java_mirror() != nullptr, "must be"); add_regenerated_class(result->java_mirror()); - { - MutexLocker mu_r(THREAD, Compile_lock); // add_to_hierarchy asserts this. - SystemDictionary::add_to_hierarchy(result); - } + SystemDictionary::add_to_hierarchy(THREAD, result); + // new class not linked yet. MetaspaceShared::try_link_class(THREAD, result); assert(!HAS_PENDING_EXCEPTION, "Invariant"); diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp index d982ad6f457..7c06d5f78cb 100644 --- a/src/hotspot/share/classfile/systemDictionary.cpp +++ b/src/hotspot/share/classfile/systemDictionary.cpp @@ -70,6 +70,7 @@ #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" #include "runtime/arguments.hpp" +#include "runtime/deoptimization.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" @@ -899,12 +900,9 @@ InstanceKlass* SystemDictionary::resolve_hidden_class_from_stream( k->class_loader_data()->initialize_holder(Handle(THREAD, k->java_mirror())); } - { - MutexLocker mu_r(THREAD, Compile_lock); - // Add to class hierarchy, and do possible deoptimizations. - add_to_hierarchy(k); - // But, do not add to dictionary. - } + // Add to class hierarchy, and do possible deoptimizations. + add_to_hierarchy(THREAD, k); + // But, do not add to dictionary. k->link_class(CHECK_NULL); @@ -1489,13 +1487,11 @@ void SystemDictionary::define_instance_class(InstanceKlass* k, Handle class_load JavaCalls::call(&result, m, &args, CHECK); } - // Add the new class. We need recompile lock during update of CHA. 
+  // Add to class hierarchy, and do possible deoptimizations.
+  add_to_hierarchy(THREAD, k);
+
   {
     MutexLocker mu_r(THREAD, Compile_lock);
-
-    // Add to class hierarchy, and do possible deoptimizations.
-    add_to_hierarchy(k);
-
     // Add to systemDictionary - so other classes can see it.
     // Grabs and releases SystemDictionary_lock
     update_dictionary(THREAD, k, loader_data);
@@ -1612,28 +1608,44 @@ InstanceKlass* SystemDictionary::find_or_define_instance_class(Symbol* class_nam
 
 // ----------------------------------------------------------------------------
 // Update hierarchy. This is done before the new klass has been added to the SystemDictionary. The Compile_lock
-// is held, to ensure that the compiler is not using the class hierarchy, and that deoptimization will kick in
-// before a new class is used.
+// is grabbed, to ensure that the compiler is not using the class hierarchy.
 
-void SystemDictionary::add_to_hierarchy(InstanceKlass* k) {
+void SystemDictionary::add_to_hierarchy(JavaThread* current, InstanceKlass* k) {
   assert(k != nullptr, "just checking");
 
-  if (Universe::is_fully_initialized()) {
-    assert_locked_or_safepoint(Compile_lock);
+  assert(!SafepointSynchronize::is_at_safepoint(), "must NOT be at safepoint");
+
+  // If we are not using CHA-based vtables, we need to make sure the deopt
+  // triggered by loading is completed before anyone links this class.
+  // Linking is done with _init_monitor held; by loading and deopting with it
+  // held we make sure the deopt is completed before linking.
+  if (!UseVtableBasedCHA) {
+    k->init_monitor()->lock();
   }
 
-  k->set_init_state(InstanceKlass::loaded);
-  // make sure init_state store is already done.
-  // The compiler reads the hierarchy outside of the Compile_lock.
-  // Access ordering is used to add to hierarchy.
+  DeoptimizationScope deopt_scope;
+  {
+    MutexLocker ml(current, Compile_lock);
 
-  // Link into hierarchy.
-  k->append_to_sibling_list();                    // add to superklass/sibling list
-  k->process_interfaces();                        // handle all "implements" declarations
+    k->set_init_state(InstanceKlass::loaded);
+    // make sure init_state store is already done.
+    // The compiler reads the hierarchy outside of the Compile_lock.
+    // Access ordering is used to add to hierarchy.
 
-  // Now flush all code that depended on old class hierarchy.
-  // Note: must be done *after* linking k into the hierarchy (was bug 12/9/97)
-  if (Universe::is_fully_initialized()) {
-    CodeCache::flush_dependents_on(k);
+    // Link into hierarchy.
+    k->append_to_sibling_list();                    // add to superklass/sibling list
+    k->process_interfaces();                        // handle all "implements" declarations
+
+    // Now mark all code that depended on old class hierarchy.
+    // Note: must be done *after* linking k into the hierarchy (was bug 12/9/97)
+    if (Universe::is_fully_initialized()) {
+      CodeCache::mark_dependents_on(&deopt_scope, k);
+    }
+  }
+  // Perform the deopt handshake outside Compile_lock.
+  deopt_scope.deoptimize_marked();
+
+  if (!UseVtableBasedCHA) {
+    k->init_monitor()->unlock();
   }
 }
diff --git a/src/hotspot/share/classfile/systemDictionary.hpp b/src/hotspot/share/classfile/systemDictionary.hpp
index 2807d74b88b..fc90979772e 100644
--- a/src/hotspot/share/classfile/systemDictionary.hpp
+++ b/src/hotspot/share/classfile/systemDictionary.hpp
@@ -366,8 +366,8 @@ public:
   // Return Symbol or throw exception if the name given cannot be a valid Symbol.
static Symbol* class_name_symbol(const char* name, Symbol* exception, TRAPS); - // Setup link to hierarchy - static void add_to_hierarchy(InstanceKlass* k); + // Setup link to hierarchy and deoptimize + static void add_to_hierarchy(JavaThread* current, InstanceKlass* k); protected: // Basic find on loaded classes diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp index 71bdfab62cc..fde858ab6a7 100644 --- a/src/hotspot/share/classfile/systemDictionaryShared.cpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp @@ -850,13 +850,11 @@ InstanceKlass* SystemDictionaryShared::prepare_shared_lambda_proxy_class(Instanc assert(nest_host == shared_nest_host, "mismatched nest host"); EventClassLoad class_load_start_event; - { - MutexLocker mu_r(THREAD, Compile_lock); - // Add to class hierarchy, and do possible deoptimizations. - SystemDictionary::add_to_hierarchy(loaded_lambda); - // But, do not add to dictionary. - } + // Add to class hierarchy, and do possible deoptimizations. + SystemDictionary::add_to_hierarchy(THREAD, loaded_lambda); + // But, do not add to dictionary. + loaded_lambda->link_class(CHECK_NULL); // notify jvmti if (JvmtiExport::should_post_class_load()) { diff --git a/src/hotspot/share/classfile/vmClasses.cpp b/src/hotspot/share/classfile/vmClasses.cpp index df6830d464a..67ba23cc55c 100644 --- a/src/hotspot/share/classfile/vmClasses.cpp +++ b/src/hotspot/share/classfile/vmClasses.cpp @@ -249,7 +249,7 @@ void vmClasses::resolve_shared_class(InstanceKlass* klass, ClassLoaderData* load SystemDictionary::load_shared_class_misc(klass, loader_data); Dictionary* dictionary = loader_data->dictionary(); dictionary->add_klass(THREAD, klass->name(), klass); - SystemDictionary::add_to_hierarchy(klass); + SystemDictionary::add_to_hierarchy(THREAD, klass); assert(klass->is_loaded(), "Must be in at least loaded state"); } diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp index ac6ca49f065..4adeab249e9 100644 --- a/src/hotspot/share/code/codeCache.cpp +++ b/src/hotspot/share/code/codeCache.cpp @@ -1243,9 +1243,8 @@ void CodeCache::cleanup_inline_caches_whitebox() { // Keeps track of time spent for checking dependencies NOT_PRODUCT(static elapsedTimer dependentCheckTime;) -int CodeCache::mark_for_deoptimization(KlassDepChange& changes) { +void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - int number_of_marked_CodeBlobs = 0; // search the hierarchy looking for nmethods which are affected by the loading of this class @@ -1257,7 +1256,7 @@ int CodeCache::mark_for_deoptimization(KlassDepChange& changes) { NoSafepointVerifier nsv; for (DepChange::ContextStream str(changes, nsv); str.next(); ) { Klass* d = str.klass(); - number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes); + InstanceKlass::cast(d)->mark_dependent_nmethods(deopt_scope, changes); } #ifndef PRODUCT @@ -1269,8 +1268,6 @@ int CodeCache::mark_for_deoptimization(KlassDepChange& changes) { dependentCheckTime.stop(); } #endif - - return number_of_marked_CodeBlobs; } CompiledMethod* CodeCache::find_compiled(void* start) { @@ -1325,13 +1322,12 @@ void CodeCache::old_nmethods_do(MetadataClosure* f) { } // Walk compiled methods and mark dependent methods for deoptimization. 
-int CodeCache::mark_dependents_for_evol_deoptimization() { +void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) { assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); // Each redefinition creates a new set of nmethods that have references to "old" Methods // So delete old method table and create a new one. reset_old_method_table(); - int number_of_marked_CodeBlobs = 0; CompiledMethodIterator iter(CompiledMethodIterator::all_blobs); while(iter.next()) { CompiledMethod* nm = iter.method(); @@ -1339,25 +1335,20 @@ int CodeCache::mark_dependents_for_evol_deoptimization() { // This includes methods whose inline caches point to old methods, so // inline cache clearing is unnecessary. if (nm->has_evol_metadata()) { - nm->mark_for_deoptimization(); + deopt_scope->mark(nm); add_to_old_table(nm); - number_of_marked_CodeBlobs++; } } - - // return total count of nmethods marked for deoptimization, if zero the caller - // can skip deoptimization - return number_of_marked_CodeBlobs; } -void CodeCache::mark_all_nmethods_for_evol_deoptimization() { +void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) { assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); CompiledMethodIterator iter(CompiledMethodIterator::all_blobs); while(iter.next()) { CompiledMethod* nm = iter.method(); if (!nm->method()->is_method_handle_intrinsic()) { if (nm->can_be_deoptimized()) { - nm->mark_for_deoptimization(); + deopt_scope->mark(nm); } if (nm->has_evol_metadata()) { add_to_old_table(nm); @@ -1366,48 +1357,30 @@ void CodeCache::mark_all_nmethods_for_evol_deoptimization() { } } -// Flushes compiled methods dependent on redefined classes, that have already been -// marked for deoptimization. -void CodeCache::flush_evol_dependents() { - assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); - - // CodeCache can only be updated by a thread_in_VM and they will all be - // stopped during the safepoint so CodeCache will be safe to update without - // holding the CodeCache_lock. - - // At least one nmethod has been marked for deoptimization - - Deoptimization::deoptimize_all_marked(); -} #endif // INCLUDE_JVMTI // Mark methods for deopt (if safe or possible). 
-void CodeCache::mark_all_nmethods_for_deoptimization() { +void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading); while(iter.next()) { CompiledMethod* nm = iter.method(); if (!nm->is_native_method()) { - nm->mark_for_deoptimization(); + deopt_scope->mark(nm); } } } -int CodeCache::mark_for_deoptimization(Method* dependee) { +void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - int number_of_marked_CodeBlobs = 0; CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading); while(iter.next()) { CompiledMethod* nm = iter.method(); if (nm->is_dependent_on_method(dependee)) { - ResourceMark rm; - nm->mark_for_deoptimization(); - number_of_marked_CodeBlobs++; + deopt_scope->mark(nm); } } - - return number_of_marked_CodeBlobs; } void CodeCache::make_marked_nmethods_deoptimized() { @@ -1416,51 +1389,38 @@ void CodeCache::make_marked_nmethods_deoptimized() { CompiledMethod* nm = iter.method(); if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) { nm->make_not_entrant(); - make_nmethod_deoptimized(nm); + nm->make_deoptimized(); } } } -void CodeCache::make_nmethod_deoptimized(CompiledMethod* nm) { - if (nm->is_marked_for_deoptimization() && nm->can_be_deoptimized()) { - nm->make_deoptimized(); - } -} - -// Flushes compiled methods dependent on dependee. -void CodeCache::flush_dependents_on(InstanceKlass* dependee) { +// Marks compiled methods dependent on dependee. +void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) { assert_lock_strong(Compile_lock); if (!has_nmethods_with_dependencies()) { return; } - int marked = 0; if (dependee->is_linked()) { // Class initialization state change. KlassInitDepChange changes(dependee); - marked = mark_for_deoptimization(changes); + mark_for_deoptimization(deopt_scope, changes); } else { // New class is loaded. NewKlassDepChange changes(dependee); - marked = mark_for_deoptimization(changes); - } - - if (marked > 0) { - // At least one nmethod has been marked for deoptimization - Deoptimization::deoptimize_all_marked(); + mark_for_deoptimization(deopt_scope, changes); } } -// Flushes compiled methods dependent on dependee -void CodeCache::flush_dependents_on_method(const methodHandle& m_h) { - // --- Compile_lock is not held. However we are at a safepoint. 
- assert_locked_or_safepoint(Compile_lock); +// Marks compiled methods dependent on dependee +void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) { + assert(SafepointSynchronize::is_at_safepoint(), "invariant"); + DeoptimizationScope deopt_scope; // Compute the dependent nmethods - if (mark_for_deoptimization(m_h()) > 0) { - Deoptimization::deoptimize_all_marked(); - } + mark_for_deoptimization(&deopt_scope, m_h()); + deopt_scope.deoptimize_marked(); } void CodeCache::verify() { diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp index 58c69b73b09..6fb3a6bd981 100644 --- a/src/hotspot/share/code/codeCache.hpp +++ b/src/hotspot/share/code/codeCache.hpp @@ -78,6 +78,7 @@ class KlassDepChange; class OopClosure; class ShenandoahParallelCodeHeapIterator; class NativePostCallNop; +class DeoptimizationScope; class CodeCache : AllStatic { friend class VMStructs; @@ -301,27 +302,25 @@ class CodeCache : AllStatic { // Deoptimization private: - static int mark_for_deoptimization(KlassDepChange& changes); + static void mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes); public: - static void mark_all_nmethods_for_deoptimization(); - static int mark_for_deoptimization(Method* dependee); + static void mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope); + static void mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee); static void make_marked_nmethods_deoptimized(); - static void make_nmethod_deoptimized(CompiledMethod* nm); - // Flushing and deoptimization - static void flush_dependents_on(InstanceKlass* dependee); + // Marks dependents during classloading + static void mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee); // RedefineClasses support - // Flushing and deoptimization in case of evolution - static int mark_dependents_for_evol_deoptimization(); - static void mark_all_nmethods_for_evol_deoptimization(); - static void flush_evol_dependents(); + // Marks in case of evolution + static void mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope); + static void mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope); static void old_nmethods_do(MetadataClosure* f) NOT_JVMTI_RETURN; static void unregister_old_nmethod(CompiledMethod* c) NOT_JVMTI_RETURN; // Support for fullspeed debugging - static void flush_dependents_on_method(const methodHandle& dependee); + static void mark_dependents_on_method_for_breakpoint(const methodHandle& dependee); // tells if there are nmethods with dependencies static bool has_nmethods_with_dependencies(); diff --git a/src/hotspot/share/code/compiledMethod.cpp b/src/hotspot/share/code/compiledMethod.cpp index 442de79c13a..4da319644de 100644 --- a/src/hotspot/share/code/compiledMethod.cpp +++ b/src/hotspot/share/code/compiledMethod.cpp @@ -54,7 +54,8 @@ CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType ty int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled) : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled), - _mark_for_deoptimization_status(not_marked), + _deoptimization_status(not_marked), + _deoptimization_generation(0), _method(method), _gc_data(nullptr) { @@ -66,7 +67,8 @@ CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType ty OopMapSet* oop_maps, bool caller_must_gc_arguments, bool 
compiled) : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled), - _mark_for_deoptimization_status(not_marked), + _deoptimization_status(not_marked), + _deoptimization_generation(0), _method(method), _gc_data(nullptr) { @@ -113,12 +115,10 @@ const char* CompiledMethod::state() const { } //----------------------------------------------------------------------------- -void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) { - // assert(can_be_deoptimized(), ""); // in some places we check before marking, in others not. - MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, - Mutex::_no_safepoint_check_flag); - if (_mark_for_deoptimization_status != deoptimize_done) { // can't go backwards - _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate); +void CompiledMethod::set_deoptimized_done() { + MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag); + if (_deoptimization_status != deoptimize_done) { // can't go backwards + Atomic::store(&_deoptimization_status, deoptimize_done); } } diff --git a/src/hotspot/share/code/compiledMethod.hpp b/src/hotspot/share/code/compiledMethod.hpp index 8a4f08e3b12..912ca1b3f88 100644 --- a/src/hotspot/share/code/compiledMethod.hpp +++ b/src/hotspot/share/code/compiledMethod.hpp @@ -140,17 +140,19 @@ public: class CompiledMethod : public CodeBlob { friend class VMStructs; - + friend class DeoptimizationScope; void init_defaults(); protected: - enum MarkForDeoptimizationStatus : u1 { + enum DeoptimizationStatus : u1 { not_marked, deoptimize, deoptimize_noupdate, deoptimize_done }; - MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization + volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization + // Used to track in which deoptimize handshake this method will be deoptimized. + uint64_t _deoptimization_generation; // set during construction unsigned int _has_unsafe_access:1; // May fault due to unsafe access. 
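The new _deoptimization_status field above is read by other threads without CompiledMethod_lock held, which is why the patch makes it volatile and routes reads through Atomic::load (see deoptimization_status() in the next hunk). The following standalone sketch (illustrative only, not JDK code: FakeCompiledMethod, its std::mutex stand-in for CompiledMethod_lock, and the main() driver are invented) models the intended semantics: writers serialize on a lock, readers may sample the field at any time, and deoptimize_done is a terminal state.

    // Sketch of the lock-protected, monotonic status update in set_deoptimized_done().
    #include <atomic>
    #include <cassert>
    #include <mutex>

    enum DeoptimizationStatus : unsigned char {
      not_marked, deoptimize, deoptimize_noupdate, deoptimize_done
    };

    struct FakeCompiledMethod {
      std::atomic<DeoptimizationStatus> _status{not_marked};
      std::mutex _lock;  // stand-in for CompiledMethod_lock

      void set_deoptimized_done() {
        std::lock_guard<std::mutex> g(_lock);     // writers serialize on the lock
        if (_status.load() != deoptimize_done) {  // state can't go backwards
          _status.store(deoptimize_done);
        }
      }
      // Readers take no lock, so the field must be atomic (volatile plus Atomic::load in HotSpot).
      bool is_marked_for_deoptimization() const { return _status.load() != not_marked; }
      bool has_been_deoptimized() const         { return _status.load() == deoptimize_done; }
    };

    int main() {
      FakeCompiledMethod cm;
      assert(!cm.is_marked_for_deoptimization());
      cm.set_deoptimized_done();
      assert(cm.has_been_deoptimized());
      cm.set_deoptimized_done();  // idempotent: deoptimize_done is terminal
    }

In the patch the same compare-then-store runs under CompiledMethod_lock in CompiledMethod::set_deoptimized_done(), with deoptimization_status() as the lock-free read side.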
@@ -174,6 +176,11 @@ protected: virtual void flush() = 0; +private: + DeoptimizationStatus deoptimization_status() const { + return Atomic::load(&_deoptimization_status); + } + protected: CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled); CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled); @@ -236,11 +243,9 @@ public: bool is_at_poll_return(address pc); bool is_at_poll_or_poll_return(address pc); - bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; } - void mark_for_deoptimization(bool inc_recompile_counts = true); - - bool has_been_deoptimized() const { return _mark_for_deoptimization_status == deoptimize_done; } - void mark_deoptimized() { _mark_for_deoptimization_status = deoptimize_done; } + bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; } + bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; } + void set_deoptimized_done(); virtual void make_deoptimized() { assert(false, "not supported"); }; @@ -248,8 +253,8 @@ public: // Update recompile counts when either the update is explicitly requested (deoptimize) // or the nmethod is not marked for deoptimization at all (not_marked). // The latter happens during uncommon traps when deoptimized nmethod is made not entrant. - return _mark_for_deoptimization_status != deoptimize_noupdate && - _mark_for_deoptimization_status != deoptimize_done; + DeoptimizationStatus status = deoptimization_status(); + return status != deoptimize_noupdate && status != deoptimize_done; } // tells whether frames described by this nmethod can be deoptimized diff --git a/src/hotspot/share/code/dependencies.hpp b/src/hotspot/share/code/dependencies.hpp index c74ac522f81..bccc707dde8 100644 --- a/src/hotspot/share/code/dependencies.hpp +++ b/src/hotspot/share/code/dependencies.hpp @@ -682,8 +682,6 @@ class DepChange : public StackObj { virtual bool is_klass_init_change() const { return false; } virtual bool is_call_site_change() const { return false; } - virtual void mark_for_deoptimization(nmethod* nm) = 0; - // Subclass casting with assertions. KlassDepChange* as_klass_change() { assert(is_klass_change(), "bad cast"); @@ -780,10 +778,6 @@ class KlassDepChange : public DepChange { // What kind of DepChange is this? virtual bool is_klass_change() const { return true; } - virtual void mark_for_deoptimization(nmethod* nm) { - nm->mark_for_deoptimization(/*inc_recompile_counts=*/true); - } - InstanceKlass* type() { return _type; } // involves_context(k) is true if k == _type or any of its super types @@ -822,10 +816,6 @@ class CallSiteDepChange : public DepChange { // What kind of DepChange is this? 
virtual bool is_call_site_change() const { return true; } - virtual void mark_for_deoptimization(nmethod* nm) { - nm->mark_for_deoptimization(/*inc_recompile_counts=*/false); - } - oop call_site() const { return _call_site(); } oop method_handle() const { return _method_handle(); } }; diff --git a/src/hotspot/share/code/dependencyContext.cpp b/src/hotspot/share/code/dependencyContext.cpp index 87dec8b6d77..d3ef2d3db2a 100644 --- a/src/hotspot/share/code/dependencyContext.cpp +++ b/src/hotspot/share/code/dependencyContext.cpp @@ -28,6 +28,7 @@ #include "code/dependencyContext.hpp" #include "memory/resourceArea.hpp" #include "runtime/atomic.hpp" +#include "runtime/deoptimization.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/orderAccess.hpp" #include "runtime/perfData.hpp" @@ -62,17 +63,14 @@ void DependencyContext::init() { // // Walk the list of dependent nmethods searching for nmethods which // are dependent on the changes that were passed in and mark them for -// deoptimization. Returns the number of nmethods found. +// deoptimization. // -int DependencyContext::mark_dependent_nmethods(DepChange& changes) { - int found = 0; +void DependencyContext::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes) { for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) { nmethod* nm = b->get_nmethod(); if (b->count() > 0) { if (nm->is_marked_for_deoptimization()) { - // Also count already (concurrently) marked nmethods to make sure - // deoptimization is triggered before execution in this thread continues. - found++; + deopt_scope->dependent(nm); } else if (nm->check_dependency_on(changes)) { if (TraceDependencies) { ResourceMark rm; @@ -81,12 +79,10 @@ int DependencyContext::mark_dependent_nmethods(DepChange& changes) { nm->print(); nm->print_dependencies(); } - changes.mark_for_deoptimization(nm); - found++; + deopt_scope->mark(nm, !changes.is_call_site_change()); } } } - return found; } // @@ -189,21 +185,18 @@ void DependencyContext::remove_all_dependents() { assert(b == nullptr, "All dependents should be unloading"); } -int DependencyContext::remove_and_mark_for_deoptimization_all_dependents() { +void DependencyContext::remove_and_mark_for_deoptimization_all_dependents(DeoptimizationScope* deopt_scope) { nmethodBucket* b = dependencies_not_unloading(); set_dependencies(nullptr); - int marked = 0; while (b != nullptr) { nmethod* nm = b->get_nmethod(); if (b->count() > 0) { // Also count already (concurrently) marked nmethods to make sure // deoptimization is triggered before execution in this thread continues. 
- nm->mark_for_deoptimization(); - marked++; + deopt_scope->mark(nm); } b = release_and_get_next_not_unloading(b); } - return marked; } #ifndef PRODUCT diff --git a/src/hotspot/share/code/dependencyContext.hpp b/src/hotspot/share/code/dependencyContext.hpp index e9cb7824e01..972a593f82e 100644 --- a/src/hotspot/share/code/dependencyContext.hpp +++ b/src/hotspot/share/code/dependencyContext.hpp @@ -32,6 +32,7 @@ #include "runtime/safepoint.hpp" class nmethod; +class DeoptimizationScope; class DepChange; // @@ -117,10 +118,10 @@ class DependencyContext : public StackObj { static void init(); - int mark_dependent_nmethods(DepChange& changes); + void mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes); void add_dependent_nmethod(nmethod* nm); void remove_all_dependents(); - int remove_and_mark_for_deoptimization_all_dependents(); + void remove_and_mark_for_deoptimization_all_dependents(DeoptimizationScope* deopt_scope); void clean_unloading_dependents(); static nmethodBucket* release_and_get_next_not_unloading(nmethodBucket* b); static void purge_dependency_contexts(); diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp index d6d50199642..bf471054f44 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -1160,6 +1160,8 @@ void nmethod::finalize_relocations() { void nmethod::make_deoptimized() { if (!Continuations::enabled()) { + // Don't deopt this again. + set_deoptimized_done(); return; } @@ -1167,6 +1169,12 @@ void nmethod::make_deoptimized() { CompiledICLocker ml(this); assert(CompiledICLocker::is_safe(this), "mt unsafe call"); + + // If post call nops have been already patched, we can just bail-out. + if (has_been_deoptimized()) { + return; + } + ResourceMark rm; RelocIterator iter(this, oops_reloc_begin()); @@ -1202,7 +1210,7 @@ void nmethod::make_deoptimized() { } } // Don't deopt this again. - mark_deoptimized(); + set_deoptimized_done(); } void nmethod::verify_clean_inline_caches() { diff --git a/src/hotspot/share/jvmci/jvmciEnv.cpp b/src/hotspot/share/jvmci/jvmciEnv.cpp index aafe00bb6fa..ff7587b8321 100644 --- a/src/hotspot/share/jvmci/jvmciEnv.cpp +++ b/src/hotspot/share/jvmci/jvmciEnv.cpp @@ -1591,7 +1591,11 @@ void JVMCIEnv::invalidate_nmethod_mirror(JVMCIObject mirror, bool deoptimize, JV // the address field to still be pointing at the nmethod. } else { // Deoptimize the nmethod immediately. - Deoptimization::deoptimize_all_marked(nm); + DeoptimizationScope deopt_scope; + deopt_scope.mark(nm); + nm->make_not_entrant(); + nm->make_deoptimized(); + deopt_scope.deoptimize_marked(); // A HotSpotNmethod instance can only reference a single nmethod // during its lifetime so simply clear it here. diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp index 03ebf658553..f8f4e0b98de 100644 --- a/src/hotspot/share/oops/instanceKlass.cpp +++ b/src/hotspot/share/oops/instanceKlass.cpp @@ -74,6 +74,7 @@ #include "prims/jvmtiThreadState.hpp" #include "prims/methodComparator.hpp" #include "runtime/arguments.hpp" +#include "runtime/deoptimization.hpp" #include "runtime/atomic.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/handles.inline.hpp" @@ -1178,15 +1179,20 @@ void InstanceKlass::initialize_impl(TRAPS) { void InstanceKlass::set_initialization_state_and_notify(ClassState state, JavaThread* current) { MonitorLocker ml(current, _init_monitor); - // Now flush all code that assume the class is not linked. 
- // Set state under the Compile_lock also. if (state == linked && UseVtableBasedCHA && Universe::is_fully_initialized()) { - MutexLocker ml(current, Compile_lock); + DeoptimizationScope deopt_scope; + { + // Now mark all code that assumes the class is not linked. + // Set state under the Compile_lock also. + MutexLocker ml(current, Compile_lock); - set_init_thread(nullptr); // reset _init_thread before changing _init_state - set_init_state(state); + set_init_thread(nullptr); // reset _init_thread before changing _init_state + set_init_state(state); - CodeCache::flush_dependents_on(this); + CodeCache::mark_dependents_on(&deopt_scope, this); + } + // Perform the deopt handshake outside Compile_lock. + deopt_scope.deoptimize_marked(); } else { set_init_thread(nullptr); // reset _init_thread before changing _init_state set_init_state(state); @@ -2325,8 +2331,8 @@ inline DependencyContext InstanceKlass::dependencies() { return dep_context; } -int InstanceKlass::mark_dependent_nmethods(KlassDepChange& changes) { - return dependencies().mark_dependent_nmethods(changes); +void InstanceKlass::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, KlassDepChange& changes) { + dependencies().mark_dependent_nmethods(deopt_scope, changes); } void InstanceKlass::add_dependent_nmethod(nmethod* nm) { @@ -3272,7 +3278,7 @@ bool InstanceKlass::remove_osr_nmethod(nmethod* n) { return found; } -int InstanceKlass::mark_osr_nmethods(const Method* m) { +int InstanceKlass::mark_osr_nmethods(DeoptimizationScope* deopt_scope, const Method* m) { MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag); nmethod* osr = osr_nmethods_head(); @@ -3280,7 +3286,7 @@ int InstanceKlass::mark_osr_nmethods(const Method* m) { while (osr != nullptr) { assert(osr->is_osr_method(), "wrong kind of nmethod found in chain"); if (osr->method() == m) { - osr->mark_for_deoptimization(); + deopt_scope->mark(osr); found++; } osr = osr->osr_link(); diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp index 3264113e597..d5626663020 100644 --- a/src/hotspot/share/oops/instanceKlass.hpp +++ b/src/hotspot/share/oops/instanceKlass.hpp @@ -39,6 +39,7 @@ #include "jfr/support/jfrKlassExtension.hpp" #endif +class DeoptimizationScope; class klassItable; class RecordComponent; @@ -861,7 +862,7 @@ public: // maintenance of deoptimization dependencies inline DependencyContext dependencies(); - int mark_dependent_nmethods(KlassDepChange& changes); + void mark_dependent_nmethods(DeoptimizationScope* deopt_scope, KlassDepChange& changes); void add_dependent_nmethod(nmethod* nm); void clean_dependency_context(); @@ -870,7 +871,7 @@ public: void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; }; void add_osr_nmethod(nmethod* n); bool remove_osr_nmethod(nmethod* n); - int mark_osr_nmethods(const Method* m); + int mark_osr_nmethods(DeoptimizationScope* deopt_scope, const Method* m); nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const; #if INCLUDE_JVMTI diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp index 90fb0c2616b..8f8e9d1793c 100644 --- a/src/hotspot/share/oops/method.cpp +++ b/src/hotspot/share/oops/method.cpp @@ -2008,7 +2008,7 @@ void BreakpointInfo::set(Method* method) { // Deoptimize all dependents on this method HandleMark hm(thread); methodHandle mh(thread, method); - CodeCache::flush_dependents_on_method(mh); + 
CodeCache::mark_dependents_on_method_for_breakpoint(mh); } } diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp index 4a154373654..5757f64eed2 100644 --- a/src/hotspot/share/oops/method.hpp +++ b/src/hotspot/share/oops/method.hpp @@ -905,10 +905,6 @@ public: return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != nullptr; } - int mark_osr_nmethods() { - return method_holder()->mark_osr_nmethods(this); - } - nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) { return method_holder()->lookup_osr_nmethod(this, bci, level, match_level); } diff --git a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp index 0c28d5aec12..38190c42124 100644 --- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp +++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp @@ -4098,22 +4098,18 @@ void VM_RedefineClasses::transfer_old_native_function_registrations(InstanceKlas void VM_RedefineClasses::flush_dependent_code() { assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); - bool deopt_needed; + DeoptimizationScope deopt_scope; // This is the first redefinition, mark all the nmethods for deoptimization if (!JvmtiExport::all_dependencies_are_recorded()) { + CodeCache::mark_all_nmethods_for_evol_deoptimization(&deopt_scope); log_debug(redefine, class, nmethod)("Marked all nmethods for deopt"); - CodeCache::mark_all_nmethods_for_evol_deoptimization(); - deopt_needed = true; } else { - int deopt = CodeCache::mark_dependents_for_evol_deoptimization(); - log_debug(redefine, class, nmethod)("Marked %d dependent nmethods for deopt", deopt); - deopt_needed = (deopt != 0); + CodeCache::mark_dependents_for_evol_deoptimization(&deopt_scope); + log_debug(redefine, class, nmethod)("Marked dependent nmethods for deopt"); } - if (deopt_needed) { - CodeCache::flush_evol_dependents(); - } + deopt_scope.deoptimize_marked(); // From now on we know that the dependency information is complete JvmtiExport::set_all_dependencies_are_recorded(true); diff --git a/src/hotspot/share/prims/methodHandles.cpp b/src/hotspot/share/prims/methodHandles.cpp index 8e4247e6791..610b90ed723 100644 --- a/src/hotspot/share/prims/methodHandles.cpp +++ b/src/hotspot/share/prims/methodHandles.cpp @@ -950,22 +950,17 @@ void MethodHandles::clean_dependency_context(oop call_site) { deps.clean_unloading_dependents(); } -void MethodHandles::flush_dependent_nmethods(Handle call_site, Handle target) { +void MethodHandles::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, Handle call_site, Handle target) { assert_lock_strong(Compile_lock); - int marked = 0; CallSiteDepChange changes(call_site, target); { NoSafepointVerifier nsv; - MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag); + MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag); oop context = java_lang_invoke_CallSite::context_no_keepalive(call_site()); DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context); - marked = deps.mark_dependent_nmethods(changes); - } - if (marked > 0) { - // At least one nmethod has been marked for deoptimization. 
- Deoptimization::deoptimize_all_marked(); + deps.mark_dependent_nmethods(deopt_scope, changes); } } @@ -1218,11 +1213,15 @@ JVM_END JVM_ENTRY(void, MHN_setCallSiteTargetNormal(JNIEnv* env, jobject igcls, jobject call_site_jh, jobject target_jh)) { Handle call_site(THREAD, JNIHandles::resolve_non_null(call_site_jh)); Handle target (THREAD, JNIHandles::resolve_non_null(target_jh)); + DeoptimizationScope deopt_scope; { // Walk all nmethods depending on this call site. MutexLocker mu(thread, Compile_lock); - MethodHandles::flush_dependent_nmethods(call_site, target); + MethodHandles::mark_dependent_nmethods(&deopt_scope, call_site, target); java_lang_invoke_CallSite::set_target(call_site(), target()); + // This is assumed to be an 'atomic' operation by verification. + // So keep it under lock for now. + deopt_scope.deoptimize_marked(); } } JVM_END @@ -1230,11 +1229,15 @@ JVM_END JVM_ENTRY(void, MHN_setCallSiteTargetVolatile(JNIEnv* env, jobject igcls, jobject call_site_jh, jobject target_jh)) { Handle call_site(THREAD, JNIHandles::resolve_non_null(call_site_jh)); Handle target (THREAD, JNIHandles::resolve_non_null(target_jh)); + DeoptimizationScope deopt_scope; { // Walk all nmethods depending on this call site. MutexLocker mu(thread, Compile_lock); - MethodHandles::flush_dependent_nmethods(call_site, target); + MethodHandles::mark_dependent_nmethods(&deopt_scope, call_site, target); java_lang_invoke_CallSite::set_target_volatile(call_site(), target()); + // This is assumed to be an 'atomic' operation by verification. + // So keep it under lock for now. + deopt_scope.deoptimize_marked(); } } JVM_END @@ -1324,21 +1327,15 @@ JVM_END // deallocate their dependency information. JVM_ENTRY(void, MHN_clearCallSiteContext(JNIEnv* env, jobject igcls, jobject context_jh)) { Handle context(THREAD, JNIHandles::resolve_non_null(context_jh)); + DeoptimizationScope deopt_scope; { - // Walk all nmethods depending on this call site. - MutexLocker mu1(thread, Compile_lock); - - int marked = 0; - { - NoSafepointVerifier nsv; - MutexLocker mu2(THREAD, CodeCache_lock, Mutex::_no_safepoint_check_flag); - DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context()); - marked = deps.remove_and_mark_for_deoptimization_all_dependents(); - } - if (marked > 0) { - // At least one nmethod has been marked for deoptimization - Deoptimization::deoptimize_all_marked(); - } + NoSafepointVerifier nsv; + MutexLocker ml(THREAD, CodeCache_lock, Mutex::_no_safepoint_check_flag); + DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context()); + deps.remove_and_mark_for_deoptimization_all_dependents(&deopt_scope); + // This is assumed to be an 'atomic' operation by verification. + // So keep it under lock for now. + deopt_scope.deoptimize_marked(); } } JVM_END diff --git a/src/hotspot/share/prims/methodHandles.hpp b/src/hotspot/share/prims/methodHandles.hpp index 3691d5bc3ff..a44f5a66449 100644 --- a/src/hotspot/share/prims/methodHandles.hpp +++ b/src/hotspot/share/prims/methodHandles.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@ class MethodHandles: AllStatic {
   static void add_dependent_nmethod(oop call_site, nmethod* nm);
   static void clean_dependency_context(oop call_site);
 
-  static void flush_dependent_nmethods(Handle call_site, Handle target);
+  static void mark_dependent_nmethods(DeoptimizationScope* deopt_scope, Handle call_site, Handle target);
 
   // Generate MethodHandles adapters.
   static void generate_adapters();
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
index 588c2d181ae..e4befe647bc 100644
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -778,26 +778,34 @@ WB_ENTRY(jboolean, WB_IsFrameDeoptimized(JNIEnv* env, jobject o, jint depth))
 WB_END
 
 WB_ENTRY(void, WB_DeoptimizeAll(JNIEnv* env, jobject o))
-  CodeCache::mark_all_nmethods_for_deoptimization();
-  Deoptimization::deoptimize_all_marked();
+  DeoptimizationScope deopt_scope;
+  CodeCache::mark_all_nmethods_for_deoptimization(&deopt_scope);
+  deopt_scope.deoptimize_marked();
 WB_END
 
 WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
   int result = 0;
   CHECK_JNI_EXCEPTION_(env, result);
-  MutexLocker mu(Compile_lock);
-  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
-  if (is_osr) {
-    result += mh->mark_osr_nmethods();
-  } else if (mh->code() != nullptr) {
-    mh->code()->mark_for_deoptimization();
-    ++result;
-  }
-  result += CodeCache::mark_for_deoptimization(mh());
-  if (result > 0) {
-    Deoptimization::deoptimize_all_marked();
+
+  DeoptimizationScope deopt_scope;
+  {
+    MutexLocker mu(Compile_lock);
+    methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
+    if (is_osr) {
+      result += mh->method_holder()->mark_osr_nmethods(&deopt_scope, mh());
+    } else {
+      MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+      if (mh->code() != nullptr) {
+        deopt_scope.mark(mh->code());
+        ++result;
+      }
+    }
+    CodeCache::mark_for_deoptimization(&deopt_scope, mh());
   }
+
+  deopt_scope.deoptimize_marked();
+
   return result;
 WB_END
diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp
index 5eafb2281d6..8348556f795 100644
--- a/src/hotspot/share/runtime/deoptimization.cpp
+++ b/src/hotspot/share/runtime/deoptimization.cpp
@@ -98,6 +98,121 @@
 #include "jfr/metadata/jfrSerializer.hpp"
 #endif
 
+uint64_t DeoptimizationScope::_committed_deopt_gen = 0;
+uint64_t DeoptimizationScope::_active_deopt_gen = 1;
+bool DeoptimizationScope::_committing_in_progress = false;
+
+DeoptimizationScope::DeoptimizationScope() : _required_gen(0) {
+  DEBUG_ONLY(_deopted = false;)
+
+  MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+  // If there is nothing to deopt, _required_gen is the same as committed.
+  _required_gen = DeoptimizationScope::_committed_deopt_gen;
+}
+
+DeoptimizationScope::~DeoptimizationScope() {
+  assert(_deopted, "Deopt not executed");
+}
+
+void DeoptimizationScope::mark(CompiledMethod* cm, bool inc_recompile_counts) {
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
+                 Mutex::_no_safepoint_check_flag);
+
+  // If it's already marked, we still need it to be deopted.
+  if (cm->is_marked_for_deoptimization()) {
+    dependent(cm);
+    return;
+  }
+
+  CompiledMethod::DeoptimizationStatus status =
+      inc_recompile_counts ?
CompiledMethod::deoptimize : CompiledMethod::deoptimize_noupdate; + Atomic::store(&cm->_deoptimization_status, status); + + // Make sure active is not committed + assert(DeoptimizationScope::_committed_deopt_gen < DeoptimizationScope::_active_deopt_gen, "Must be"); + assert(cm->_deoptimization_generation == 0, "Is already marked"); + + cm->_deoptimization_generation = DeoptimizationScope::_active_deopt_gen; + _required_gen = DeoptimizationScope::_active_deopt_gen; +} + +void DeoptimizationScope::dependent(CompiledMethod* cm) { + MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, + Mutex::_no_safepoint_check_flag); + // A method marked by someone else may have a _required_gen lower than what we marked with. + // Therefore only store it if it's higher than _required_gen. + if (_required_gen < cm->_deoptimization_generation) { + _required_gen = cm->_deoptimization_generation; + } +} + +void DeoptimizationScope::deoptimize_marked() { + assert(!_deopted, "Already deopted"); + + // We are not alive yet. + if (!Universe::is_fully_initialized()) { + DEBUG_ONLY(_deopted = true;) + return; + } + + // Safepoints are a special case, handled here. + if (SafepointSynchronize::is_at_safepoint()) { + DeoptimizationScope::_committed_deopt_gen = DeoptimizationScope::_active_deopt_gen; + DeoptimizationScope::_active_deopt_gen++; + Deoptimization::deoptimize_all_marked(); + DEBUG_ONLY(_deopted = true;) + return; + } + + uint64_t comitting = 0; + bool wait = false; + while (true) { + { + MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, + Mutex::_no_safepoint_check_flag); + // First we check if we or someone else already deopted the gen we want. + if (DeoptimizationScope::_committed_deopt_gen >= _required_gen) { + DEBUG_ONLY(_deopted = true;) + return; + } + if (!_committing_in_progress) { + // The version we are about to commit. + comitting = DeoptimizationScope::_active_deopt_gen; + // Make sure new marks use a higher gen. + DeoptimizationScope::_active_deopt_gen++; + _committing_in_progress = true; + wait = false; + } else { + // Another thread is handshaking and committing a gen. + wait = true; + } + } + if (wait) { + // Wait and let the concurrent handshake be performed. + ThreadBlockInVM tbivm(JavaThread::current()); + os::naked_yield(); + } else { + // Performs the handshake. + Deoptimization::deoptimize_all_marked(); // May safepoint and an additional deopt may have occurred. + DEBUG_ONLY(_deopted = true;) + { + MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, + Mutex::_no_safepoint_check_flag); + // Make sure that committed doesn't go backwards. + // Should only happen if we did a deopt during a safepoint above. 
+      if (DeoptimizationScope::_committed_deopt_gen < comitting) {
+        DeoptimizationScope::_committed_deopt_gen = comitting;
+      }
+      _committing_in_progress = false;
+
+      assert(DeoptimizationScope::_committed_deopt_gen >= _required_gen, "Must be");
+
+      return;
+      }
+    }
+  }
+}
+
 Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
                                          int  caller_adjustment,
                                          int  caller_actual_parameters,
@@ -915,17 +1030,11 @@ class DeoptimizeMarkedClosure : public HandshakeClosure {
   }
 };
 
-void Deoptimization::deoptimize_all_marked(nmethod* nmethod_only) {
+void Deoptimization::deoptimize_all_marked() {
   ResourceMark rm;
 
   // Make the dependent methods not entrant
-  if (nmethod_only != nullptr) {
-    nmethod_only->mark_for_deoptimization();
-    nmethod_only->make_not_entrant();
-    CodeCache::make_nmethod_deoptimized(nmethod_only);
-  } else {
-    CodeCache::make_marked_nmethods_deoptimized();
-  }
+  CodeCache::make_marked_nmethods_deoptimized();
 
   DeoptimizeMarkedClosure deopt;
   if (SafepointSynchronize::is_at_safepoint()) {
diff --git a/src/hotspot/share/runtime/deoptimization.hpp b/src/hotspot/share/runtime/deoptimization.hpp
index 7fe3701b448..01da82b3dad 100644
--- a/src/hotspot/share/runtime/deoptimization.hpp
+++ b/src/hotspot/share/runtime/deoptimization.hpp
@@ -40,6 +40,32 @@ class compiledVFrame;
 
 template<class E> class GrowableArray;
 
+class DeoptimizationScope {
+ private:
+  // What gen we have done the deopt handshake for.
+  static uint64_t _committed_deopt_gen;
+  // What gen to mark a method with, hence larger than _committed_deopt_gen.
+  static uint64_t _active_deopt_gen;
+  // Indicate an in-progress deopt handshake.
+  static bool _committing_in_progress;
+
+  // The required gen we need to execute/wait for.
+  uint64_t _required_gen;
+  DEBUG_ONLY(bool _deopted;)
+
+ public:
+  DeoptimizationScope();
+  ~DeoptimizationScope();
+  // Mark a method for deoptimization; if it is already marked, just record it as a dependency.
+  void mark(CompiledMethod* cm, bool inc_recompile_counts = true);
+  // Record that this scope depends on the method's pending deoptimization.
+  void dependent(CompiledMethod* cm);
+
+  // Execute the deoptimization:
+  // make the nmethods not entrant, walk the stacks, patch return pcs, and set post-call nops.
+  void deoptimize_marked();
+};
+
 class Deoptimization : AllStatic {
   friend class VMStructs;
   friend class EscapeBarrier;
@@ -149,10 +175,9 @@ class Deoptimization : AllStatic {
 #endif
 
   // Make all nmethods that are marked_for_deoptimization not_entrant and deoptimize any live
-  // activations using those nmethods.  If an nmethod is passed as an argument then it is
-  // marked_for_deoptimization and made not_entrant.  Otherwise a scan of the code cache is done to
+  // activations using those nmethods. A scan of the code cache is done to
   // find all marked nmethods and they are made not_entrant.
-  static void deoptimize_all_marked(nmethod* nmethod_only = nullptr);
+  static void deoptimize_all_marked();
 
  public:
   // Deoptimizes a frame lazily. Deopt happens on return to the frame.
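Taken together, the DeoptimizationScope protocol above lets any number of concurrent callers mark nmethods under their usual locks while the expensive deoptimization handshake runs once per committed generation (outside Compile_lock where possible), which appears to be the point of the fix for the startup regressions in the title. The following standalone model (illustrative only, not JDK code: Scope and the g_* globals are invented, and the _committing_in_progress wait loop and the safepoint path are omitted) shows why a scope can often return without performing a handshake at all:

    #include <cassert>
    #include <cstdint>
    #include <mutex>

    static std::mutex g_lock;               // stand-in for CompiledMethod_lock
    static uint64_t   g_committed_gen = 0;  // last generation fully deoptimized
    static uint64_t   g_active_gen    = 1;  // generation that new marks go into
    static int        g_handshakes    = 0;  // counts simulated deopt handshakes

    struct Scope {                          // stand-in for DeoptimizationScope
      uint64_t _required_gen = 0;

      void mark() {                         // mark one method in the active generation
        std::lock_guard<std::mutex> g(g_lock);
        _required_gen = g_active_gen;
      }

      void deoptimize_marked() {            // commit: handshake only if still required
        uint64_t committing = 0;
        {
          std::lock_guard<std::mutex> g(g_lock);
          if (g_committed_gen >= _required_gen) {
            return;                         // another scope already deopted our generation
          }
          committing = g_active_gen++;      // later marks must use a higher generation
        }
        ++g_handshakes;                     // the real code runs deoptimize_all_marked() here
        std::lock_guard<std::mutex> g(g_lock);
        if (g_committed_gen < committing) { // the committed generation never goes backwards
          g_committed_gen = committing;
        }
      }
    };

    int main() {
      Scope a, b;
      a.mark();                             // both scopes mark in generation 1
      b.mark();
      a.deoptimize_marked();                // performs the single handshake
      b.deoptimize_marked();                // generation 1 already committed: no handshake
      assert(g_handshakes == 1);
    }

Under the old scheme each such call site triggered its own Deoptimization::deoptimize_all_marked() handshake; with generations, overlapping requests collapse into one.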