Mirror of https://github.com/openjdk/jdk.git (synced 2025-08-28 15:24:43 +02:00)
8300926: Several startup regressions ~6-70% in 21-b6 all platforms
Reviewed-by: eosterlund, dcubed, coleenp
parent 31e1e3975b
commit c183fce954
24 changed files with 340 additions and 234 deletions
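Overview of the change: call sites that used to count marked nmethods and then run Deoptimization::deoptimize_all_marked() themselves now thread a DeoptimizationScope through the marking code and commit all marks in one batched deopt handshake. A minimal standalone C++ model of that batching pattern (illustrative only; FakeNMethod, DeoptimizationScopeModel and the vector are stand-ins, not HotSpot code; the real scope uses the generation counters shown in the DeoptimizationScope implementation hunk near the end of this diff):

#include <cstdio>
#include <vector>

struct FakeNMethod { int id; bool deoptimized = false; };

class DeoptimizationScopeModel {
  std::vector<FakeNMethod*> _marked;  // stand-in for the real generation bookkeeping
 public:
  void mark(FakeNMethod* nm) { _marked.push_back(nm); }
  void deoptimize_marked() {          // one "handshake" for the whole batch
    for (FakeNMethod* nm : _marked) nm->deoptimized = true;
    std::printf("deoptimized %zu nmethods in one batch\n", _marked.size());
    _marked.clear();
  }
};

int main() {
  FakeNMethod a{1}, b{2};
  DeoptimizationScopeModel scope;
  scope.mark(&a);                     // many call sites can mark...
  scope.mark(&b);
  scope.deoptimize_marked();          // ...but only one deoptimization pass runs
  return 0;
}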
@@ -209,10 +209,8 @@ void LambdaFormInvokers::regenerate_class(char* class_name, ClassFileStream& st,
   assert(result->java_mirror() != nullptr, "must be");
   add_regenerated_class(result->java_mirror());
 
-  {
-    MutexLocker mu_r(THREAD, Compile_lock); // add_to_hierarchy asserts this.
-    SystemDictionary::add_to_hierarchy(result);
-  }
+  SystemDictionary::add_to_hierarchy(THREAD, result);
 
   // new class not linked yet.
   MetaspaceShared::try_link_class(THREAD, result);
   assert(!HAS_PENDING_EXCEPTION, "Invariant");
@@ -70,6 +70,7 @@
 #include "prims/jvmtiExport.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
@@ -899,12 +900,9 @@ InstanceKlass* SystemDictionary::resolve_hidden_class_from_stream(
     k->class_loader_data()->initialize_holder(Handle(THREAD, k->java_mirror()));
   }
 
-  {
-    MutexLocker mu_r(THREAD, Compile_lock);
-    // Add to class hierarchy, and do possible deoptimizations.
-    add_to_hierarchy(k);
-    // But, do not add to dictionary.
-  }
+  // Add to class hierarchy, and do possible deoptimizations.
+  add_to_hierarchy(THREAD, k);
+  // But, do not add to dictionary.
 
   k->link_class(CHECK_NULL);
@@ -1489,13 +1487,11 @@ void SystemDictionary::define_instance_class(InstanceKlass* k, Handle class_load
     JavaCalls::call(&result, m, &args, CHECK);
   }
 
   // Add the new class. We need recompile lock during update of CHA.
+  // Add to class hierarchy, and do possible deoptimizations.
+  add_to_hierarchy(THREAD, k);
 
-  {
-    MutexLocker mu_r(THREAD, Compile_lock);
-
-    // Add to class hierarchy, and do possible deoptimizations.
-    add_to_hierarchy(k);
-
   // Add to systemDictionary - so other classes can see it.
   // Grabs and releases SystemDictionary_lock
   update_dictionary(THREAD, k, loader_data);
@@ -1612,28 +1608,44 @@ InstanceKlass* SystemDictionary::find_or_define_instance_class(Symbol* class_nam
 
 // ----------------------------------------------------------------------------
 // Update hierarchy. This is done before the new klass has been added to the SystemDictionary. The Compile_lock
-// is held, to ensure that the compiler is not using the class hierarchy, and that deoptimization will kick in
-// before a new class is used.
+// is grabbed, to ensure that the compiler is not using the class hierarchy.
 
-void SystemDictionary::add_to_hierarchy(InstanceKlass* k) {
+void SystemDictionary::add_to_hierarchy(JavaThread* current, InstanceKlass* k) {
   assert(k != nullptr, "just checking");
-  if (Universe::is_fully_initialized()) {
-    assert_locked_or_safepoint(Compile_lock);
+  assert(!SafepointSynchronize::is_at_safepoint(), "must NOT be at safepoint");
+
+  // In case we are not using CHA based vtables we need to make sure the loaded
+  // deopt is completed before anyone links this class.
+  // Linking is done with _init_monitor held, by loading and deopting with it
+  // held we make sure the deopt is completed before linking.
+  if (!UseVtableBasedCHA) {
+    k->init_monitor()->lock();
   }
 
-  k->set_init_state(InstanceKlass::loaded);
-  // make sure init_state store is already done.
-  // The compiler reads the hierarchy outside of the Compile_lock.
-  // Access ordering is used to add to hierarchy.
+  DeoptimizationScope deopt_scope;
+  {
+    MutexLocker ml(current, Compile_lock);
+
+    k->set_init_state(InstanceKlass::loaded);
+    // make sure init_state store is already done.
+    // The compiler reads the hierarchy outside of the Compile_lock.
+    // Access ordering is used to add to hierarchy.
 
-  // Link into hierarchy.
-  k->append_to_sibling_list();                    // add to superklass/sibling list
-  k->process_interfaces();                        // handle all "implements" declarations
+    // Link into hierarchy.
+    k->append_to_sibling_list();                    // add to superklass/sibling list
+    k->process_interfaces();                        // handle all "implements" declarations
 
-  // Now flush all code that depended on old class hierarchy.
-  // Note: must be done *after* linking k into the hierarchy (was bug 12/9/97)
-  if (Universe::is_fully_initialized()) {
-    CodeCache::flush_dependents_on(k);
+    // Now mark all code that depended on old class hierarchy.
+    // Note: must be done *after* linking k into the hierarchy (was bug 12/9/97)
+    if (Universe::is_fully_initialized()) {
+      CodeCache::mark_dependents_on(&deopt_scope, k);
+    }
+  }
+  // Perform the deopt handshake outside Compile_lock.
+  deopt_scope.deoptimize_marked();
+
+  if (!UseVtableBasedCHA) {
+    k->init_monitor()->unlock();
   }
 }
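The hunk above contains the heart of the fix: the class hierarchy is still updated and dependent code still marked under Compile_lock, but the deopt handshake itself now runs after the lock is released. Condensed shape of the new control flow (identifiers taken from the hunk above; a sketch, not a standalone program):

DeoptimizationScope deopt_scope;
{
  MutexLocker ml(current, Compile_lock);             // update hierarchy, mark only
  k->append_to_sibling_list();
  k->process_interfaces();
  if (Universe::is_fully_initialized()) {
    CodeCache::mark_dependents_on(&deopt_scope, k);  // record, do not deopt yet
  }
}                                                    // Compile_lock released here
deopt_scope.deoptimize_marked();                     // handshake outside the lock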
@@ -366,8 +366,8 @@ public:
   // Return Symbol or throw exception if name given is can not be a valid Symbol.
   static Symbol* class_name_symbol(const char* name, Symbol* exception, TRAPS);
 
-  // Setup link to hierarchy
-  static void add_to_hierarchy(InstanceKlass* k);
+  // Setup link to hierarchy and deoptimize
+  static void add_to_hierarchy(JavaThread* current, InstanceKlass* k);
 protected:
 
   // Basic find on loaded classes
@@ -850,13 +850,11 @@ InstanceKlass* SystemDictionaryShared::prepare_shared_lambda_proxy_class(Instanc
   assert(nest_host == shared_nest_host, "mismatched nest host");
 
   EventClassLoad class_load_start_event;
-  {
-    MutexLocker mu_r(THREAD, Compile_lock);
-
-    // Add to class hierarchy, and do possible deoptimizations.
-    SystemDictionary::add_to_hierarchy(loaded_lambda);
-    // But, do not add to dictionary.
-  }
+
+  // Add to class hierarchy, and do possible deoptimizations.
+  SystemDictionary::add_to_hierarchy(THREAD, loaded_lambda);
+  // But, do not add to dictionary.
 
   loaded_lambda->link_class(CHECK_NULL);
   // notify jvmti
   if (JvmtiExport::should_post_class_load()) {
@@ -249,7 +249,7 @@ void vmClasses::resolve_shared_class(InstanceKlass* klass, ClassLoaderData* load
   SystemDictionary::load_shared_class_misc(klass, loader_data);
   Dictionary* dictionary = loader_data->dictionary();
   dictionary->add_klass(THREAD, klass->name(), klass);
-  SystemDictionary::add_to_hierarchy(klass);
+  SystemDictionary::add_to_hierarchy(THREAD, klass);
   assert(klass->is_loaded(), "Must be in at least loaded state");
 }
@@ -1243,9 +1243,8 @@ void CodeCache::cleanup_inline_caches_whitebox() {
 // Keeps track of time spent for checking dependencies
 NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
 
-int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
+void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  int number_of_marked_CodeBlobs = 0;
 
   // search the hierarchy looking for nmethods which are affected by the loading of this class
 
@@ -1257,7 +1256,7 @@ int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
   NoSafepointVerifier nsv;
   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
     Klass* d = str.klass();
-    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
+    InstanceKlass::cast(d)->mark_dependent_nmethods(deopt_scope, changes);
   }
 
 #ifndef PRODUCT
@@ -1269,8 +1268,6 @@ int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
     dependentCheckTime.stop();
   }
 #endif
-
-  return number_of_marked_CodeBlobs;
 }
 
 CompiledMethod* CodeCache::find_compiled(void* start) {
@@ -1325,13 +1322,12 @@ void CodeCache::old_nmethods_do(MetadataClosure* f) {
 }
 
 // Walk compiled methods and mark dependent methods for deoptimization.
-int CodeCache::mark_dependents_for_evol_deoptimization() {
+void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
   // Each redefinition creates a new set of nmethods that have references to "old" Methods
   // So delete old method table and create a new one.
   reset_old_method_table();
 
-  int number_of_marked_CodeBlobs = 0;
   CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
@@ -1339,25 +1335,20 @@ int CodeCache::mark_dependents_for_evol_deoptimization() {
     // This includes methods whose inline caches point to old methods, so
     // inline cache clearing is unnecessary.
     if (nm->has_evol_metadata()) {
-      nm->mark_for_deoptimization();
+      deopt_scope->mark(nm);
       add_to_old_table(nm);
-      number_of_marked_CodeBlobs++;
     }
   }
-
-  // return total count of nmethods marked for deoptimization, if zero the caller
-  // can skip deoptimization
-  return number_of_marked_CodeBlobs;
 }
 
-void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
+void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) {
   assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
   CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
     if (!nm->method()->is_method_handle_intrinsic()) {
       if (nm->can_be_deoptimized()) {
-        nm->mark_for_deoptimization();
+        deopt_scope->mark(nm);
       }
       if (nm->has_evol_metadata()) {
         add_to_old_table(nm);
@@ -1366,48 +1357,30 @@ void CodeCache::mark_all_nmethods_for_evol_deoptimization() {
     }
   }
 
-// Flushes compiled methods dependent on redefined classes, that have already been
-// marked for deoptimization.
-void CodeCache::flush_evol_dependents() {
-  assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
-
-  // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped during the safepoint so CodeCache will be safe to update without
-  // holding the CodeCache_lock.
-
-  // At least one nmethod has been marked for deoptimization
-
-  Deoptimization::deoptimize_all_marked();
-}
 #endif // INCLUDE_JVMTI
 
 // Mark methods for deopt (if safe or possible).
-void CodeCache::mark_all_nmethods_for_deoptimization() {
+void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
     if (!nm->is_native_method()) {
-      nm->mark_for_deoptimization();
+      deopt_scope->mark(nm);
     }
   }
 }
 
-int CodeCache::mark_for_deoptimization(Method* dependee) {
+void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) {
   MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  int number_of_marked_CodeBlobs = 0;
 
   CompiledMethodIterator iter(CompiledMethodIterator::only_not_unloading);
   while(iter.next()) {
     CompiledMethod* nm = iter.method();
     if (nm->is_dependent_on_method(dependee)) {
       ResourceMark rm;
-      nm->mark_for_deoptimization();
-      number_of_marked_CodeBlobs++;
+      deopt_scope->mark(nm);
     }
   }
-
-  return number_of_marked_CodeBlobs;
 }
 
 void CodeCache::make_marked_nmethods_deoptimized() {
@@ -1416,51 +1389,38 @@ void CodeCache::make_marked_nmethods_deoptimized() {
     CompiledMethod* nm = iter.method();
     if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
       nm->make_not_entrant();
-      make_nmethod_deoptimized(nm);
+      nm->make_deoptimized();
     }
   }
 }
 
-void CodeCache::make_nmethod_deoptimized(CompiledMethod* nm) {
-  if (nm->is_marked_for_deoptimization() && nm->can_be_deoptimized()) {
-    nm->make_deoptimized();
-  }
-}
-
-// Flushes compiled methods dependent on dependee.
-void CodeCache::flush_dependents_on(InstanceKlass* dependee) {
+// Marks compiled methods dependent on dependee.
+void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) {
   assert_lock_strong(Compile_lock);
 
   if (!has_nmethods_with_dependencies()) {
     return;
   }
 
-  int marked = 0;
   if (dependee->is_linked()) {
     // Class initialization state change.
     KlassInitDepChange changes(dependee);
-    marked = mark_for_deoptimization(changes);
+    mark_for_deoptimization(deopt_scope, changes);
   } else {
     // New class is loaded.
     NewKlassDepChange changes(dependee);
-    marked = mark_for_deoptimization(changes);
-  }
-
-  if (marked > 0) {
-    // At least one nmethod has been marked for deoptimization
-    Deoptimization::deoptimize_all_marked();
+    mark_for_deoptimization(deopt_scope, changes);
   }
 }
 
-// Flushes compiled methods dependent on dependee
-void CodeCache::flush_dependents_on_method(const methodHandle& m_h) {
-  // --- Compile_lock is not held. However we are at a safepoint.
-  assert_locked_or_safepoint(Compile_lock);
+// Marks compiled methods dependent on dependee
+void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 
+  DeoptimizationScope deopt_scope;
   // Compute the dependent nmethods
-  if (mark_for_deoptimization(m_h()) > 0) {
-    Deoptimization::deoptimize_all_marked();
-  }
+  mark_for_deoptimization(&deopt_scope, m_h());
+  deopt_scope.deoptimize_marked();
 }
 
 void CodeCache::verify() {
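Note that every marking entry point above changed from returning a count to returning void. The old "if (marked > 0) deoptimize" guard is no longer needed: a freshly constructed DeoptimizationScope records the already-committed generation, so deoptimize_marked() returns immediately when nothing new was marked, and it also waits for nmethods that another thread marked concurrently (see the DeoptimizationScope implementation later in this diff). Sketch of the two idioms, shown as comments; the dependee variable is hypothetical, the other identifiers come from this diff:

// Old idiom: count, then conditionally handshake.
//   int marked = CodeCache::mark_for_deoptimization(dependee);
//   if (marked > 0) {
//     Deoptimization::deoptimize_all_marked();
//   }

// New idiom: always commit the scope; it is a no-op when nothing was marked
// and it waits for concurrently marked nmethods this thread depends on.
//   DeoptimizationScope deopt_scope;
//   CodeCache::mark_for_deoptimization(&deopt_scope, dependee);
//   deopt_scope.deoptimize_marked();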
@@ -78,6 +78,7 @@ class KlassDepChange;
 class OopClosure;
 class ShenandoahParallelCodeHeapIterator;
 class NativePostCallNop;
+class DeoptimizationScope;
 
 class CodeCache : AllStatic {
   friend class VMStructs;
@@ -301,27 +302,25 @@ class CodeCache : AllStatic {
 
   // Deoptimization
  private:
-  static int  mark_for_deoptimization(KlassDepChange& changes);
+  static void mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes);
 
  public:
-  static void mark_all_nmethods_for_deoptimization();
-  static int  mark_for_deoptimization(Method* dependee);
+  static void mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope);
+  static void mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee);
   static void make_marked_nmethods_deoptimized();
-  static void make_nmethod_deoptimized(CompiledMethod* nm);
 
-  // Flushing and deoptimization
-  static void flush_dependents_on(InstanceKlass* dependee);
+  // Marks dependents during classloading
+  static void mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee);
 
   // RedefineClasses support
-  // Flushing and deoptimization in case of evolution
-  static int  mark_dependents_for_evol_deoptimization();
-  static void mark_all_nmethods_for_evol_deoptimization();
-  static void flush_evol_dependents();
+  // Marks in case of evolution
+  static void mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope);
+  static void mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope);
   static void old_nmethods_do(MetadataClosure* f) NOT_JVMTI_RETURN;
   static void unregister_old_nmethod(CompiledMethod* c) NOT_JVMTI_RETURN;
 
   // Support for fullspeed debugging
-  static void flush_dependents_on_method(const methodHandle& dependee);
+  static void mark_dependents_on_method_for_breakpoint(const methodHandle& dependee);
 
   // tells if there are nmethods with dependencies
   static bool has_nmethods_with_dependencies();
@@ -54,7 +54,8 @@ CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType ty
                                int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                                bool caller_must_gc_arguments, bool compiled)
   : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
-    _mark_for_deoptimization_status(not_marked),
+    _deoptimization_status(not_marked),
+    _deoptimization_generation(0),
     _method(method),
     _gc_data(nullptr)
 {
@@ -66,7 +67,8 @@ CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType ty
                                OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled)
   : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
              frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments, compiled),
-    _mark_for_deoptimization_status(not_marked),
+    _deoptimization_status(not_marked),
+    _deoptimization_generation(0),
     _method(method),
     _gc_data(nullptr)
 {
@@ -113,12 +115,10 @@ const char* CompiledMethod::state() const {
 }
 
 //-----------------------------------------------------------------------------
-void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) {
-  // assert(can_be_deoptimized(), ""); // in some places we check before marking, in others not.
-  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
-                 Mutex::_no_safepoint_check_flag);
-  if (_mark_for_deoptimization_status != deoptimize_done) { // can't go backwards
-    _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
+void CompiledMethod::set_deoptimized_done() {
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+  if (_deoptimization_status != deoptimize_done) { // can't go backwards
+    Atomic::store(&_deoptimization_status, deoptimize_done);
   }
 }
@@ -140,17 +140,19 @@ public:
 
 class CompiledMethod : public CodeBlob {
   friend class VMStructs;
+  friend class DeoptimizationScope;
 
   void init_defaults();
 protected:
-  enum MarkForDeoptimizationStatus : u1 {
+  enum DeoptimizationStatus : u1 {
     not_marked,
     deoptimize,
     deoptimize_noupdate,
     deoptimize_done
   };
 
-  MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization
+  volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
+  // Used to track in which deoptimize handshake this method will be deoptimized.
+  uint64_t _deoptimization_generation;
 
   // set during construction
   unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
@@ -174,6 +176,11 @@ protected:
 
   virtual void flush() = 0;
 
+private:
+  DeoptimizationStatus deoptimization_status() const {
+    return Atomic::load(&_deoptimization_status);
+  }
+
 protected:
   CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);
   CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);
@@ -236,11 +243,9 @@ public:
   bool is_at_poll_return(address pc);
   bool is_at_poll_or_poll_return(address pc);
 
-  bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
-  void mark_for_deoptimization(bool inc_recompile_counts = true);
-
-  bool has_been_deoptimized() const { return _mark_for_deoptimization_status == deoptimize_done; }
-  void mark_deoptimized() { _mark_for_deoptimization_status = deoptimize_done; }
+  bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
+  bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
+  void set_deoptimized_done();
 
   virtual void make_deoptimized() { assert(false, "not supported"); };
@@ -248,8 +253,8 @@ public:
     // Update recompile counts when either the update is explicitly requested (deoptimize)
     // or the nmethod is not marked for deoptimization at all (not_marked).
     // The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
-    return _mark_for_deoptimization_status != deoptimize_noupdate &&
-           _mark_for_deoptimization_status != deoptimize_done;
+    DeoptimizationStatus status = deoptimization_status();
+    return status != deoptimize_noupdate && status != deoptimize_done;
   }
 
   // tells whether frames described by this nmethod can be deoptimized
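The status field becomes a volatile enum written with Atomic::store under CompiledMethod_lock and read lock-free through Atomic::load, so is_marked_for_deoptimization() can be polled without taking the lock. A standalone C++11 model of that read-mostly pattern, using std::atomic in place of HotSpot's Atomic and std::mutex in place of CompiledMethod_lock (FakeCompiledMethod and all names are illustrative):

#include <atomic>
#include <cstdio>
#include <mutex>

enum DeoptimizationStatus : unsigned char { not_marked, deoptimize, deoptimize_noupdate, deoptimize_done };

struct FakeCompiledMethod {
  std::atomic<DeoptimizationStatus> _status{not_marked};
  std::mutex _lock;  // stands in for CompiledMethod_lock

  // Writers serialize on the lock so the status never goes backwards...
  void set_deoptimized_done() {
    std::lock_guard<std::mutex> g(_lock);
    if (_status.load(std::memory_order_relaxed) != deoptimize_done) {
      _status.store(deoptimize_done, std::memory_order_release);
    }
  }
  // ...while readers just load atomically; no lock needed.
  bool is_marked_for_deoptimization() const {
    return _status.load(std::memory_order_acquire) != not_marked;
  }
};

int main() {
  FakeCompiledMethod m;
  std::printf("marked before: %d\n", (int)m.is_marked_for_deoptimization());
  m.set_deoptimized_done();
  std::printf("marked after:  %d\n", (int)m.is_marked_for_deoptimization());
}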
@@ -682,8 +682,6 @@ class DepChange : public StackObj {
   virtual bool is_klass_init_change() const { return false; }
   virtual bool is_call_site_change() const { return false; }
 
-  virtual void mark_for_deoptimization(nmethod* nm) = 0;
-
   // Subclass casting with assertions.
   KlassDepChange* as_klass_change() {
     assert(is_klass_change(), "bad cast");
@@ -780,10 +778,6 @@ class KlassDepChange : public DepChange {
   // What kind of DepChange is this?
   virtual bool is_klass_change() const { return true; }
 
-  virtual void mark_for_deoptimization(nmethod* nm) {
-    nm->mark_for_deoptimization(/*inc_recompile_counts=*/true);
-  }
-
   InstanceKlass* type() { return _type; }
 
   // involves_context(k) is true if k == _type or any of its super types
@@ -822,10 +816,6 @@ class CallSiteDepChange : public DepChange {
   // What kind of DepChange is this?
   virtual bool is_call_site_change() const { return true; }
 
-  virtual void mark_for_deoptimization(nmethod* nm) {
-    nm->mark_for_deoptimization(/*inc_recompile_counts=*/false);
-  }
-
   oop call_site()     const { return _call_site(); }
   oop method_handle() const { return _method_handle(); }
 };
@@ -28,6 +28,7 @@
 #include "code/dependencyContext.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/perfData.hpp"
@@ -62,17 +63,14 @@ void DependencyContext::init() {
 //
 // Walk the list of dependent nmethods searching for nmethods which
 // are dependent on the changes that were passed in and mark them for
-// deoptimization. Returns the number of nmethods found.
+// deoptimization.
 //
-int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
-  int found = 0;
+void DependencyContext::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes) {
   for (nmethodBucket* b = dependencies_not_unloading(); b != nullptr; b = b->next_not_unloading()) {
     nmethod* nm = b->get_nmethod();
     if (b->count() > 0) {
       if (nm->is_marked_for_deoptimization()) {
         // Also count already (concurrently) marked nmethods to make sure
         // deoptimization is triggered before execution in this thread continues.
-        found++;
+        deopt_scope->dependent(nm);
       } else if (nm->check_dependency_on(changes)) {
         if (TraceDependencies) {
           ResourceMark rm;
@@ -81,12 +79,10 @@ int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
           nm->print();
           nm->print_dependencies();
         }
-        changes.mark_for_deoptimization(nm);
-        found++;
+        deopt_scope->mark(nm, !changes.is_call_site_change());
       }
     }
   }
-  return found;
 }
 
 //
@@ -189,21 +185,18 @@ void DependencyContext::remove_all_dependents() {
   assert(b == nullptr, "All dependents should be unloading");
 }
 
-int DependencyContext::remove_and_mark_for_deoptimization_all_dependents() {
+void DependencyContext::remove_and_mark_for_deoptimization_all_dependents(DeoptimizationScope* deopt_scope) {
  nmethodBucket* b = dependencies_not_unloading();
  set_dependencies(nullptr);
-  int marked = 0;
  while (b != nullptr) {
    nmethod* nm = b->get_nmethod();
    if (b->count() > 0) {
      // Also count already (concurrently) marked nmethods to make sure
      // deoptimization is triggered before execution in this thread continues.
-      nm->mark_for_deoptimization();
-      marked++;
+      deopt_scope->mark(nm);
    }
    b = release_and_get_next_not_unloading(b);
  }
-  return marked;
 }
 
 #ifndef PRODUCT
@@ -32,6 +32,7 @@
 #include "runtime/safepoint.hpp"
 
 class nmethod;
+class DeoptimizationScope;
 class DepChange;
 
 //
@@ -117,10 +118,10 @@ class DependencyContext : public StackObj {
 
   static void init();
 
-  int  mark_dependent_nmethods(DepChange& changes);
+  void mark_dependent_nmethods(DeoptimizationScope* deopt_scope, DepChange& changes);
   void add_dependent_nmethod(nmethod* nm);
   void remove_all_dependents();
-  int  remove_and_mark_for_deoptimization_all_dependents();
+  void remove_and_mark_for_deoptimization_all_dependents(DeoptimizationScope* deopt_scope);
   void clean_unloading_dependents();
   static nmethodBucket* release_and_get_next_not_unloading(nmethodBucket* b);
   static void purge_dependency_contexts();
@@ -1160,6 +1160,8 @@ void nmethod::finalize_relocations() {
 
 void nmethod::make_deoptimized() {
   if (!Continuations::enabled()) {
+    // Don't deopt this again.
+    set_deoptimized_done();
     return;
   }
@@ -1167,6 +1169,12 @@ void nmethod::make_deoptimized() {
 
   CompiledICLocker ml(this);
   assert(CompiledICLocker::is_safe(this), "mt unsafe call");
+
+  // If post call nops have been already patched, we can just bail-out.
+  if (has_been_deoptimized()) {
+    return;
+  }
+
   ResourceMark rm;
   RelocIterator iter(this, oops_reloc_begin());
@@ -1202,7 +1210,7 @@ void nmethod::make_deoptimized() {
     }
   }
   // Don't deopt this again.
-  mark_deoptimized();
+  set_deoptimized_done();
 }
 
 void nmethod::verify_clean_inline_caches() {
@@ -1591,7 +1591,11 @@ void JVMCIEnv::invalidate_nmethod_mirror(JVMCIObject mirror, bool deoptimize, JV
       // the address field to still be pointing at the nmethod.
     } else {
       // Deoptimize the nmethod immediately.
-      Deoptimization::deoptimize_all_marked(nm);
+      DeoptimizationScope deopt_scope;
+      deopt_scope.mark(nm);
+      nm->make_not_entrant();
+      nm->make_deoptimized();
+      deopt_scope.deoptimize_marked();
 
       // A HotSpotNmethod instance can only reference a single nmethod
       // during its lifetime so simply clear it here.
@@ -74,6 +74,7 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/methodComparator.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
@@ -1178,15 +1179,20 @@ void InstanceKlass::initialize_impl(TRAPS) {
 void InstanceKlass::set_initialization_state_and_notify(ClassState state, JavaThread* current) {
   MonitorLocker ml(current, _init_monitor);
 
-  // Now flush all code that assume the class is not linked.
-  // Set state under the Compile_lock also.
   if (state == linked && UseVtableBasedCHA && Universe::is_fully_initialized()) {
-    MutexLocker ml(current, Compile_lock);
+    DeoptimizationScope deopt_scope;
+    {
+      // Now mark all code that assumes the class is not linked.
+      // Set state under the Compile_lock also.
+      MutexLocker ml(current, Compile_lock);
 
-    set_init_thread(nullptr); // reset _init_thread before changing _init_state
-    set_init_state(state);
+      set_init_thread(nullptr); // reset _init_thread before changing _init_state
+      set_init_state(state);
 
-    CodeCache::flush_dependents_on(this);
+      CodeCache::mark_dependents_on(&deopt_scope, this);
+    }
+    // Perform the deopt handshake outside Compile_lock.
+    deopt_scope.deoptimize_marked();
   } else {
     set_init_thread(nullptr); // reset _init_thread before changing _init_state
     set_init_state(state);
@@ -2325,8 +2331,8 @@ inline DependencyContext InstanceKlass::dependencies() {
   return dep_context;
 }
 
-int InstanceKlass::mark_dependent_nmethods(KlassDepChange& changes) {
-  return dependencies().mark_dependent_nmethods(changes);
+void InstanceKlass::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, KlassDepChange& changes) {
+  dependencies().mark_dependent_nmethods(deopt_scope, changes);
 }
 
 void InstanceKlass::add_dependent_nmethod(nmethod* nm) {
@@ -3272,7 +3278,7 @@ bool InstanceKlass::remove_osr_nmethod(nmethod* n) {
   return found;
 }
 
-int InstanceKlass::mark_osr_nmethods(const Method* m) {
+int InstanceKlass::mark_osr_nmethods(DeoptimizationScope* deopt_scope, const Method* m) {
   MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
                  Mutex::_no_safepoint_check_flag);
   nmethod* osr = osr_nmethods_head();
@@ -3280,7 +3286,7 @@ int InstanceKlass::mark_osr_nmethods(const Method* m) {
   while (osr != nullptr) {
     assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
     if (osr->method() == m) {
-      osr->mark_for_deoptimization();
+      deopt_scope->mark(osr);
       found++;
     }
     osr = osr->osr_link();
@@ -39,6 +39,7 @@
 #include "jfr/support/jfrKlassExtension.hpp"
 #endif
 
+class DeoptimizationScope;
 class klassItable;
 class RecordComponent;
 
@@ -861,7 +862,7 @@ public:
 
   // maintenance of deoptimization dependencies
   inline DependencyContext dependencies();
-  int  mark_dependent_nmethods(KlassDepChange& changes);
+  void mark_dependent_nmethods(DeoptimizationScope* deopt_scope, KlassDepChange& changes);
   void add_dependent_nmethod(nmethod* nm);
   void clean_dependency_context();
 
@@ -870,7 +871,7 @@ public:
   void set_osr_nmethods_head(nmethod* h) { _osr_nmethods_head = h; };
   void add_osr_nmethod(nmethod* n);
   bool remove_osr_nmethod(nmethod* n);
-  int mark_osr_nmethods(const Method* m);
+  int mark_osr_nmethods(DeoptimizationScope* deopt_scope, const Method* m);
   nmethod* lookup_osr_nmethod(const Method* m, int bci, int level, bool match_level) const;
 
 #if INCLUDE_JVMTI
@@ -2008,7 +2008,7 @@ void BreakpointInfo::set(Method* method) {
     // Deoptimize all dependents on this method
     HandleMark hm(thread);
     methodHandle mh(thread, method);
-    CodeCache::flush_dependents_on_method(mh);
+    CodeCache::mark_dependents_on_method_for_breakpoint(mh);
   }
 }
@@ -905,10 +905,6 @@ public:
     return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != nullptr;
   }
 
-  int mark_osr_nmethods() {
-    return method_holder()->mark_osr_nmethods(this);
-  }
-
   nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
     return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
   }
@@ -4098,22 +4098,18 @@ void VM_RedefineClasses::transfer_old_native_function_registrations(InstanceKlas
 void VM_RedefineClasses::flush_dependent_code() {
   assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
 
-  bool deopt_needed;
+  DeoptimizationScope deopt_scope;
 
   // This is the first redefinition, mark all the nmethods for deoptimization
   if (!JvmtiExport::all_dependencies_are_recorded()) {
+    CodeCache::mark_all_nmethods_for_evol_deoptimization(&deopt_scope);
     log_debug(redefine, class, nmethod)("Marked all nmethods for deopt");
-    CodeCache::mark_all_nmethods_for_evol_deoptimization();
-    deopt_needed = true;
  } else {
-    int deopt = CodeCache::mark_dependents_for_evol_deoptimization();
-    log_debug(redefine, class, nmethod)("Marked %d dependent nmethods for deopt", deopt);
-    deopt_needed = (deopt != 0);
+    CodeCache::mark_dependents_for_evol_deoptimization(&deopt_scope);
+    log_debug(redefine, class, nmethod)("Marked dependent nmethods for deopt");
  }
 
-  if (deopt_needed) {
-    CodeCache::flush_evol_dependents();
-  }
+  deopt_scope.deoptimize_marked();
 
   // From now on we know that the dependency information is complete
   JvmtiExport::set_all_dependencies_are_recorded(true);
@@ -950,22 +950,17 @@ void MethodHandles::clean_dependency_context(oop call_site) {
   deps.clean_unloading_dependents();
 }
 
-void MethodHandles::flush_dependent_nmethods(Handle call_site, Handle target) {
+void MethodHandles::mark_dependent_nmethods(DeoptimizationScope* deopt_scope, Handle call_site, Handle target) {
   assert_lock_strong(Compile_lock);
 
-  int marked = 0;
   CallSiteDepChange changes(call_site, target);
   {
     NoSafepointVerifier nsv;
-    MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+
     oop context = java_lang_invoke_CallSite::context_no_keepalive(call_site());
     DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
-    marked = deps.mark_dependent_nmethods(changes);
-  }
-  if (marked > 0) {
-    // At least one nmethod has been marked for deoptimization.
-    Deoptimization::deoptimize_all_marked();
+    deps.mark_dependent_nmethods(deopt_scope, changes);
   }
 }
@@ -1218,11 +1213,15 @@ JVM_END
 JVM_ENTRY(void, MHN_setCallSiteTargetNormal(JNIEnv* env, jobject igcls, jobject call_site_jh, jobject target_jh)) {
   Handle call_site(THREAD, JNIHandles::resolve_non_null(call_site_jh));
   Handle target   (THREAD, JNIHandles::resolve_non_null(target_jh));
+  DeoptimizationScope deopt_scope;
   {
     // Walk all nmethods depending on this call site.
     MutexLocker mu(thread, Compile_lock);
-    MethodHandles::flush_dependent_nmethods(call_site, target);
+    MethodHandles::mark_dependent_nmethods(&deopt_scope, call_site, target);
     java_lang_invoke_CallSite::set_target(call_site(), target());
+    // This is assumed to be an 'atomic' operation by verification.
+    // So keep it under lock for now.
+    deopt_scope.deoptimize_marked();
   }
 }
 JVM_END
@@ -1230,11 +1229,15 @@ JVM_END
 JVM_ENTRY(void, MHN_setCallSiteTargetVolatile(JNIEnv* env, jobject igcls, jobject call_site_jh, jobject target_jh)) {
   Handle call_site(THREAD, JNIHandles::resolve_non_null(call_site_jh));
   Handle target   (THREAD, JNIHandles::resolve_non_null(target_jh));
+  DeoptimizationScope deopt_scope;
   {
     // Walk all nmethods depending on this call site.
     MutexLocker mu(thread, Compile_lock);
-    MethodHandles::flush_dependent_nmethods(call_site, target);
+    MethodHandles::mark_dependent_nmethods(&deopt_scope, call_site, target);
     java_lang_invoke_CallSite::set_target_volatile(call_site(), target());
+    // This is assumed to be an 'atomic' operation by verification.
+    // So keep it under lock for now.
+    deopt_scope.deoptimize_marked();
   }
 }
 JVM_END
@@ -1324,21 +1327,15 @@ JVM_END
 // deallocate their dependency information.
 JVM_ENTRY(void, MHN_clearCallSiteContext(JNIEnv* env, jobject igcls, jobject context_jh)) {
   Handle context(THREAD, JNIHandles::resolve_non_null(context_jh));
+  DeoptimizationScope deopt_scope;
   {
     // Walk all nmethods depending on this call site.
     MutexLocker mu1(thread, Compile_lock);
 
-    int marked = 0;
-    {
-      NoSafepointVerifier nsv;
-      MutexLocker mu2(THREAD, CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context());
-      marked = deps.remove_and_mark_for_deoptimization_all_dependents();
-    }
-    if (marked > 0) {
-      // At least one nmethod has been marked for deoptimization
-      Deoptimization::deoptimize_all_marked();
-    }
+    NoSafepointVerifier nsv;
+    MutexLocker ml(THREAD, CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context());
+    deps.remove_and_mark_for_deoptimization_all_dependents(&deopt_scope);
+    // This is assumed to be an 'atomic' operation by verification.
+    // So keep it under lock for now.
+    deopt_scope.deoptimize_marked();
   }
 }
 JVM_END
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
 * This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@ class MethodHandles: AllStatic {
   static void add_dependent_nmethod(oop call_site, nmethod* nm);
   static void clean_dependency_context(oop call_site);
 
-  static void flush_dependent_nmethods(Handle call_site, Handle target);
+  static void mark_dependent_nmethods(DeoptimizationScope* deopt_scope, Handle call_site, Handle target);
 
   // Generate MethodHandles adapters.
   static void generate_adapters();
@@ -778,26 +778,34 @@ WB_ENTRY(jboolean, WB_IsFrameDeoptimized(JNIEnv* env, jobject o, jint depth))
 WB_END
 
 WB_ENTRY(void, WB_DeoptimizeAll(JNIEnv* env, jobject o))
-  CodeCache::mark_all_nmethods_for_deoptimization();
-  Deoptimization::deoptimize_all_marked();
+  DeoptimizationScope deopt_scope;
+  CodeCache::mark_all_nmethods_for_deoptimization(&deopt_scope);
+  deopt_scope.deoptimize_marked();
 WB_END
 
 WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
   int result = 0;
   CHECK_JNI_EXCEPTION_(env, result);
-  MutexLocker mu(Compile_lock);
-  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
-  if (is_osr) {
-    result += mh->mark_osr_nmethods();
-  } else if (mh->code() != nullptr) {
-    mh->code()->mark_for_deoptimization();
-    ++result;
-  }
-  result += CodeCache::mark_for_deoptimization(mh());
-  if (result > 0) {
-    Deoptimization::deoptimize_all_marked();
+
+  DeoptimizationScope deopt_scope;
+  {
+    MutexLocker mu(Compile_lock);
+    methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
+    if (is_osr) {
+      result += mh->method_holder()->mark_osr_nmethods(&deopt_scope, mh());
+    } else {
+      MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+      if (mh->code() != nullptr) {
+        deopt_scope.mark(mh->code());
+        ++result;
+      }
+    }
+    CodeCache::mark_for_deoptimization(&deopt_scope, mh());
   }
+
+  deopt_scope.deoptimize_marked();
+
   return result;
 WB_END
@@ -98,6 +98,121 @@
 #include "jfr/metadata/jfrSerializer.hpp"
 #endif
 
+uint64_t DeoptimizationScope::_committed_deopt_gen = 0;
+uint64_t DeoptimizationScope::_active_deopt_gen    = 1;
+bool     DeoptimizationScope::_committing_in_progress = false;
+
+DeoptimizationScope::DeoptimizationScope() : _required_gen(0) {
+  DEBUG_ONLY(_deopted = false;)
+
+  MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+  // If there is nothing to deopt _required_gen is the same as comitted.
+  _required_gen = DeoptimizationScope::_committed_deopt_gen;
+}
+
+DeoptimizationScope::~DeoptimizationScope() {
+  assert(_deopted, "Deopt not executed");
+}
+
+void DeoptimizationScope::mark(CompiledMethod* cm, bool inc_recompile_counts) {
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
+                 Mutex::_no_safepoint_check_flag);
+
+  // If it's already marked but we still need it to be deopted.
+  if (cm->is_marked_for_deoptimization()) {
+    dependent(cm);
+    return;
+  }
+
+  CompiledMethod::DeoptimizationStatus status =
+    inc_recompile_counts ? CompiledMethod::deoptimize : CompiledMethod::deoptimize_noupdate;
+  Atomic::store(&cm->_deoptimization_status, status);
+
+  // Make sure active is not committed
+  assert(DeoptimizationScope::_committed_deopt_gen < DeoptimizationScope::_active_deopt_gen, "Must be");
+  assert(cm->_deoptimization_generation == 0, "Is already marked");
+
+  cm->_deoptimization_generation = DeoptimizationScope::_active_deopt_gen;
+  _required_gen                  = DeoptimizationScope::_active_deopt_gen;
+}
+
+void DeoptimizationScope::dependent(CompiledMethod* cm) {
+  MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
+                 Mutex::_no_safepoint_check_flag);
+  // A method marked by someone else may have a _required_gen lower than what we marked with.
+  // Therefore only store it if it's higher than _required_gen.
+  if (_required_gen < cm->_deoptimization_generation) {
+    _required_gen = cm->_deoptimization_generation;
+  }
+}
+
+void DeoptimizationScope::deoptimize_marked() {
+  assert(!_deopted, "Already deopted");
+
+  // We are not alive yet.
+  if (!Universe::is_fully_initialized()) {
+    DEBUG_ONLY(_deopted = true;)
+    return;
+  }
+
+  // Safepoints are a special case, handled here.
+  if (SafepointSynchronize::is_at_safepoint()) {
+    DeoptimizationScope::_committed_deopt_gen = DeoptimizationScope::_active_deopt_gen;
+    DeoptimizationScope::_active_deopt_gen++;
+    Deoptimization::deoptimize_all_marked();
+    DEBUG_ONLY(_deopted = true;)
+    return;
+  }
+
+  uint64_t comitting = 0;
+  bool wait = false;
+  while (true) {
+    {
+      MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
+                     Mutex::_no_safepoint_check_flag);
+      // First we check if we or someone else already deopted the gen we want.
+      if (DeoptimizationScope::_committed_deopt_gen >= _required_gen) {
+        DEBUG_ONLY(_deopted = true;)
+        return;
+      }
+      if (!_committing_in_progress) {
+        // The version we are about to commit.
+        comitting = DeoptimizationScope::_active_deopt_gen;
+        // Make sure new marks use a higher gen.
+        DeoptimizationScope::_active_deopt_gen++;
+        _committing_in_progress = true;
+        wait = false;
+      } else {
+        // Another thread is handshaking and committing a gen.
+        wait = true;
+      }
+    }
+    if (wait) {
+      // Wait and let the concurrent handshake be performed.
+      ThreadBlockInVM tbivm(JavaThread::current());
+      os::naked_yield();
+    } else {
+      // Performs the handshake.
+      Deoptimization::deoptimize_all_marked(); // May safepoint and an additional deopt may have occurred.
+      DEBUG_ONLY(_deopted = true;)
+      {
+        MutexLocker ml(CompiledMethod_lock->owned_by_self() ? nullptr : CompiledMethod_lock,
+                       Mutex::_no_safepoint_check_flag);
+        // Make sure that committed doesn't go backwards.
+        // Should only happen if we did a deopt during a safepoint above.
+        if (DeoptimizationScope::_committed_deopt_gen < comitting) {
+          DeoptimizationScope::_committed_deopt_gen = comitting;
+        }
+        _committing_in_progress = false;
+
+        assert(DeoptimizationScope::_committed_deopt_gen >= _required_gen, "Must be");
+
+        return;
+      }
+    }
+  }
+}
+
 Deoptimization::UnrollBlock::UnrollBlock(int size_of_deoptimized_frame,
                                          int caller_adjustment,
                                          int caller_actual_parameters,
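The generation scheme above is what lets many concurrent requesters share one handshake: _active_deopt_gen tags new marks, _committed_deopt_gen records the last generation whose handshake finished, and a scope only waits until committed catches up with its own _required_gen. A standalone, single-threaded C++ model of that bookkeeping (counter and method names mirror the hunk above; everything else is simplified, and the real code holds CompiledMethod_lock around each step):

#include <cstdint>
#include <cstdio>

// Global generation counters, as in the hunk above (minus locking).
static uint64_t committed_deopt_gen = 0;
static uint64_t active_deopt_gen    = 1;

struct ScopeModel {
  uint64_t required_gen = committed_deopt_gen;       // nothing marked yet: no work

  void mark() { required_gen = active_deopt_gen; }   // tag with the open generation

  void deoptimize_marked() {
    if (committed_deopt_gen >= required_gen) {
      std::printf("nothing to do, gen %llu already committed\n",
                  (unsigned long long)committed_deopt_gen);
      return;                                        // someone already handshaked our gen
    }
    uint64_t comitting = active_deopt_gen;           // commit the open generation...
    active_deopt_gen++;                              // ...and open a new one for fresh marks
    // (real code: the Deoptimization::deoptimize_all_marked() handshake runs here)
    committed_deopt_gen = comitting;
    std::printf("committed gen %llu\n", (unsigned long long)comitting);
  }
};

int main() {
  ScopeModel a;            // marks something
  a.mark();
  ScopeModel b;            // marks nothing
  a.deoptimize_marked();   // performs the one batched "handshake"
  b.deoptimize_marked();   // no-op: its required gen was already committed
}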
@@ -915,17 +1030,11 @@ class DeoptimizeMarkedClosure : public HandshakeClosure {
   }
 };
 
-void Deoptimization::deoptimize_all_marked(nmethod* nmethod_only) {
+void Deoptimization::deoptimize_all_marked() {
   ResourceMark rm;
 
   // Make the dependent methods not entrant
-  if (nmethod_only != nullptr) {
-    nmethod_only->mark_for_deoptimization();
-    nmethod_only->make_not_entrant();
-    CodeCache::make_nmethod_deoptimized(nmethod_only);
-  } else {
-    CodeCache::make_marked_nmethods_deoptimized();
-  }
+  CodeCache::make_marked_nmethods_deoptimized();
 
   DeoptimizeMarkedClosure deopt;
   if (SafepointSynchronize::is_at_safepoint()) {
@@ -40,6 +40,32 @@ class compiledVFrame;
 
 template<class E> class GrowableArray;
 
+class DeoptimizationScope {
+ private:
+  // What gen we have done the deopt handshake for.
+  static uint64_t _committed_deopt_gen;
+  // What gen to mark a method with, hence larger than _committed_deopt_gen.
+  static uint64_t _active_deopt_gen;
+  // Indicate an in-progress deopt handshake.
+  static bool     _committing_in_progress;
+
+  // The required gen we need to execute/wait for
+  uint64_t _required_gen;
+  DEBUG_ONLY(bool _deopted;)
+
+ public:
+  DeoptimizationScope();
+  ~DeoptimizationScope();
+  // Mark a method, if already marked as dependent.
+  void mark(CompiledMethod* cm, bool inc_recompile_counts = true);
+  // Record this as a dependent method.
+  void dependent(CompiledMethod* cm);
+
+  // Execute the deoptimization.
+  // Make the nmethods not entrant, stackwalks and patch return pcs and sets post call nops.
+  void deoptimize_marked();
+};
+
 class Deoptimization : AllStatic {
   friend class VMStructs;
   friend class EscapeBarrier;
@@ -149,10 +175,9 @@ class Deoptimization : AllStatic {
 #endif
 
   // Make all nmethods that are marked_for_deoptimization not_entrant and deoptimize any live
-  // activations using those nmethods. If an nmethod is passed as an argument then it is
-  // marked_for_deoptimization and made not_entrant. Otherwise a scan of the code cache is done to
+  // activations using those nmethods. Scan of the code cache is done to
   // find all marked nmethods and they are made not_entrant.
-  static void deoptimize_all_marked(nmethod* nmethod_only = nullptr);
+  static void deoptimize_all_marked();
 
  public:
   // Deoptimizes a frame lazily. Deopt happens on return to the frame.
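The destructor's assert(_deopted, "Deopt not executed") in the implementation hunk earlier makes the usage contract explicit: every scope must end with deoptimize_marked(), even when nothing was marked. A hedged usage sketch (identifiers as declared above; not a standalone program):

{
  DeoptimizationScope deopt_scope;
  // ... zero or more deopt_scope.mark(cm) / deopt_scope.dependent(cm) calls,
  //     possibly made deep inside CodeCache or DependencyContext ...
  deopt_scope.deoptimize_marked();  // mandatory; cheap no-op if nothing was marked
} // debug builds assert here that deoptimize_marked() ran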