7024970: 2/3 assert(ServiceThread::is_service_thread(Thread::current())) failed: Service thread must post enqueue

Change nmethod_lock() to also prevent zombification of the nmethod. CompiledMethodUnload events also need to lock the nmethod. Clean ups in nmethod::make_not_entrant_or_zombie()

Reviewed-by: dholmes, kamg, never, dsamersoff, ysr, coleenp, acorn
Daniel D. Daugherty 2011-03-15 06:37:31 -07:00
parent 0c3498d374
commit c4e301bffa
4 changed files with 52 additions and 26 deletions
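For orientation (not part of the commit): a minimal, hypothetical C++ sketch of the locking contract this change establishes, using a simplified stand-in type rather than the real nmethod/nmethodLocker declarations. The sweeper must not convert a locked nmethod into a zombie, and only the CompiledMethodUnload path may lock an nmethod that has already become a zombie, so its generated code stays around until the ServiceThread has posted the event.

// Illustrative stand-in only; real HotSpot types and signatures differ.
#include <atomic>
#include <cassert>

struct NMethodStub {
  std::atomic<int> lock_count{0};
  bool zombie = false;

  bool is_locked_by_vm() const { return lock_count.load() > 0; }

  // Sweeper side: a locked nmethod must not be converted to a zombie.
  bool can_be_made_zombie() const { return !is_locked_by_vm(); }

  // Locker side: only the CompiledMethodUnload event path passes zombie_ok == true.
  void lock(bool zombie_ok = false) {
    lock_count.fetch_add(1);
    assert(zombie_ok || !zombie);   // mirrors the guarantee() in lock_nmethod()
  }
  void unlock() { lock_count.fetch_sub(1); }
};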

nmethod.cpp

@@ -1180,14 +1180,17 @@ void nmethod::mark_as_seen_on_stack() {
   set_stack_traversal_mark(NMethodSweeper::traversal_count());
 }
 
-// Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
+// Tell if a non-entrant method can be converted to a zombie (i.e.,
+// there are no activations on the stack, not in use by the VM,
+// and not in use by the ServiceThread)
 bool nmethod::can_not_entrant_be_converted() {
   assert(is_not_entrant(), "must be a non-entrant method");
 
   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
   // count can be greater than the stack traversal count before it hits the
   // nmethod for the second time.
-  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count();
+  return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
+         !is_locked_by_vm();
 }
 
 void nmethod::inc_decompile_count() {
@@ -1294,6 +1297,7 @@ void nmethod::log_state_change() const {
 // Common functionality for both make_not_entrant and make_zombie
 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
+  assert(!is_zombie(), "should not already be a zombie");
 
   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
   nmethodLocker nml(this);
@@ -1301,11 +1305,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   No_Safepoint_Verifier nsv;
 
   {
-    // If the method is already zombie there is nothing to do
-    if (is_zombie()) {
-      return false;
-    }
-
     // invalidate osr nmethod before acquiring the patching lock since
     // they both acquire leaf locks and we don't want a deadlock.
     // This logic is equivalent to the logic below for patching the
@@ -1375,13 +1374,12 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
       flush_dependencies(NULL);
     }
 
-    {
-      // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
-      // and it hasn't already been reported for this nmethod then report it now.
-      // (the event may have been reported earilier if the GC marked it for unloading).
-      Pause_No_Safepoint_Verifier pnsv(&nsv);
-      post_compiled_method_unload();
-    }
+    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
+    // event and it hasn't already been reported for this nmethod then
+    // report it now. The event may have been reported earilier if the GC
+    // marked it for unloading). JvmtiDeferredEventQueue support means
+    // we no longer go to a safepoint here.
+    post_compiled_method_unload();
 
 #ifdef ASSERT
     // It's no longer safe to access the oops section since zombie
@@ -1566,7 +1564,7 @@ void nmethod::post_compiled_method_unload() {
   if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
     assert(!unload_reported(), "already unloaded");
     JvmtiDeferredEvent event =
-      JvmtiDeferredEvent::compiled_method_unload_event(
+      JvmtiDeferredEvent::compiled_method_unload_event(this,
           _jmethod_id, insts_begin());
     if (SafepointSynchronize::is_at_safepoint()) {
       // Don't want to take the queueing lock. Add it as pending and
@@ -2171,10 +2169,12 @@ nmethodLocker::nmethodLocker(address pc) {
   lock_nmethod(_nm);
 }
 
-void nmethodLocker::lock_nmethod(nmethod* nm) {
+// Only JvmtiDeferredEvent::compiled_method_unload_event()
+// should pass zombie_ok == true.
+void nmethodLocker::lock_nmethod(nmethod* nm, bool zombie_ok) {
   if (nm == NULL) return;
   Atomic::inc(&nm->_lock_count);
-  guarantee(!nm->is_zombie(), "cannot lock a zombie method");
+  guarantee(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method");
 }
 
 void nmethodLocker::unlock_nmethod(nmethod* nm) {

nmethod.hpp

@@ -194,7 +194,10 @@ class nmethod : public CodeBlob {
 
   NOT_PRODUCT(bool _has_debug_info; )
 
-  // Nmethod Flushing lock (if non-zero, then the nmethod is not removed)
+  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
+  // and is not made into a zombie. However, once the nmethod is made into
+  // a zombie, it will be locked one final time if CompiledMethodUnload
+  // event processing needs to be done.
   jint  _lock_count;
 
   // not_entrant method removal. Each mark_sweep pass will update
@@ -522,8 +525,9 @@ public:
   void flush();
 
  public:
-  // If returning true, it is unsafe to remove this nmethod even though it is a zombie
-  // nmethod, since the VM might have a reference to it. Should only be called from a safepoint.
+  // When true is returned, it is unsafe to remove this nmethod even if
+  // it is a zombie, since the VM or the ServiceThread might still be
+  // using it.
   bool is_locked_by_vm() const { return _lock_count >0; }
 
   // See comment at definition of _last_seen_on_stack
@@ -689,13 +693,20 @@ public:
 };
 
-// Locks an nmethod so its code will not get removed, even if it is a zombie/not_entrant method
+// Locks an nmethod so its code will not get removed and it will not
+// be made into a zombie, even if it is a not_entrant method. After the
+// nmethod becomes a zombie, if CompiledMethodUnload event processing
+// needs to be done, then lock_nmethod() is used directly to keep the
+// generated code from being reused too early.
 class nmethodLocker : public StackObj {
   nmethod* _nm;
 
  public:
-  static void lock_nmethod(nmethod* nm);   // note: nm can be NULL
+  // note: nm can be NULL
+  // Only JvmtiDeferredEvent::compiled_method_unload_event()
+  // should pass zombie_ok == true.
+  static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
   static void unlock_nmethod(nmethod* nm); // (ditto)
 
   nmethodLocker(address pc); // derive nm from pc

jvmtiImpl.cpp

@@ -919,15 +919,24 @@ JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event(
     nmethod* nm) {
   JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
   event._event_data.compiled_method_load = nm;
-  nmethodLocker::lock_nmethod(nm); // will be unlocked when posted
+  // Keep the nmethod alive until the ServiceThread can process
+  // this deferred event.
+  nmethodLocker::lock_nmethod(nm);
   return event;
 }
 
 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
-    jmethodID id, const void* code) {
+    nmethod* nm, jmethodID id, const void* code) {
   JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
+  event._event_data.compiled_method_unload.nm = nm;
   event._event_data.compiled_method_unload.method_id = id;
   event._event_data.compiled_method_unload.code_begin = code;
+  // Keep the nmethod alive until the ServiceThread can process
+  // this deferred event. This will keep the memory for the
+  // generated code from being reused too early. We pass
+  // zombie_ok == true here so that our nmethod that was just
+  // made into a zombie can be locked.
+  nmethodLocker::lock_nmethod(nm, true /* zombie_ok */);
   return event;
 }
 
@@ -946,14 +955,19 @@ void JvmtiDeferredEvent::post() {
     case TYPE_COMPILED_METHOD_LOAD: {
       nmethod* nm = _event_data.compiled_method_load;
       JvmtiExport::post_compiled_method_load(nm);
+      // done with the deferred event so unlock the nmethod
       nmethodLocker::unlock_nmethod(nm);
       break;
     }
-    case TYPE_COMPILED_METHOD_UNLOAD:
+    case TYPE_COMPILED_METHOD_UNLOAD: {
+      nmethod* nm = _event_data.compiled_method_unload.nm;
       JvmtiExport::post_compiled_method_unload(
           _event_data.compiled_method_unload.method_id,
           _event_data.compiled_method_unload.code_begin);
+      // done with the deferred event so unlock the nmethod
+      nmethodLocker::unlock_nmethod(nm);
       break;
+    }
     case TYPE_DYNAMIC_CODE_GENERATED:
       JvmtiExport::post_dynamic_code_generated_internal(
           _event_data.dynamic_code_generated.name,
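To make the jvmtiImpl.cpp hunks above easier to follow, here is a hedged, self-contained sketch of the deferred-event lifecycle with stand-in names (the queue and helper functions below are illustrative, not the real JvmtiDeferredEventQueue API): the unload event takes the final lock on the just-zombified nmethod when it is created, and the ServiceThread drops that lock only after the event has been posted.

// Illustrative stand-ins only; not the real HotSpot/JVMTI types.
#include <atomic>
#include <cassert>
#include <cstdio>
#include <deque>

struct NMethodStub {
  std::atomic<int> lock_count{0};
  bool zombie = false;
};

// Only the CompiledMethodUnload path passes zombie_ok == true.
void lock_nmethod(NMethodStub* nm, bool zombie_ok = false) {
  nm->lock_count.fetch_add(1);
  assert(zombie_ok || !nm->zombie);
}

void unlock_nmethod(NMethodStub* nm) {
  nm->lock_count.fetch_sub(1);
}

struct UnloadEvent { NMethodStub* nm; };
static std::deque<UnloadEvent> g_queue;   // stand-in for JvmtiDeferredEventQueue

// Event-creation side: lock the just-zombified nmethod before queueing,
// so its generated code cannot be reused while the event is pending.
void enqueue_unload(NMethodStub* nm) {
  lock_nmethod(nm, /* zombie_ok */ true);
  g_queue.push_back(UnloadEvent{nm});
}

// ServiceThread side: post the event, then drop the final lock.
void service_thread_drain() {
  while (!g_queue.empty()) {
    UnloadEvent e = g_queue.front();
    g_queue.pop_front();
    std::printf("CompiledMethodUnload posted\n");  // stands in for the JVMTI callback
    unlock_nmethod(e.nm);                          // done with the deferred event
  }
}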

jvmtiImpl.hpp

@@ -458,6 +458,7 @@ class JvmtiDeferredEvent VALUE_OBJ_CLASS_SPEC {
   union {
     nmethod* compiled_method_load;
     struct {
+      nmethod* nm;
       jmethodID method_id;
       const void* code_begin;
     } compiled_method_unload;
@@ -477,7 +478,7 @@ class JvmtiDeferredEvent VALUE_OBJ_CLASS_SPEC {
 
   // Factory methods
   static JvmtiDeferredEvent compiled_method_load_event(nmethod* nm)
     KERNEL_RETURN_(JvmtiDeferredEvent());
-  static JvmtiDeferredEvent compiled_method_unload_event(
+  static JvmtiDeferredEvent compiled_method_unload_event(nmethod* nm,
     jmethodID id, const void* code) KERNEL_RETURN_(JvmtiDeferredEvent());
   static JvmtiDeferredEvent dynamic_code_generated_event(
     const char* name, const void* begin, const void* end)