8259063: Possible deadlock with vtable/itable creation vs concurrent class unloading

Reviewed-by: pliden, neliasso
This commit is contained in:
Erik Österlund 2021-01-13 16:48:17 +00:00
parent 6bb6093fca
commit 42d2d6dcc1
4 changed files with 34 additions and 7 deletions

View file

@@ -304,12 +304,22 @@ AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
return blob;
}
void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Deliberately ask CodeCache::allocate NOT to handle allocation failure.
  // The normal failure path stops compilation and prints diagnostics, which
  // requires dropping the CodeCache_lock so the Compile_lock can be taken,
  // and then re-taking the CodeCache_lock. Our caller holds the
  // CompiledICLocker, making that unlock/relock dance unsafe here, so code
  // cache exhaustion is simply left for some later allocation that runs
  // without the CompiledICLocker held.
  const bool handle_alloc_failure = false;
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, handle_alloc_failure);
}
// Trivial constructor: simply delegates to BufferBlob with the given name
// and total blob size. The interesting part of VtableBlob construction is
// its operator new, which allocates from the non-nmethod code heap without
// handling code cache exhaustion.
VtableBlob::VtableBlob(const char* name, int size) :
BufferBlob(name, size) {
}
VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");
VtableBlob* blob = NULL;
unsigned int size = sizeof(VtableBlob);
@@ -318,8 +328,21 @@ VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
size += align_up(buffer_size, oopSize);
assert(name != NULL, "must provide a name");
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
if (!CodeCache_lock->try_lock()) {
// If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
// IC transition to megamorphic, for which this stub will be needed. It is better to
// bail out the transition, and wait for a more opportune moment. Not only is it not
// worth waiting for the lock blockingly for the megamorphic transition, it might
// also result in a deadlock to blockingly wait, when concurrent class unloading is
// performed. At this point in time, the CompiledICLocker is taken, so we are not
// allowed to blockingly wait for the CodeCache_lock, as these two locks are otherwise
// consistently taken in the opposite order. Bailing out results in an IC transition to
// the clean state instead, which will cause subsequent calls to retry the transitioning
// eventually.
return NULL;
}
blob = new (size) VtableBlob(name, size);
CodeCache_lock->unlock();
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();

View file

@@ -441,6 +441,8 @@ class VtableBlob: public BufferBlob {
private:
VtableBlob(const char*, int);
void* operator new(size_t s, unsigned size) throw();
public:
// Creation
static VtableBlob* create(const char* name, int buffer_size);

View file

@@ -483,7 +483,7 @@ CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
* run the constructor for the CodeBlob subclass he is busy
* instantiating.
*/
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool handle_alloc_failure, int orig_code_blob_type) {
// Possibly wakes up the sweeper thread.
NMethodSweeper::report_allocation(code_blob_type);
assert_locked_or_safepoint(CodeCache_lock);
@@ -531,11 +531,13 @@ CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_t
tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
heap->name(), get_code_heap(type)->name());
}
return allocate(size, type, orig_code_blob_type);
return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
}
}
MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CompileBroker::handle_full_code_cache(orig_code_blob_type);
if (handle_alloc_failure) {
MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CompileBroker::handle_full_code_cache(orig_code_blob_type);
}
return NULL;
}
if (PrintCodeCacheExtension) {

View file

@@ -136,7 +136,7 @@ class CodeCache : AllStatic {
static const GrowableArray<CodeHeap*>* nmethod_heaps() { return _nmethod_heaps; }
// Allocation/administration
static CodeBlob* allocate(int size, int code_blob_type, int orig_code_blob_type = CodeBlobType::All); // allocates a new CodeBlob
static CodeBlob* allocate(int size, int code_blob_type, bool handle_alloc_failure = true, int orig_code_blob_type = CodeBlobType::All); // allocates a new CodeBlob
static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)