6539517: CR 6186200 should be extended to perm gen allocation to prevent spurious OOM's from perm gen

Reviewed-by: ysr, jmasa
Andrey Petrusenko 2008-04-01 15:13:47 +04:00
parent 6bfbc36dcd
commit 252a10cf1a
14 changed files with 165 additions and 80 deletions

src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.cpp

@@ -44,52 +44,12 @@ HeapWord* CMSPermGen::mem_allocate(size_t size) {
   bool lock_owned = lock->owned_by_self();
   if (lock_owned) {
     MutexUnlocker mul(lock);
-    return mem_allocate_work(size);
+    return mem_allocate_in_gen(size, _gen);
   } else {
-    return mem_allocate_work(size);
+    return mem_allocate_in_gen(size, _gen);
   }
 }
 
-HeapWord* CMSPermGen::mem_allocate_work(size_t size) {
-  assert(!_gen->freelistLock()->owned_by_self(), "Potetntial deadlock");
-
-  MutexLocker ml(Heap_lock);
-  HeapWord* obj = NULL;
-
-  obj = _gen->allocate(size, false);
-  // Since we want to minimize pause times, we will prefer
-  // expanding the perm gen rather than doing a stop-world
-  // collection to satisfy the allocation request.
-  if (obj == NULL) {
-    // Try to expand the perm gen and allocate space.
-    obj = _gen->expand_and_allocate(size, false, false);
-    if (obj == NULL) {
-      // Let's see if a normal stop-world full collection will
-      // free up enough space.
-      SharedHeap::heap()->collect_locked(GCCause::_permanent_generation_full);
-      obj = _gen->allocate(size, false);
-      if (obj == NULL) {
-        // The collection above may have shrunk the space, so try
-        // to expand again and allocate space.
-        obj = _gen->expand_and_allocate(size, false, false);
-      }
-      if (obj == NULL) {
-        // We have not been able to allocate space despite a
-        // full stop-world collection. We now make a last-ditch collection
-        // attempt (in which soft refs are all aggressively freed)
-        // that will try to reclaim as much space as possible.
-        SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection);
-        obj = _gen->allocate(size, false);
-        if (obj == NULL) {
-          // Expand generation in case it was shrunk following the collection.
-          obj = _gen->expand_and_allocate(size, false, false);
-        }
-      }
-    }
-  }
-  return obj;
-}
-
 void CMSPermGen::compute_new_size() {
   _gen->compute_new_size();
 }
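The CMS-side change is pure delegation: mem_allocate still drops the lock it may already own (the MutexUnlocker) before entering the shared path, because mem_allocate_in_gen takes Heap_lock, and blocking on Heap_lock while holding a generation-local lock is exactly the deadlock the deleted assert warned about. A minimal standalone sketch of that unlock-before-blocking pattern, using std::mutex as a stand-in for HotSpot's Mutex (all names here are illustrative, not HotSpot APIs):

#include <cstddef>
#include <mutex>

std::mutex heap_lock;  // outer, long-held lock (plays the role of Heap_lock)

void* allocate_in_gen(std::size_t size) {
  std::lock_guard<std::mutex> ml(heap_lock);  // may block for a long time
  // ... allocate / expand / trigger GC, as in mem_allocate_in_gen ...
  return nullptr;
}

void* mem_allocate(std::size_t size, std::unique_lock<std::mutex>& inner) {
  if (inner.owns_lock()) {
    inner.unlock();                    // the MutexUnlocker equivalent
    void* p = allocate_in_gen(size);
    inner.lock();                      // reacquired before returning to caller
    return p;
  }
  return allocate_in_gen(size);
}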

src/share/vm/gc_implementation/concurrentMarkSweep/cmsPermGen.hpp

@@ -29,7 +29,6 @@ class ConcurrentMarkSweepGeneration;
 
 class CMSPermGen: public PermGen {
   friend class VMStructs;
-  HeapWord* mem_allocate_work(size_t size);
  protected:
   // The "generation" view.
   ConcurrentMarkSweepGeneration* _gen;

src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp

@@ -590,6 +590,31 @@ HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
       full_gc_count = Universe::heap()->total_full_collections();
       result = perm_gen()->allocate_permanent(size);
+
+      if (result != NULL) {
+        return result;
+      }
+
+      if (GC_locker::is_active_and_needs_gc()) {
+        // If this thread is not in a jni critical section, we stall
+        // the requestor until the critical section has cleared and
+        // GC allowed. When the critical section clears, a GC is
+        // initiated by the last thread exiting the critical section; so
+        // we retry the allocation sequence from the beginning of the loop,
+        // rather than causing more, now probably unnecessary, GC attempts.
+        JavaThread* jthr = JavaThread::current();
+        if (!jthr->in_critical()) {
+          MutexUnlocker mul(Heap_lock);
+          GC_locker::stall_until_clear();
+          continue;
+        } else {
+          if (CheckJNICalls) {
+            fatal("Possible deadlock due to allocating while"
+                  " in jni critical section");
+          }
+          return NULL;
+        }
+      }
     }
 
     if (result == NULL) {
@@ -622,6 +647,12 @@ HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
       if (op.prologue_succeeded()) {
         assert(Universe::heap()->is_in_permanent_or_null(op.result()),
                "result not in heap");
+        // If GC was locked out during VM operation then retry allocation
+        // and/or stall as necessary.
+        if (op.gc_locked()) {
+          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
+          continue;  // retry and/or stall as necessary
+        }
+
         // If a NULL results is being returned, an out-of-memory
         // will be thrown now. Clear the gc_time_limit_exceeded
         // flag to avoid the following situation.
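This block extends the CR 6186200 policy to perm gen allocation: when the GC locker is held, a thread that is not itself inside a JNI critical section releases Heap_lock, stalls until the locker clears (the last thread leaving the critical section runs the deferred GC), and retries the loop; a thread that is inside a critical section must not wait, since the GC is waiting on it, so it fails the allocation instead. A standalone sketch of that decision, with std::condition_variable standing in for JNICritical_lock and every name illustrative:

#include <condition_variable>
#include <mutex>

std::mutex              jni_critical_lock;  // plays the role of JNICritical_lock
std::condition_variable cleared;
bool needs_gc = false;  // true while critical sections are deferring a GC

// Block until the last thread leaving a critical section has run the GC.
void stall_until_clear() {
  std::unique_lock<std::mutex> ml(jni_critical_lock);
  cleared.wait(ml, [] { return !needs_gc; });
}

// True: caller should retry its allocation loop. False: caller must give
// up (OOM path), because waiting here would deadlock.
bool stall_or_fail(bool in_jni_critical) {
  if (!in_jni_critical) {
    stall_until_clear();
    return true;
  }
  return false;  // this thread is itself keeping the GC locker held
}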

src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

@@ -999,7 +999,7 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
   DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
 
   // Increment the invocation count
-  heap->increment_total_collections();
+  heap->increment_total_collections(true);
 
   // We need to track unique mark sweep invocations as well.
   _total_invocations++;
@@ -1964,7 +1964,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   assert(ref_processor() != NULL, "Sanity");
 
-  if (GC_locker::is_active()) {
+  if (GC_locker::check_active_before_gc()) {
     return;
   }
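Swapping is_active() for check_active_before_gc() is small but load-bearing: the new call also records that a collection was refused, so the last thread to exit a JNI critical section knows a GC is owed and initiates it. A standalone paraphrase of that distinction (plain atomics stand in for the real gcLocker.hpp state; the exact HotSpot body may differ):

#include <atomic>

std::atomic<int>  jni_lock_count{0};    // threads currently in JNI critical sections
std::atomic<bool> needs_gc_flag{false};

bool is_active() { return jni_lock_count.load() > 0; }

// Unlike is_active(), this leaves a marker behind when it refuses a GC,
// which is what makes the deferred collection happen later.
bool check_active_before_gc() {
  if (is_active()) {
    needs_gc_flag.store(true);
  }
  return is_active();
}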

src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp

@@ -69,6 +69,9 @@ void VM_ParallelGCFailedPermanentAllocation::doit() {
   GCCauseSetter gccs(heap, _gc_cause);
   _result = heap->failed_permanent_mem_allocate(_size);
+  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
+    set_gc_locked();
+  }
   notify_gc_end();
 }
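gc_locked() is the return channel for the retry logic in the parallelScavengeHeap.cpp hunk above: when the collection could not run because the GC locker was held, the operation flags it, and the requester treats the NULL result as "stall or retry" rather than as out-of-memory. A minimal sketch of the handshake, with illustrative types in place of the VM_GC_Operation hierarchy:

struct FailedAllocOp {
  void* result    = nullptr;
  bool  gc_locked = false;

  // doit() runs on the VM thread at a safepoint.
  void doit(bool locker_active_and_needs_gc) {
    // result = heap->failed_permanent_mem_allocate(size) in the real code.
    if (result == nullptr && locker_active_and_needs_gc) {
      gc_locked = true;  // requester must stall/retry instead of throwing OOM
    }
  }
};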

src/share/vm/gc_implementation/shared/vmGCOperations.cpp

@@ -144,3 +144,18 @@ void VM_GenCollectFull::doit() {
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
   notify_gc_end();
 }
+
+void VM_GenCollectForPermanentAllocation::doit() {
+  JvmtiGCForAllocationMarker jgcm;
+  notify_gc_begin(true);
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  GCCauseSetter gccs(gch, _gc_cause);
+  gch->do_full_collection(gch->must_clear_all_soft_refs(),
+                          gch->n_gens() - 1);
+  _res = gch->perm_gen()->allocate(_size, false);
+  assert(gch->is_in_reserved_or_null(_res), "result not in heap");
+  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
+    set_gc_locked();
+  }
+  notify_gc_end();
+}

src/share/vm/gc_implementation/shared/vmGCOperations.hpp

@@ -43,6 +43,7 @@
 //  is specified; and also the attach "inspectheap" operation
 //
 //  VM_GenCollectForAllocation
+//  VM_GenCollectForPermanentAllocation
 //  VM_ParallelGCFailedAllocation
 //  VM_ParallelGCFailedPermanentAllocation
 //   - this operation is invoked when allocation is failed;
@@ -166,3 +167,23 @@ class VM_GenCollectFull: public VM_GC_Operation {
   virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
   virtual void doit();
 };
+
+class VM_GenCollectForPermanentAllocation: public VM_GC_Operation {
+ private:
+  HeapWord*   _res;
+  size_t      _size;     // size of object to be allocated
+ public:
+  VM_GenCollectForPermanentAllocation(size_t size,
+                                      unsigned int gc_count_before,
+                                      unsigned int full_gc_count_before,
+                                      GCCause::Cause gc_cause)
+    : VM_GC_Operation(gc_count_before, full_gc_count_before, true),
+      _size(size) {
+    _res = NULL;
+    _gc_cause = gc_cause;
+  }
+  ~VM_GenCollectForPermanentAllocation() {}
+  virtual VMOp_Type type() const { return VMOp_GenCollectForPermanentAllocation; }
+  virtual void doit();
+  HeapWord* result() const { return _res; }
+};
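The gc_count_before/full_gc_count_before constructor arguments let the VM thread detect that another collection already ran between the requester sampling the counters (under Heap_lock) and this operation reaching its safepoint; in that case the prologue fails, no redundant GC runs, and the requester simply retries allocation against the freshly collected heap. A sketch of that handshake (illustrative names; the real check lives in VM_GC_Operation's prologue):

#include <atomic>

std::atomic<unsigned> total_collections{0};  // incremented by every GC

struct GCRequest {
  unsigned gc_count_before;  // sampled by the requester while holding the heap lock

  // Skip the collection if someone else collected in the meantime.
  bool should_skip() const {
    return total_collections.load() != gc_count_before;
  }
};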

src/share/vm/includeDB_core

@@ -719,6 +719,11 @@ ciObjArray.cpp ciNullObject.hpp
 ciObjArray.cpp                          ciUtilities.hpp
 ciObjArray.cpp                          objArrayOop.hpp
 
+ciObjArray.cpp                          ciObjArray.hpp
+ciObjArray.cpp                          ciNullObject.hpp
+ciObjArray.cpp                          ciUtilities.hpp
+ciObjArray.cpp                          objArrayOop.hpp
+
 ciObjArrayKlass.cpp                     ciInstanceKlass.hpp
 ciObjArrayKlass.cpp                     ciObjArrayKlass.hpp
 ciObjArrayKlass.cpp                     ciObjArrayKlassKlass.hpp
@@ -1636,6 +1641,7 @@ frame_<arch>.inline.hpp generate_platform_dependent_include
 gcLocker.cpp                            gcLocker.inline.hpp
 gcLocker.cpp                            sharedHeap.hpp
+gcLocker.cpp                            resourceArea.hpp
 
 gcLocker.hpp                            collectedHeap.hpp
 gcLocker.hpp                            genCollectedHeap.hpp
@@ -3061,13 +3067,14 @@ oopMap.cpp scopeDesc.hpp
 oopMap.cpp                              signature.hpp
 
 oopMap.hpp                              allocation.hpp
-oopMapCache.cpp                         jvmtiRedefineClassesTrace.hpp
 oopMap.hpp                              compressedStream.hpp
 oopMap.hpp                              growableArray.hpp
 oopMap.hpp                              vmreg.hpp
 
 oopMapCache.cpp                         allocation.inline.hpp
+oopMapCache.cpp                         handles.inline.hpp
 oopMapCache.cpp                         jvmtiRedefineClassesTrace.hpp
-oopMapCache.cpp                         handles.inline.hpp
 oopMapCache.cpp                         oop.inline.hpp
 oopMapCache.cpp                         oopMapCache.hpp
 oopMapCache.cpp                         resourceArea.hpp
@@ -3315,6 +3322,10 @@ permGen.cpp java.hpp
 permGen.cpp                             oop.inline.hpp
 permGen.cpp                             permGen.hpp
 permGen.cpp                             universe.hpp
+permGen.cpp                             gcLocker.hpp
+permGen.cpp                             gcLocker.inline.hpp
+permGen.cpp                             vmGCOperations.hpp
+permGen.cpp                             vmThread.hpp
 
 permGen.hpp                             gcCause.hpp
 permGen.hpp                             generation.hpp

src/share/vm/memory/gcLocker.cpp

@@ -32,6 +32,12 @@ volatile bool GC_locker::_doing_gc = false;
 
 void GC_locker::stall_until_clear() {
   assert(!JavaThread::current()->in_critical(), "Would deadlock");
+  if (PrintJNIGCStalls && PrintGCDetails) {
+    ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
+    gclog_or_tty->print_cr(
+      "Allocation failed. Thread \"%s\" is stalled by JNI critical section.",
+      JavaThread::current()->name());
+  }
   MutexLocker ml(JNICritical_lock);
   // Wait for _needs_gc to be cleared
   while (GC_locker::needs_gc()) {
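The message fires only when both -XX:+PrintGCDetails and the new -XX:+PrintJNIGCStalls are set, and the ResourceMark is required because JavaThread::name() builds the UTF-8 thread name in resource memory — which is also why includeDB_core gains a gcLocker.cpp → resourceArea.hpp edge above. A standalone approximation of the diagnostic, with printf standing in for gclog_or_tty:

#include <cstdio>

bool PrintJNIGCStalls = true;  // -XX:+PrintJNIGCStalls
bool PrintGCDetails   = true;  // -XX:+PrintGCDetails

void report_stall(const char* thread_name) {
  if (PrintJNIGCStalls && PrintGCDetails) {
    std::printf(
        "Allocation failed. Thread \"%s\" is stalled by JNI critical section.\n",
        thread_name);
  }
}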

src/share/vm/memory/genCollectedHeap.hpp

@@ -35,6 +35,7 @@ class GenCollectedHeap : public SharedHeap {
   friend class CMSCollector;
   friend class GenMarkSweep;
   friend class VM_GenCollectForAllocation;
+  friend class VM_GenCollectForPermanentAllocation;
   friend class VM_GenCollectFull;
   friend class VM_GenCollectFullConcurrent;
   friend class VM_GC_HeapInspection;

src/share/vm/memory/permGen.cpp

@@ -25,6 +25,70 @@
 #include "incls/_precompiled.incl"
 #include "incls/_permGen.cpp.incl"
 
+HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
+  MutexLocker ml(Heap_lock);
+  GCCause::Cause next_cause = GCCause::_permanent_generation_full;
+  GCCause::Cause prev_cause = GCCause::_no_gc;
+
+  for (;;) {
+    HeapWord* obj = gen->allocate(size, false);
+    if (obj != NULL) {
+      return obj;
+    }
+    if (gen->capacity() < _capacity_expansion_limit ||
+        prev_cause != GCCause::_no_gc) {
+      obj = gen->expand_and_allocate(size, false);
+    }
+    if (obj == NULL && prev_cause != GCCause::_last_ditch_collection) {
+      if (GC_locker::is_active_and_needs_gc()) {
+        // If this thread is not in a jni critical section, we stall
+        // the requestor until the critical section has cleared and
+        // GC allowed. When the critical section clears, a GC is
+        // initiated by the last thread exiting the critical section; so
+        // we retry the allocation sequence from the beginning of the loop,
+        // rather than causing more, now probably unnecessary, GC attempts.
+        JavaThread* jthr = JavaThread::current();
+        if (!jthr->in_critical()) {
+          MutexUnlocker mul(Heap_lock);
+          // Wait for JNI critical section to be exited
+          GC_locker::stall_until_clear();
+          continue;
+        } else {
+          if (CheckJNICalls) {
+            fatal("Possible deadlock due to allocating while"
+                  " in jni critical section");
+          }
+          return NULL;
+        }
+      }
+
+      // Read the GC count while holding the Heap_lock
+      unsigned int gc_count_before      = SharedHeap::heap()->total_collections();
+      unsigned int full_gc_count_before = SharedHeap::heap()->total_full_collections();
+      {
+        MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
+        VM_GenCollectForPermanentAllocation op(size, gc_count_before,
+                                               full_gc_count_before, next_cause);
+        VMThread::execute(&op);
+        if (!op.prologue_succeeded() || op.gc_locked()) {
+          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
+          continue;  // retry and/or stall as necessary
+        }
+        obj = op.result();
+        assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
+               "result not in heap");
+        if (obj != NULL) {
+          return obj;
+        }
+      }
+      prev_cause = next_cause;
+      next_cause = GCCause::_last_ditch_collection;
+    } else {
+      return obj;
+    }
+  }
+}
+
 CompactingPermGen::CompactingPermGen(ReservedSpace rs,
                                      ReservedSpace shared_rs,
                                      size_t initial_byte_size,
@@ -44,40 +108,7 @@ CompactingPermGen::CompactingPermGen(ReservedSpace rs,
 }
 
 HeapWord* CompactingPermGen::mem_allocate(size_t size) {
-  MutexLocker ml(Heap_lock);
-  HeapWord* obj = _gen->allocate(size, false);
-  bool tried_collection = false;
-  bool tried_expansion = false;
-  while (obj == NULL) {
-    if (_gen->capacity() >= _capacity_expansion_limit || tried_expansion) {
-      // Expansion limit reached, try collection before expanding further
-      // For now we force a full collection, this could be changed
-      SharedHeap::heap()->collect_locked(GCCause::_permanent_generation_full);
-      obj = _gen->allocate(size, false);
-      tried_collection = true;
-      tried_expansion = false;  // ... following the collection:
-                                // the collection may have shrunk the space.
-    }
-    if (obj == NULL && !tried_expansion) {
-      obj = _gen->expand_and_allocate(size, false);
-      tried_expansion = true;
-    }
-    if (obj == NULL && tried_collection && tried_expansion) {
-      // We have not been able to allocate despite a collection and
-      // an attempted space expansion. We now make a last-ditch collection
-      // attempt that will try to reclaim as much space as possible (for
-      // example by aggressively clearing all soft refs).
-      SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection);
-      obj = _gen->allocate(size, false);
-      if (obj == NULL) {
-        // An expansion attempt is necessary since the previous
-        // collection may have shrunk the space.
-        obj = _gen->expand_and_allocate(size, false);
-      }
-      break;
-    }
-  }
-  return obj;
+  return mem_allocate_in_gen(size, _gen);
 }
 
 void CompactingPermGen::compute_new_size() {
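Taken together, mem_allocate_in_gen replaces the two ad-hoc loops (CMS and compacting) with one escalation ladder: allocate; expand, but beyond _capacity_expansion_limit only once a GC has already run; full collection with cause _permanent_generation_full; a _last_ditch_collection that also clears soft references; and only then NULL, which the caller surfaces as an OOM. A compilable standalone sketch of just that escalation order (stub functions; the locking, GC-locker stall, and VM-thread handoff shown in the hunk above are elided):

#include <cstddef>

enum class Cause { none, perm_gen_full, last_ditch };

// Stubs for illustration only.
void* try_alloc(std::size_t)                 { return nullptr; }
void* expand_and_alloc(std::size_t)          { return nullptr; }
void* collect_then_alloc(std::size_t, Cause) { return nullptr; }  // full GC, then allocate
bool  below_expansion_limit()                { return true; }

void* allocate_escalating(std::size_t size) {
  Cause prev = Cause::none;
  for (;;) {
    if (void* p = try_alloc(size)) return p;            // fast path
    // Expand eagerly only below the limit on the first pass; after any GC,
    // always retry expansion because the collection may have shrunk the gen.
    if (below_expansion_limit() || prev != Cause::none) {
      if (void* p = expand_and_alloc(size)) return p;
    }
    if (prev == Cause::last_ditch) return nullptr;      // caller throws OOM
    Cause next = (prev == Cause::none) ? Cause::perm_gen_full
                                       : Cause::last_ditch;
    if (void* p = collect_then_alloc(size, next)) return p;
    prev = next;                                        // escalate and loop
  }
}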

src/share/vm/memory/permGen.hpp

@@ -38,6 +38,8 @@ class PermGen : public CHeapObj {
   size_t _capacity_expansion_limit;  // maximum expansion allowed without a
                                      // full gc occuring
+  HeapWord* mem_allocate_in_gen(size_t size, Generation* gen);
+
  public:
   enum Name {
     MarkSweepCompact, MarkSweep, ConcurrentMarkSweep

src/share/vm/runtime/globals.hpp

@@ -1919,6 +1919,10 @@ class CommandLineFlags {
   develop(bool, IgnoreLibthreadGPFault, false,                              \
           "Suppress workaround for libthread GP fault")                     \
                                                                             \
+  product(bool, PrintJNIGCStalls, false,                                    \
+          "Print diagnostic message when GC is stalled "                    \
+          "by JNI critical section")                                        \
+                                                                            \
   /* JVMTI heap profiling */                                                \
                                                                             \
   diagnostic(bool, TraceJVMTIObjectTagging, false,                          \
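PrintJNIGCStalls is a product flag, so it is available on release builds; because the print site in gcLocker.cpp also tests PrintGCDetails, both flags must be enabled to see the message, e.g. (application name illustrative):

java -XX:+PrintGCDetails -XX:+PrintJNIGCStalls MyApp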

src/share/vm/runtime/vm_operations.hpp

@@ -49,6 +49,7 @@
   template(GenCollectFull)                        \
   template(GenCollectFullConcurrent)              \
   template(GenCollectForAllocation)               \
+  template(GenCollectForPermanentAllocation)      \
   template(ParallelGCFailedAllocation)            \
   template(ParallelGCFailedPermanentAllocation)   \
   template(ParallelGCSystemGC)                    \