6423256: GC stacks should use a better data structure

6942771: SEGV in ParScanThreadState::take_from_overflow_stack

Reviewed-by: apetrusenko, ysr, pbk
John Coomes 2010-09-28 15:56:15 -07:00
parent aff36499e7
commit 1cdd538ea5
30 changed files with 718 additions and 402 deletions
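
The data-structure change itself lives in two new files, utilities/stack.hpp and utilities/stack.inline.hpp, which appear in this diff only as includeDB entries. Throughout the collectors, lazily heap-allocated GrowableArray<T>* stacks, which grow by reallocating and copying their backing array and force NULL checks at every use, are replaced by value-type Stack<T> members that keep elements in fixed-size segments chained into a list. The following is a minimal sketch of that idea, not the HotSpot implementation: every name here is invented for illustration, and the clear(bool) cache behavior merely imitates the clear(true) calls visible in the hunks below.

#include <cassert>
#include <cstddef>

// Minimal sketch of a segmented stack in the spirit of the Stack<E> this
// commit introduces (the real one allocates from the VM's C heap and is
// more configurable). Elements live in fixed-size segments chained into a
// singly-linked list, so a push never reallocates or copies existing
// elements, and an empty stack owns no memory until the first push.
template <typename E, size_t seg_size = 1024>
class SegmentedStack {
  struct Segment { E data[seg_size]; Segment* link; };

  Segment* _top;    // segment holding the top of the stack, NULL if empty
  Segment* _cache;  // at most one retired segment kept for cheap re-push
  size_t   _size;   // total number of elements

public:
  SegmentedStack() : _top(NULL), _cache(NULL), _size(0) {}
  ~SegmentedStack() { clear(true); }

  bool   is_empty() const { return _size == 0; }
  size_t size()     const { return _size; }

  void push(const E& e) {
    if (_size % seg_size == 0) {      // current segment full (or none yet)
      Segment* seg = (_cache != NULL) ? _cache : new Segment();
      _cache = NULL;
      seg->link = _top;
      _top = seg;
    }
    _top->data[_size % seg_size] = e;
    _size++;
  }

  E pop() {
    assert(_size > 0);
    _size--;
    E e = _top->data[_size % seg_size];
    if (_size % seg_size == 0) {      // top segment drained; retire it
      Segment* dead = _top;
      _top = dead->link;
      delete _cache;                  // keep at most one cached segment
      _cache = dead;
    }
    return e;
  }

  // clear(true) also frees the cached segment, mirroring the
  // stack.clear(true) calls that appear in the diff below.
  void clear(bool clear_cache) {
    while (_top != NULL) {
      Segment* dead = _top;
      _top = dead->link;
      delete dead;
    }
    _size = 0;
    if (clear_cache) {
      delete _cache;
      _cache = NULL;
    }
  }
};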


@@ -540,8 +540,6 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
   _is_alive_closure(_span, &_markBitMap),
   _restart_addr(NULL),
   _overflow_list(NULL),
-  _preserved_oop_stack(NULL),
-  _preserved_mark_stack(NULL),
   _stats(cmsGen),
   _eden_chunk_array(NULL),     // may be set in ctor body
   _eden_chunk_capacity(0),     // -- ditto --
@@ -8907,23 +8905,10 @@ void CMSCollector::par_push_on_overflow_list(oop p) {
 // failures where possible, thus, incrementally hardening the VM
 // in such low resource situations.
 void CMSCollector::preserve_mark_work(oop p, markOop m) {
-  if (_preserved_oop_stack == NULL) {
-    assert(_preserved_mark_stack == NULL,
-           "bijection with preserved_oop_stack");
-    // Allocate the stacks
-    _preserved_oop_stack  = new (ResourceObj::C_HEAP)
-      GrowableArray<oop>(PreserveMarkStackSize, true);
-    _preserved_mark_stack = new (ResourceObj::C_HEAP)
-      GrowableArray<markOop>(PreserveMarkStackSize, true);
-    if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
-      vm_exit_out_of_memory(2* PreserveMarkStackSize * sizeof(oop) /* punt */,
-                            "Preserved Mark/Oop Stack for CMS (C-heap)");
-    }
-  }
-  _preserved_oop_stack->push(p);
-  _preserved_mark_stack->push(m);
+  _preserved_oop_stack.push(p);
+  _preserved_mark_stack.push(m);
   assert(m == p->mark(), "Mark word changed");
-  assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
+  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
          "bijection");
 }
@@ -8965,42 +8950,30 @@ void CMSCollector::par_preserve_mark_if_necessary(oop p) {
 // effect on performance so great that this will
 // likely just be in the noise anyway.
 void CMSCollector::restore_preserved_marks_if_any() {
-  if (_preserved_oop_stack == NULL) {
-    assert(_preserved_mark_stack == NULL,
-           "bijection with preserved_oop_stack");
-    return;
-  }
   assert(SafepointSynchronize::is_at_safepoint(),
          "world should be stopped");
   assert(Thread::current()->is_ConcurrentGC_thread() ||
          Thread::current()->is_VM_thread(),
          "should be single-threaded");
+  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
+         "bijection");

-  int length = _preserved_oop_stack->length();
-  assert(_preserved_mark_stack->length() == length, "bijection");
-  for (int i = 0; i < length; i++) {
-    oop p = _preserved_oop_stack->at(i);
+  while (!_preserved_oop_stack.is_empty()) {
+    oop p = _preserved_oop_stack.pop();
     assert(p->is_oop(), "Should be an oop");
     assert(_span.contains(p), "oop should be in _span");
     assert(p->mark() == markOopDesc::prototype(),
            "Set when taken from overflow list");
-    markOop m = _preserved_mark_stack->at(i);
+    markOop m = _preserved_mark_stack.pop();
     p->set_mark(m);
   }
-  _preserved_mark_stack->clear();
-  _preserved_oop_stack->clear();
-  assert(_preserved_mark_stack->is_empty() &&
-         _preserved_oop_stack->is_empty(),
+  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
          "stacks were cleared above");
 }

 #ifndef PRODUCT
 bool CMSCollector::no_preserved_marks() const {
-  return (   (   _preserved_mark_stack == NULL
-              && _preserved_oop_stack == NULL)
-          || (   _preserved_mark_stack->is_empty()
-              && _preserved_oop_stack->is_empty()));
+  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
 }
 #endif
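
The hunks above show the pattern this commit repeats in every collector: an oop stack and a mark stack pushed and popped in lock-step, so equal sizes are the invariant ("bijection") the asserts verify, and the lazy-allocation NULL checks disappear entirely. A toy demonstration, reusing the SegmentedStack sketch above; Obj and its int mark are invented stand-ins for oop and markOop.

#include <cassert>
#include <cstdio>

struct Obj { int mark; };   // stand-in for an oop with a header word

int main() {
  SegmentedStack<Obj*> preserved_objs;   // cf. _preserved_oop_stack
  SegmentedStack<int>  preserved_marks;  // cf. _preserved_mark_stack

  Obj a = { 41 };
  Obj b = { 42 };
  Obj* objs[2] = { &a, &b };
  for (int i = 0; i < 2; ++i) {
    preserved_objs.push(objs[i]);        // preserve object and mark together
    preserved_marks.push(objs[i]->mark);
    objs[i]->mark = 0;                   // header gets reused during the GC
  }
  assert(preserved_objs.size() == preserved_marks.size());  // "bijection"

  while (!preserved_objs.is_empty()) {   // restore by popping in lock-step
    Obj* p = preserved_objs.pop();
    p->mark = preserved_marks.pop();
  }
  printf("%d %d\n", a.mark, b.mark);     // prints: 41 42
  return 0;
}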


@@ -537,8 +537,8 @@ class CMSCollector: public CHeapObj {
   // The following array-pair keeps track of mark words
   // displaced for accomodating overflow list above.
   // This code will likely be revisited under RFE#4922830.
-  GrowableArray<oop>*     _preserved_oop_stack;
-  GrowableArray<markOop>* _preserved_mark_stack;
+  Stack<oop>      _preserved_oop_stack;
+  Stack<markOop>  _preserved_mark_stack;

   int*             _hash_seed;


@@ -1691,8 +1691,8 @@ public:
     ref = new_ref;
   }

-  int refs_to_scan()            { return refs()->size(); }
-  int overflowed_refs_to_scan() { return refs()->overflow_stack()->length(); }
+  int refs_to_scan()            { return (int)refs()->size(); }
+  int overflowed_refs_to_scan() { return (int)refs()->overflow_stack()->size(); }

   template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
     if (G1DeferredRSUpdate) {


@@ -101,22 +101,6 @@ void G1MarkSweep::allocate_stacks() {
   GenMarkSweep::_preserved_count_max = 0;
   GenMarkSweep::_preserved_marks = NULL;
   GenMarkSweep::_preserved_count = 0;
-  GenMarkSweep::_preserved_mark_stack = NULL;
-  GenMarkSweep::_preserved_oop_stack = NULL;
-
-  GenMarkSweep::_marking_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
-  GenMarkSweep::_objarray_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);
-
-  int size = SystemDictionary::number_of_classes() * 2;
-  GenMarkSweep::_revisit_klass_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
-  // (#klass/k)^2 for k ~ 10 appears a better fit, but this will have to do
-  // for now until we have a chance to work out a more optimal setting.
-  GenMarkSweep::_revisit_mdo_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
 }

 void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
@@ -145,7 +129,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
   // Follow system dictionary roots and unload classes
   bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);

-  assert(GenMarkSweep::_marking_stack->is_empty(),
+  assert(GenMarkSweep::_marking_stack.is_empty(),
          "stack should be empty by now");

   // Follow code cache roots (has to be done after system dictionary,
@@ -157,19 +141,19 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
   // Update subklass/sibling/implementor links of live klasses
   GenMarkSweep::follow_weak_klass_links();
-  assert(GenMarkSweep::_marking_stack->is_empty(),
+  assert(GenMarkSweep::_marking_stack.is_empty(),
          "stack should be empty by now");

   // Visit memoized MDO's and clear any unmarked weak refs
   GenMarkSweep::follow_mdo_weak_refs();
-  assert(GenMarkSweep::_marking_stack->is_empty(), "just drained");
+  assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");

   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(&GenMarkSweep::is_alive);
   StringTable::unlink(&GenMarkSweep::is_alive);

-  assert(GenMarkSweep::_marking_stack->is_empty(),
+  assert(GenMarkSweep::_marking_stack.is_empty(),
          "stack should be empty by now");
 }


@@ -171,6 +171,7 @@ concurrentMarkSweepGeneration.hpp generation.hpp
 concurrentMarkSweepGeneration.hpp       generationCounters.hpp
 concurrentMarkSweepGeneration.hpp       memoryService.hpp
 concurrentMarkSweepGeneration.hpp       mutexLocker.hpp
+concurrentMarkSweepGeneration.hpp       stack.inline.hpp
 concurrentMarkSweepGeneration.hpp       taskqueue.hpp
 concurrentMarkSweepGeneration.hpp       virtualspace.hpp
 concurrentMarkSweepGeneration.hpp       yieldingWorkgroup.hpp


@@ -187,9 +187,11 @@ psCompactionManager.cpp parMarkBitMap.hpp
 psCompactionManager.cpp                 psParallelCompact.hpp
 psCompactionManager.cpp                 psCompactionManager.hpp
 psCompactionManager.cpp                 psOldGen.hpp
+psCompactionManager.cpp                 stack.inline.hpp
 psCompactionManager.cpp                 systemDictionary.hpp

 psCompactionManager.hpp                 allocation.hpp
+psCompactionManager.hpp                 stack.hpp
 psCompactionManager.hpp                 taskqueue.hpp

 psCompactionManager.inline.hpp          psCompactionManager.hpp
@@ -233,12 +235,14 @@ psMarkSweep.cpp referencePolicy.hpp
 psMarkSweep.cpp                         referenceProcessor.hpp
 psMarkSweep.cpp                         safepoint.hpp
 psMarkSweep.cpp                         spaceDecorator.hpp
+psMarkSweep.cpp                         stack.inline.hpp
 psMarkSweep.cpp                         symbolTable.hpp
 psMarkSweep.cpp                         systemDictionary.hpp
 psMarkSweep.cpp                         vmThread.hpp

 psMarkSweep.hpp                         markSweep.inline.hpp
 psMarkSweep.hpp                         collectorCounters.hpp
+psMarkSweep.hpp                         stack.hpp

 psMarkSweepDecorator.cpp                liveRange.hpp
 psMarkSweepDecorator.cpp                markSweep.inline.hpp
@@ -280,6 +284,7 @@ psParallelCompact.cpp psYoungGen.hpp
 psParallelCompact.cpp                   referencePolicy.hpp
 psParallelCompact.cpp                   referenceProcessor.hpp
 psParallelCompact.cpp                   safepoint.hpp
+psParallelCompact.cpp                   stack.inline.hpp
 psParallelCompact.cpp                   symbolTable.hpp
 psParallelCompact.cpp                   systemDictionary.hpp
 psParallelCompact.cpp                   vmThread.hpp
@@ -367,6 +372,7 @@ psScavenge.cpp referencePolicy.hpp
 psScavenge.cpp                          referenceProcessor.hpp
 psScavenge.cpp                          resourceArea.hpp
 psScavenge.cpp                          spaceDecorator.hpp
+psScavenge.cpp                          stack.inline.hpp
 psScavenge.cpp                          threadCritical.hpp
 psScavenge.cpp                          vmThread.hpp
 psScavenge.cpp                          vm_operations.hpp
@@ -376,6 +382,7 @@ psScavenge.hpp cardTableExtension.hpp
 psScavenge.hpp                          collectorCounters.hpp
 psScavenge.hpp                          oop.hpp
 psScavenge.hpp                          psVirtualspace.hpp
+psScavenge.hpp                          stack.hpp

 psScavenge.inline.hpp                   cardTableExtension.hpp
 psScavenge.inline.hpp                   parallelScavengeHeap.hpp


@@ -93,11 +93,13 @@ markSweep.cpp oop.inline.hpp
 markSweep.hpp                           growableArray.hpp
 markSweep.hpp                           markOop.hpp
 markSweep.hpp                           oop.hpp
+markSweep.hpp                           stack.hpp
 markSweep.hpp                           timer.hpp
 markSweep.hpp                           universe.hpp

 markSweep.inline.hpp                    collectedHeap.hpp
 markSweep.inline.hpp                    markSweep.hpp
+markSweep.inline.hpp                    stack.inline.hpp

 mutableSpace.hpp                        immutableSpace.hpp
 mutableSpace.hpp                        memRegion.hpp


@@ -34,12 +34,12 @@ ParScanThreadState::ParScanThreadState(Space* to_space_,
                                        Generation* old_gen_,
                                        int thread_num_,
                                        ObjToScanQueueSet* work_queue_set_,
-                                       GrowableArray<oop>** overflow_stack_set_,
+                                       Stack<oop>* overflow_stacks_,
                                        size_t desired_plab_sz_,
                                        ParallelTaskTerminator& term_) :
   _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
   _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
-  _overflow_stack(overflow_stack_set_[thread_num_]),
+  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
   _ageTable(false), // false ==> not the global age table, no perf data.
   _to_space_alloc_buffer(desired_plab_sz_),
   _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
@@ -159,11 +159,12 @@ bool ParScanThreadState::take_from_overflow_stack() {
   assert(ParGCUseLocalOverflow, "Else should not call");
   assert(young_gen()->overflow_list() == NULL, "Error");
   ObjToScanQueue* queue = work_queue();
-  GrowableArray<oop>* of_stack = overflow_stack();
-  uint num_overflow_elems = of_stack->length();
-  uint num_take_elems     = MIN2(MIN2((queue->max_elems() - queue->size())/4,
-                                      (juint)ParGCDesiredObjsFromOverflowList),
-                                 num_overflow_elems);
+  Stack<oop>* const of_stack = overflow_stack();
+  const size_t num_overflow_elems = of_stack->size();
+  const size_t space_available = queue->max_elems() - queue->size();
+  const size_t num_take_elems = MIN3(space_available / 4,
+                                     ParGCDesiredObjsFromOverflowList,
+                                     num_overflow_elems);
   // Transfer the most recent num_take_elems from the overflow
   // stack to our work queue.
   for (size_t i = 0; i != num_take_elems; i++) {
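
The rewritten computation above stays entirely in size_t and collapses the nested MIN2 calls, with their juint cast, into a single MIN3. A stand-alone rendering of the same clamp follows; the values are invented stand-ins for the queue state and for ParGCDesiredObjsFromOverflowList, and min2/min3 are local sketches of HotSpot's MIN2/MIN3 macros.

#include <cstddef>
#include <cstdio>

// Local stand-ins for HotSpot's MIN2/MIN3.
template <class T> static T min2(T a, T b)      { return a < b ? a : b; }
template <class T> static T min3(T a, T b, T c) { return min2(min2(a, b), c); }

int main() {
  // Hypothetical queue state; all values are size_t, so no narrowing casts.
  const size_t max_elems = 1024, cur_size = 100;
  const size_t desired_from_overflow = 20;   // cf. ParGCDesiredObjsFromOverflowList
  const size_t num_overflow_elems = 7;       // elements on the overflow stack

  const size_t space_available = max_elems - cur_size;
  const size_t num_take = min3(space_available / 4,
                               desired_from_overflow,
                               num_overflow_elems);
  printf("take %zu elements\n", num_take);   // prints: take 7 elements
  return 0;
}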
@@ -271,7 +272,7 @@ public:
                         ParNewGeneration&       gen,
                         Generation&             old_gen,
                         ObjToScanQueueSet&      queue_set,
-                        GrowableArray<oop>**    overflow_stacks_,
+                        Stack<oop>*             overflow_stacks_,
                         size_t                  desired_plab_sz,
                         ParallelTaskTerminator& term);
@@ -302,17 +303,19 @@ private:
 ParScanThreadStateSet::ParScanThreadStateSet(
   int num_threads, Space& to_space, ParNewGeneration& gen,
   Generation& old_gen, ObjToScanQueueSet& queue_set,
-  GrowableArray<oop>** overflow_stack_set_,
+  Stack<oop>* overflow_stacks,
   size_t desired_plab_sz, ParallelTaskTerminator& term)
   : ResourceArray(sizeof(ParScanThreadState), num_threads),
     _gen(gen), _next_gen(old_gen), _term(term)
 {
   assert(num_threads > 0, "sanity check!");
+  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
+         "overflow_stack allocation mismatch");
   // Initialize states.
   for (int i = 0; i < num_threads; ++i) {
     new ((ParScanThreadState*)_data + i)
         ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
-                           overflow_stack_set_, desired_plab_sz, term);
+                           overflow_stacks, desired_plab_sz, term);
   }
 }
@@ -596,14 +599,11 @@ ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
   for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
     _task_queues->queue(i2)->initialize();

-  _overflow_stacks = NEW_C_HEAP_ARRAY(GrowableArray<oop>*, ParallelGCThreads);
-  guarantee(_overflow_stacks != NULL, "Overflow stack set allocation failure");
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    if (ParGCUseLocalOverflow) {
-      _overflow_stacks[i] = new (ResourceObj::C_HEAP) GrowableArray<oop>(512, true);
-      guarantee(_overflow_stacks[i] != NULL, "Overflow Stack allocation failure.");
-    } else {
-      _overflow_stacks[i] = NULL;
+  _overflow_stacks = NULL;
+  if (ParGCUseLocalOverflow) {
+    _overflow_stacks = NEW_C_HEAP_ARRAY(Stack<oop>, ParallelGCThreads);
+    for (size_t i = 0; i < ParallelGCThreads; ++i) {
+      new (_overflow_stacks + i) Stack<oop>();
     }
   }
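
NEW_C_HEAP_ARRAY hands back raw storage with no constructors run, which was fine for an array of pointers but not for an array of Stack<oop> objects; hence the placement-new loop above. The same pattern in a self-contained sketch, with malloc standing in for the C-heap allocator and an invented Counter type standing in for Stack<oop>:

#include <new>      // placement operator new
#include <cstdlib>  // malloc/free
#include <cstdio>

struct Counter {             // stand-in for Stack<oop>: has a constructor
  size_t n;
  Counter() : n(0) {}
};

int main() {
  const size_t nthreads = 4; // stand-in for ParallelGCThreads
  // Raw, constructor-less storage, like NEW_C_HEAP_ARRAY:
  Counter* counters = static_cast<Counter*>(malloc(nthreads * sizeof(Counter)));
  for (size_t i = 0; i < nthreads; ++i) {
    new (counters + i) Counter();    // run the constructor in raw storage
  }
  printf("%zu\n", counters[0].n);    // prints 0: properly constructed

  for (size_t i = 0; i < nthreads; ++i) {
    counters[i].~Counter();          // destroy in place before freeing
  }
  free(counters);
  return 0;
}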
@@ -937,12 +937,9 @@ void ParNewGeneration::collect(bool full,
   } else {
     assert(HandlePromotionFailure,
       "Should only be here if promotion failure handling is on");
-    if (_promo_failure_scan_stack != NULL) {
-      // Can be non-null because of reference processing.
-      // Free stack with its elements.
-      delete _promo_failure_scan_stack;
-      _promo_failure_scan_stack = NULL;
-    }
+    assert(_promo_failure_scan_stack.is_empty(), "post condition");
+    _promo_failure_scan_stack.clear(true); // Clear cached segments.
     remove_forwarding_pointers();
     if (PrintGCDetails) {
       gclog_or_tty->print(" (promotion failed)");
@@ -1397,8 +1394,8 @@ bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan
   size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                  (size_t)ParGCDesiredObjsFromOverflowList);

-  assert(par_scan_state->overflow_stack() == NULL, "Error");
   assert(!UseCompressedOops, "Error");
+  assert(par_scan_state->overflow_stack() == NULL, "Error");
   if (_overflow_list == NULL) return false;
   // Otherwise, there was something there; try claiming the list.


@@ -52,7 +52,7 @@ class ParScanThreadState {
   friend class ParScanThreadStateSet;
  private:
   ObjToScanQueue *_work_queue;
-  GrowableArray<oop>* _overflow_stack;
+  Stack<oop>* const _overflow_stack;

   ParGCAllocBuffer _to_space_alloc_buffer;
@@ -120,7 +120,7 @@ class ParScanThreadState {
   ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
                      Generation* old_gen_, int thread_num_,
                      ObjToScanQueueSet* work_queue_set_,
-                     GrowableArray<oop>** overflow_stack_set_,
+                     Stack<oop>* overflow_stacks_,
                      size_t desired_plab_sz_,
                      ParallelTaskTerminator& term_);
@@ -144,7 +144,7 @@ class ParScanThreadState {
   void trim_queues(int max_size);

   // Private overflow stack usage
-  GrowableArray<oop>* overflow_stack() { return _overflow_stack; }
+  Stack<oop>* overflow_stack() { return _overflow_stack; }
   bool take_from_overflow_stack();
   void push_on_overflow_stack(oop p);
@@ -301,7 +301,7 @@ class ParNewGeneration: public DefNewGeneration {
   ObjToScanQueueSet* _task_queues;

   // Per-worker-thread local overflow stacks
-  GrowableArray<oop>** _overflow_stacks;
+  Stack<oop>* _overflow_stacks;

   // Desired size of survivor space plab's
   PLABStats _plab_stats;


@@ -59,8 +59,6 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  assert(cm->stacks_have_been_allocated(),
-         "Stack space has not been allocated");
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);

   switch (_root_type) {
@@ -119,7 +117,6 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
   // Do the real work
   cm->follow_marking_stacks();
-  // cm->deallocate_stacks();
 }
@@ -135,8 +132,6 @@ void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  assert(cm->stacks_have_been_allocated(),
-         "Stack space has not been allocated");
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
   PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
   _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),


@@ -46,23 +46,6 @@ ParCompactionManager::ParCompactionManager() :
   marking_stack()->initialize();
   _objarray_stack.initialize();
   region_stack()->initialize();
-
-  // Note that _revisit_klass_stack is allocated out of the
-  // C heap (as opposed to out of ResourceArena).
-  int size =
-    (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
-  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
-  // From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
-  // have to do for now until we are able to investigate a more optimal setting.
-  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
-}
-
-ParCompactionManager::~ParCompactionManager() {
-  delete _revisit_klass_stack;
-  delete _revisit_mdo_stack;
-  // _manager_array and _stack_array are statics
-  // shared with all instances of ParCompactionManager
-  // should not be deallocated.
 }

 void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
@@ -134,9 +117,9 @@ ParCompactionManager::gc_thread_compaction_manager(int index) {
 }

 void ParCompactionManager::reset() {
-  for(uint i=0; i<ParallelGCThreads+1; i++) {
-    manager_array(i)->revisit_klass_stack()->clear();
-    manager_array(i)->revisit_mdo_stack()->clear();
+  for(uint i = 0; i < ParallelGCThreads + 1; i++) {
+    assert(manager_array(i)->revisit_klass_stack()->is_empty(), "sanity");
+    assert(manager_array(i)->revisit_mdo_stack()->is_empty(), "sanity");
   }
 }
@@ -178,10 +161,3 @@ void ParCompactionManager::drain_region_stacks() {
     }
   } while (!region_stack()->is_empty());
 }
-
-#ifdef ASSERT
-bool ParCompactionManager::stacks_have_been_allocated() {
-  return (revisit_klass_stack()->data_addr() != NULL &&
-          revisit_mdo_stack()->data_addr() != NULL);
-}
-#endif
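
The destructor can be removed outright because the revisit stacks become embedded members (see the psCompactionManager.hpp hunks below) and are destroyed with the enclosing object. The general point, in a hypothetical before/after; std::vector is only a stand-in here, since HotSpot deliberately avoids the STL:

#include <vector>

struct Before {
  std::vector<int>* stack;                  // owned pointer: needs explicit delete
  Before() : stack(new std::vector<int>) {}
  ~Before() { delete stack; }               // easy to forget, or to double-free
                                            // (copying Before would do exactly that)
};

struct After {
  std::vector<int> stack;                   // embedded member: destroyed automatically
  // no user-declared destructor needed
};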


@@ -80,10 +80,9 @@ private:
   // type of TaskQueue.
   RegionTaskQueue              _region_stack;

-#if 1  // does this happen enough to need a per thread stack?
-  GrowableArray<Klass*>*      _revisit_klass_stack;
-  GrowableArray<DataLayout*>* _revisit_mdo_stack;
-#endif
+  Stack<Klass*>               _revisit_klass_stack;
+  Stack<DataLayout*>          _revisit_mdo_stack;

   static ParMarkBitMap* _mark_bitmap;

   Action _action;
@@ -113,10 +112,7 @@ private:
   inline static ParCompactionManager* manager_array(int index);

   ParCompactionManager();
-  ~ParCompactionManager();
-
-  void allocate_stacks();
-  void deallocate_stacks();
   ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }

   // Take actions in preparation for a compaction.
@@ -129,11 +125,8 @@ private:
   bool should_verify_only();
   bool should_reset_only();

-#if 1
-  // Probably stays as a growable array
-  GrowableArray<Klass*>* revisit_klass_stack() { return _revisit_klass_stack; }
-  GrowableArray<DataLayout*>* revisit_mdo_stack() { return _revisit_mdo_stack; }
-#endif
+  Stack<Klass*>* revisit_klass_stack() { return &_revisit_klass_stack; }
+  Stack<DataLayout*>* revisit_mdo_stack() { return &_revisit_mdo_stack; }

   // Save for later processing.  Must not fail.
   inline void push(oop obj) { _marking_stack.push(obj); }
@@ -162,10 +155,6 @@ private:
   // Process tasks remaining on any stack
   void drain_region_stacks();

-  // Debugging support
-#ifdef ASSERT
-  bool stacks_have_been_allocated();
-#endif
 };

 inline ParCompactionManager* ParCompactionManager::manager_array(int index) {


@@ -466,33 +466,16 @@ void PSMarkSweep::allocate_stacks() {
   _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
   // Now divide by the size of a PreservedMark
   _preserved_count_max /= sizeof(PreservedMark);
-
-  _preserved_mark_stack = NULL;
-  _preserved_oop_stack = NULL;
-
-  _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
-  _objarray_stack = new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);
-
-  int size = SystemDictionary::number_of_classes() * 2;
-  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
-  // (#klass/k)^2, for k ~ 10 appears a better setting, but this will have to do for
-  // now until we investigate a more optimal setting.
-  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
 }

 void PSMarkSweep::deallocate_stacks() {
-  if (_preserved_oop_stack) {
-    delete _preserved_mark_stack;
-    _preserved_mark_stack = NULL;
-    delete _preserved_oop_stack;
-    _preserved_oop_stack = NULL;
-  }
-
-  delete _marking_stack;
-  delete _objarray_stack;
-  delete _revisit_klass_stack;
-  delete _revisit_mdo_stack;
+  _preserved_mark_stack.clear(true);
+  _preserved_oop_stack.clear(true);
+  _marking_stack.clear();
+  _objarray_stack.clear(true);
+  _revisit_klass_stack.clear(true);
+  _revisit_mdo_stack.clear(true);
 }

 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
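
Note the asymmetry in deallocate_stacks() above: _marking_stack gets a plain clear() while the others get clear(true). In the new Stack, clearing without the flag keeps segment memory cached for the next collection; passing true returns it to the C heap. Illustrated with the SegmentedStack sketch from the top of this commit, whose clear(bool) imitates that behavior:

#include <cstdio>

// (uses the SegmentedStack sketch from the top of this commit)
int main() {
  SegmentedStack<int> s;
  for (int i = 0; i < 5000; ++i) s.push(i);  // allocates several segments
  while (!s.is_empty()) s.pop();             // at most one segment stays cached
  s.clear(false);  // reset; keep the cached segment for the next cycle
  s.clear(true);   // release everything back to the allocator
  printf("empty: %d\n", (int)s.is_empty()); // prints: empty: 1
  return 0;
}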
@@ -542,17 +525,17 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Update subklass/sibling/implementor links of live klasses
   follow_weak_klass_links();
-  assert(_marking_stack->is_empty(), "just drained");
+  assert(_marking_stack.is_empty(), "just drained");

   // Visit memoized mdo's and clear unmarked weak refs
   follow_mdo_weak_refs();
-  assert(_marking_stack->is_empty(), "just drained");
+  assert(_marking_stack.is_empty(), "just drained");

   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(is_alive_closure());
   StringTable::unlink(is_alive_closure());

-  assert(_marking_stack->is_empty(), "stack should be empty by now");
+  assert(_marking_stack.is_empty(), "stack should be empty by now");
 }


@@ -2170,6 +2170,16 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
     heap->update_counters();
   }

+#ifdef ASSERT
+  for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
+    ParCompactionManager* const cm =
+      ParCompactionManager::manager_array(int(i));
+    assert(cm->marking_stack()->is_empty(),       "should be empty");
+    assert(cm->region_stack()->is_empty(),        "should be empty");
+    assert(cm->revisit_klass_stack()->is_empty(), "should be empty");
+  }
+#endif // ASSERT
+
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
     gclog_or_tty->print(" VerifyAfterGC:");
@@ -2711,21 +2721,22 @@ PSParallelCompact::follow_weak_klass_links() {
   // All klasses on the revisit stack are marked at this point.
   // Update and follow all subklass, sibling and implementor links.
   if (PrintRevisitStats) {
-    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
+    gclog_or_tty->print_cr("#classes in system dictionary = %d",
+                           SystemDictionary::number_of_classes());
   }
   for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     ParCompactionManager* cm = ParCompactionManager::manager_array(i);
     KeepAliveClosure keep_alive_closure(cm);
-    int length = cm->revisit_klass_stack()->length();
+    Stack<Klass*>* const rks = cm->revisit_klass_stack();
     if (PrintRevisitStats) {
-      gclog_or_tty->print_cr("Revisit klass stack[%d] length = %d", i, length);
+      gclog_or_tty->print_cr("Revisit klass stack[%u] length = " SIZE_FORMAT,
+                             i, rks->size());
     }
-    for (int j = 0; j < length; j++) {
-      cm->revisit_klass_stack()->at(j)->follow_weak_klass_links(
-        is_alive_closure(),
-        &keep_alive_closure);
+    while (!rks->is_empty()) {
+      Klass* const k = rks->pop();
+      k->follow_weak_klass_links(is_alive_closure(), &keep_alive_closure);
     }
-    // revisit_klass_stack is cleared in reset()
     cm->follow_marking_stacks();
   }
 }
@@ -2744,19 +2755,20 @@ void PSParallelCompact::follow_mdo_weak_refs() {
   // we can visit and clear any weak references from MDO's which
   // we memoized during the strong marking phase.
   if (PrintRevisitStats) {
-    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
+    gclog_or_tty->print_cr("#classes in system dictionary = %d",
+                           SystemDictionary::number_of_classes());
   }
   for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     ParCompactionManager* cm = ParCompactionManager::manager_array(i);
-    GrowableArray<DataLayout*>* rms = cm->revisit_mdo_stack();
-    int length = rms->length();
+    Stack<DataLayout*>* rms = cm->revisit_mdo_stack();
     if (PrintRevisitStats) {
-      gclog_or_tty->print_cr("Revisit MDO stack[%d] length = %d", i, length);
+      gclog_or_tty->print_cr("Revisit MDO stack[%u] size = " SIZE_FORMAT,
+                             i, rms->size());
     }
-    for (int j = 0; j < length; j++) {
-      rms->at(j)->follow_weak_refs(is_alive_closure());
+    while (!rms->is_empty()) {
+      rms->pop()->follow_weak_refs(is_alive_closure());
     }
-    // revisit_mdo_stack is cleared in reset()
     cm->follow_marking_stacks();
   }
 }


@@ -185,7 +185,6 @@ void PSPromotionManager::reset() {

 void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
-  assert(claimed_stack_depth()->overflow_stack() != NULL, "invariant");
   totally_drain = totally_drain || _totally_drain;

 #ifdef ASSERT


@@ -34,9 +34,10 @@ bool PSScavenge::_survivor_overflow = false;
 int                        PSScavenge::_tenuring_threshold = 0;
 HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
 elapsedTimer               PSScavenge::_accumulated_time;
-GrowableArray<markOop>*    PSScavenge::_preserved_mark_stack = NULL;
-GrowableArray<oop>*        PSScavenge::_preserved_oop_stack = NULL;
+Stack<markOop>             PSScavenge::_preserved_mark_stack;
+Stack<oop>                 PSScavenge::_preserved_oop_stack;
 CollectorCounters*         PSScavenge::_counters = NULL;
+bool                       PSScavenge::_promotion_failed = false;

 // Define before use
 class PSIsAliveClosure: public BoolObjectClosure {
@@ -223,6 +224,9 @@ bool PSScavenge::invoke_no_policy() {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

+  assert(_preserved_mark_stack.is_empty(), "should be empty");
+  assert(_preserved_oop_stack.is_empty(), "should be empty");
+
   TimeStamp scavenge_entry;
   TimeStamp scavenge_midpoint;
   TimeStamp scavenge_exit;
@@ -636,24 +640,20 @@ void PSScavenge::clean_up_failed_promotion() {
     young_gen->object_iterate(&unforward_closure);

     if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("Restoring %d marks",
-                             _preserved_oop_stack->length());
+      gclog_or_tty->print_cr("Restoring %d marks", _preserved_oop_stack.size());
     }

     // Restore any saved marks.
-    for (int i=0; i < _preserved_oop_stack->length(); i++) {
-      oop obj      = _preserved_oop_stack->at(i);
-      markOop mark = _preserved_mark_stack->at(i);
+    while (!_preserved_oop_stack.is_empty()) {
+      oop obj      = _preserved_oop_stack.pop();
+      markOop mark = _preserved_mark_stack.pop();
       obj->set_mark(mark);
     }

-    // Deallocate the preserved mark and oop stacks.
-    // The stacks were allocated as CHeap objects, so
-    // we must call delete to prevent mem leaks.
-    delete _preserved_mark_stack;
-    _preserved_mark_stack = NULL;
-    delete _preserved_oop_stack;
-    _preserved_oop_stack = NULL;
+    // Clear the preserved mark and oop stack caches.
+    _preserved_mark_stack.clear(true);
+    _preserved_oop_stack.clear(true);
+    _promotion_failed = false;
   }

   // Reset the PromotionFailureALot counters.
@@ -661,27 +661,16 @@ void PSScavenge::clean_up_failed_promotion() {
 }

 // This method is called whenever an attempt to promote an object
-// fails. Some markOops will need preserving, some will not. Note
+// fails. Some markOops will need preservation, some will not. Note
 // that the entire eden is traversed after a failed promotion, with
 // all forwarded headers replaced by the default markOop. This means
 // it is not neccessary to preserve most markOops.
 void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
-  if (_preserved_mark_stack == NULL) {
-    ThreadCritical tc; // Lock and retest
-    if (_preserved_mark_stack == NULL) {
-      assert(_preserved_oop_stack == NULL, "Sanity");
-      _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
-      _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
-    }
-  }
-
-  // Because we must hold the ThreadCritical lock before using
-  // the stacks, we should be safe from observing partial allocations,
-  // which are also guarded by the ThreadCritical lock.
+  _promotion_failed = true;
   if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
     ThreadCritical tc;
-    _preserved_oop_stack->push(obj);
-    _preserved_mark_stack->push(obj_mark);
+    _preserved_oop_stack.push(obj);
+    _preserved_mark_stack.push(obj_mark);
   }
 }
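
Previously promotion_failed() was answered by testing _preserved_mark_stack != NULL, a side effect of the lazy allocation removed above. With always-present value stacks that signal is gone, and emptiness is not a substitute, since a failed promotion may preserve no marks at all when every displaced header is the default markOop; hence the new explicit flag. A hypothetical condensation of the control flow (names only mirror the diff):

static bool promotion_failed_flag = false;   // cf. PSScavenge::_promotion_failed

void oop_promotion_failed_sketch(bool mark_must_be_preserved) {
  promotion_failed_flag = true;    // recorded unconditionally...
  if (mark_must_be_preserved) {
    // ...while the preserved stacks are only pushed when the mark matters,
    // so "stack is empty" does not imply "no failure occurred".
  }
}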


@@ -61,9 +61,10 @@ class PSScavenge: AllStatic {
   static HeapWord*           _young_generation_boundary; // The lowest address possible for the young_gen.
                                                          // This is used to decide if an oop should be scavenged,
                                                          // cards should be marked, etc.
-  static GrowableArray<markOop>* _preserved_mark_stack; // List of marks to be restored after failed promotion
-  static GrowableArray<oop>*     _preserved_oop_stack;  // List of oops that need their mark restored.
+  static Stack<markOop>          _preserved_mark_stack; // List of marks to be restored after failed promotion
+  static Stack<oop>              _preserved_oop_stack;  // List of oops that need their mark restored.
   static CollectorCounters*      _counters;             // collector performance counters
+  static bool                    _promotion_failed;

   static void clean_up_failed_promotion();
@@ -79,8 +80,7 @@ class PSScavenge: AllStatic {
   // Accessors
   static int              tenuring_threshold()  { return _tenuring_threshold; }
   static elapsedTimer*    accumulated_time()    { return &_accumulated_time; }
-  static bool             promotion_failed()
-    { return _preserved_mark_stack != NULL; }
+  static bool             promotion_failed()    { return _promotion_failed; }
   static int              consecutive_skipped_scavenges()
     { return _consecutive_skipped_scavenges; }


@@ -25,13 +25,13 @@
 #include "incls/_precompiled.incl"
 #include "incls/_markSweep.cpp.incl"

-GrowableArray<oop>*          MarkSweep::_marking_stack       = NULL;
-GrowableArray<ObjArrayTask>* MarkSweep::_objarray_stack      = NULL;
-GrowableArray<Klass*>*       MarkSweep::_revisit_klass_stack = NULL;
-GrowableArray<DataLayout*>*  MarkSweep::_revisit_mdo_stack   = NULL;
-GrowableArray<oop>*          MarkSweep::_preserved_oop_stack = NULL;
-GrowableArray<markOop>*      MarkSweep::_preserved_mark_stack= NULL;
+Stack<oop>              MarkSweep::_marking_stack;
+Stack<DataLayout*>      MarkSweep::_revisit_mdo_stack;
+Stack<Klass*>           MarkSweep::_revisit_klass_stack;
+Stack<ObjArrayTask>     MarkSweep::_objarray_stack;
+Stack<oop>              MarkSweep::_preserved_oop_stack;
+Stack<markOop>          MarkSweep::_preserved_mark_stack;
 size_t                  MarkSweep::_preserved_count = 0;
 size_t                  MarkSweep::_preserved_count_max = 0;
 PreservedMark*          MarkSweep::_preserved_marks = NULL;
@@ -58,37 +58,42 @@ GrowableArray<size_t> * MarkSweep::_last_gc_live_oops_size = NULL;
 #endif

 void MarkSweep::revisit_weak_klass_link(Klass* k) {
-  _revisit_klass_stack->push(k);
+  _revisit_klass_stack.push(k);
 }

 void MarkSweep::follow_weak_klass_links() {
   // All klasses on the revisit stack are marked at this point.
   // Update and follow all subklass, sibling and implementor links.
   if (PrintRevisitStats) {
-    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
-    gclog_or_tty->print_cr("Revisit klass stack length = %d", _revisit_klass_stack->length());
+    gclog_or_tty->print_cr("#classes in system dictionary = %d",
+                           SystemDictionary::number_of_classes());
+    gclog_or_tty->print_cr("Revisit klass stack size = " SIZE_FORMAT,
+                           _revisit_klass_stack.size());
   }
-  for (int i = 0; i < _revisit_klass_stack->length(); i++) {
-    _revisit_klass_stack->at(i)->follow_weak_klass_links(&is_alive,&keep_alive);
+  while (!_revisit_klass_stack.is_empty()) {
+    Klass* const k = _revisit_klass_stack.pop();
+    k->follow_weak_klass_links(&is_alive, &keep_alive);
   }
   follow_stack();
 }

 void MarkSweep::revisit_mdo(DataLayout* p) {
-  _revisit_mdo_stack->push(p);
+  _revisit_mdo_stack.push(p);
 }

 void MarkSweep::follow_mdo_weak_refs() {
   // All strongly reachable oops have been marked at this point;
   // we can visit and clear any weak references from MDO's which
   // we memoized during the strong marking phase.
-  assert(_marking_stack->is_empty(), "Marking stack should be empty");
+  assert(_marking_stack.is_empty(), "Marking stack should be empty");
   if (PrintRevisitStats) {
-    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
-    gclog_or_tty->print_cr("Revisit MDO stack length = %d", _revisit_mdo_stack->length());
+    gclog_or_tty->print_cr("#classes in system dictionary = %d",
+                           SystemDictionary::number_of_classes());
+    gclog_or_tty->print_cr("Revisit MDO stack size = " SIZE_FORMAT,
+                           _revisit_mdo_stack.size());
   }
-  for (int i = 0; i < _revisit_mdo_stack->length(); i++) {
-    _revisit_mdo_stack->at(i)->follow_weak_refs(&is_alive);
+  while (!_revisit_mdo_stack.is_empty()) {
+    _revisit_mdo_stack.pop()->follow_weak_refs(&is_alive);
   }
   follow_stack();
 }
@@ -106,41 +111,37 @@ void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }

 void MarkSweep::follow_stack() {
   do {
-    while (!_marking_stack->is_empty()) {
-      oop obj = _marking_stack->pop();
+    while (!_marking_stack.is_empty()) {
+      oop obj = _marking_stack.pop();
       assert (obj->is_gc_marked(), "p must be marked");
       obj->follow_contents();
     }
     // Process ObjArrays one at a time to avoid marking stack bloat.
-    if (!_objarray_stack->is_empty()) {
-      ObjArrayTask task = _objarray_stack->pop();
+    if (!_objarray_stack.is_empty()) {
+      ObjArrayTask task = _objarray_stack.pop();
       objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
       k->oop_follow_contents(task.obj(), task.index());
     }
-  } while (!_marking_stack->is_empty() || !_objarray_stack->is_empty());
+  } while (!_marking_stack.is_empty() || !_objarray_stack.is_empty());
 }

 MarkSweep::FollowStackClosure MarkSweep::follow_stack_closure;

 void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }

-// We preserve the mark which should be replaced at the end and the location that it
-// will go.  Note that the object that this markOop belongs to isn't currently at that
-// address but it will be after phase4
+// We preserve the mark which should be replaced at the end and the location
+// that it will go.  Note that the object that this markOop belongs to isn't
+// currently at that address but it will be after phase4
 void MarkSweep::preserve_mark(oop obj, markOop mark) {
-  // we try to store preserved marks in the to space of the new generation since this
-  // is storage which should be available.  Most of the time this should be sufficient
-  // space for the marks we need to preserve but if it isn't we fall back in using
-  // GrowableArrays to keep track of the overflow.
+  // We try to store preserved marks in the to space of the new generation since
+  // this is storage which should be available.  Most of the time this should be
+  // sufficient space for the marks we need to preserve but if it isn't we fall
+  // back to using Stacks to keep track of the overflow.
   if (_preserved_count < _preserved_count_max) {
     _preserved_marks[_preserved_count++].init(obj, mark);
   } else {
-    if (_preserved_mark_stack == NULL) {
-      _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
-      _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
-    }
-    _preserved_mark_stack->push(mark);
-    _preserved_oop_stack->push(obj);
+    _preserved_mark_stack.push(mark);
+    _preserved_oop_stack.push(obj);
   }
 }
@@ -151,8 +152,7 @@ void MarkSweep::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _
 void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }

 void MarkSweep::adjust_marks() {
-  assert(_preserved_oop_stack == NULL ||
-         _preserved_oop_stack->length() == _preserved_mark_stack->length(),
+  assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(),
          "inconsistent preserved oop stacks");

   // adjust the oops we saved earlier
@@ -161,21 +161,19 @@ void MarkSweep::adjust_marks() {
   }

   // deal with the overflow stack
-  if (_preserved_oop_stack) {
-    for (int i = 0; i < _preserved_oop_stack->length(); i++) {
-      oop* p = _preserved_oop_stack->adr_at(i);
-      adjust_pointer(p);
-    }
+  StackIterator<oop> iter(_preserved_oop_stack);
+  while (!iter.is_empty()) {
+    oop* p = iter.next_addr();
+    adjust_pointer(p);
   }
 }

 void MarkSweep::restore_marks() {
-  assert(_preserved_oop_stack == NULL ||
-         _preserved_oop_stack->length() == _preserved_mark_stack->length(),
+  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
          "inconsistent preserved oop stacks");
   if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Restoring %d marks", _preserved_count +
-                           (_preserved_oop_stack ? _preserved_oop_stack->length() : 0));
+    gclog_or_tty->print_cr("Restoring %d marks",
+                           _preserved_count + _preserved_oop_stack.size());
   }

   // restore the marks we saved earlier
@@ -184,12 +182,10 @@ void MarkSweep::restore_marks() {
   }

   // deal with the overflow
-  if (_preserved_oop_stack) {
-    for (int i = 0; i < _preserved_oop_stack->length(); i++) {
-      oop obj      = _preserved_oop_stack->at(i);
-      markOop mark = _preserved_mark_stack->at(i);
-      obj->set_mark(mark);
-    }
+  while (!_preserved_oop_stack.is_empty()) {
+    oop obj      = _preserved_oop_stack.pop();
+    markOop mark = _preserved_mark_stack.pop();
+    obj->set_mark(mark);
   }
 }
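
adjust_marks() walks the preserved oops with a StackIterator and next_addr() because it must rewrite each slot in place and leave the stack intact for restore_marks(), which later consumes it LIFO. The distinction in miniature, with a plain array standing in for the segmented stack:

#include <cstddef>
#include <cstdio>

int main() {
  int preserved[3] = { 10, 20, 30 };
  const size_t n = 3;

  // Adjust pass: non-destructive, needs the *address* of each element.
  for (size_t i = 0; i < n; ++i) {
    int* p = &preserved[i];   // StackIterator::next_addr() plays this role
    *p += 1;                  // "adjust_pointer": rewrite the slot in place
  }

  // Restore pass: destructive, LIFO, like popping the stack.
  for (size_t i = n; i > 0; --i) {
    printf("%d\n", preserved[i - 1]);   // prints: 31 21 11
  }
  return 0;
}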


@@ -104,23 +104,22 @@ class MarkSweep : AllStatic {
   friend class KeepAliveClosure;
   friend class VM_MarkSweep;
   friend void marksweep_init();
-  friend class DataLayout;

   //
   // Vars
   //
  protected:
   // Traversal stacks used during phase1
-  static GrowableArray<oop>*             _marking_stack;
-  static GrowableArray<ObjArrayTask>*    _objarray_stack;
+  static Stack<oop>                      _marking_stack;
+  static Stack<ObjArrayTask>             _objarray_stack;
   // Stack for live klasses to revisit at end of marking phase
-  static GrowableArray<Klass*>*          _revisit_klass_stack;
+  static Stack<Klass*>                   _revisit_klass_stack;
   // Set (stack) of MDO's to revisit at end of marking phase
-  static GrowableArray<DataLayout*>*     _revisit_mdo_stack;
+  static Stack<DataLayout*>              _revisit_mdo_stack;

   // Space for storing/restoring mark word
-  static GrowableArray<markOop>*         _preserved_mark_stack;
-  static GrowableArray<oop>*             _preserved_oop_stack;
+  static Stack<markOop>                  _preserved_mark_stack;
+  static Stack<oop>                      _preserved_oop_stack;
   static size_t                          _preserved_count;
   static size_t                          _preserved_count_max;
   static PreservedMark*                  _preserved_marks;


@@ -72,7 +72,7 @@ template <class T> inline void MarkSweep::mark_and_push(T* p) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
     if (!obj->mark()->is_marked()) {
       mark_object(obj);
-      _marking_stack->push(obj);
+      _marking_stack.push(obj);
     }
   }
 }
@@ -80,7 +80,7 @@ template <class T> inline void MarkSweep::mark_and_push(T* p) {
 void MarkSweep::push_objarray(oop obj, size_t index) {
   ObjArrayTask task(obj, index);
   assert(task.is_valid(), "bad ObjArrayTask");
-  _objarray_stack->push(task);
+  _objarray_stack.push(task);
 }

 template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {


@@ -1435,12 +1435,14 @@ defNewGeneration.cpp oop.inline.hpp
 defNewGeneration.cpp                    referencePolicy.hpp
 defNewGeneration.cpp                    space.inline.hpp
 defNewGeneration.cpp                    spaceDecorator.hpp
+defNewGeneration.cpp                    stack.inline.hpp
 defNewGeneration.cpp                    thread_<os_family>.inline.hpp

 defNewGeneration.hpp                    ageTable.hpp
 defNewGeneration.hpp                    cSpaceCounters.hpp
 defNewGeneration.hpp                    generation.inline.hpp
 defNewGeneration.hpp                    generationCounters.hpp
+defNewGeneration.hpp                    stack.hpp

 defNewGeneration.inline.hpp             cardTableRS.hpp
 defNewGeneration.inline.hpp             defNewGeneration.hpp
@@ -3852,6 +3854,10 @@ specialized_oop_closures.cpp specialized_oop_closures.hpp
 specialized_oop_closures.hpp            atomic.hpp

+stack.hpp                               allocation.inline.hpp
+
+stack.inline.hpp                        stack.hpp
+
 stackMapFrame.cpp                       globalDefinitions.hpp
 stackMapFrame.cpp                       handles.inline.hpp
 stackMapFrame.cpp                       oop.inline.hpp
@@ -4095,6 +4101,7 @@ task.hpp top.hpp
 taskqueue.cpp                           debug.hpp
 taskqueue.cpp                           oop.inline.hpp
 taskqueue.cpp                           os.hpp
+taskqueue.cpp                           stack.inline.hpp
 taskqueue.cpp                           taskqueue.hpp
 taskqueue.cpp                           thread_<os_family>.inline.hpp
@@ -4102,6 +4109,7 @@ taskqueue.hpp allocation.hpp
 taskqueue.hpp                           allocation.inline.hpp
 taskqueue.hpp                           mutex.hpp
 taskqueue.hpp                           orderAccess_<os_arch>.inline.hpp
+taskqueue.hpp                           stack.hpp

 templateInterpreter.cpp                 interpreter.hpp
 templateInterpreter.cpp                 interpreterGenerator.hpp


@@ -289,16 +289,17 @@ private:

 // One of the following macros must be used when allocating
 // an array or object from an arena
-#define NEW_ARENA_ARRAY(arena, type, size)\
-  (type*) arena->Amalloc((size) * sizeof(type))
+#define NEW_ARENA_ARRAY(arena, type, size) \
+  (type*) (arena)->Amalloc((size) * sizeof(type))

-#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)\
-  (type*) arena->Arealloc((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) )
+#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
+  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
+                            (new_size) * sizeof(type) )

-#define FREE_ARENA_ARRAY(arena, type, old, size)\
-  arena->Afree((char*)(old), (size) * sizeof(type))
+#define FREE_ARENA_ARRAY(arena, type, old, size) \
+  (arena)->Afree((char*)(old), (size) * sizeof(type))

-#define NEW_ARENA_OBJ(arena, type)\
+#define NEW_ARENA_OBJ(arena, type) \
   NEW_ARENA_ARRAY(arena, type, 1)
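
Whitespace aside, the substantive fix above is parenthesizing the arena parameter: an argument such as use_a ? arena_a : arena_b would otherwise expand to use_a ? arena_a : arena_b->Amalloc(...), binding the member call to arena_b alone. The classic precedence trap in a self-contained miniature:

#include <cstdio>

#define DOUBLE_BAD(x)  (x * 2)     // parameter not parenthesized
#define DOUBLE_GOOD(x) ((x) * 2)

int main() {
  printf("%d\n", DOUBLE_BAD(1 + 2));   // expands to (1 + 2 * 2): prints 5
  printf("%d\n", DOUBLE_GOOD(1 + 2));  // expands to ((1 + 2) * 2): prints 6
  return 0;
}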


@ -87,9 +87,7 @@ void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
_gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
_scan_older); _scan_older);
} while (!_gch->no_allocs_since_save_marks(_level)); } while (!_gch->no_allocs_since_save_marks(_level));
guarantee(_gen->promo_failure_scan_stack() == NULL guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
|| _gen->promo_failure_scan_stack()->length() == 0,
"Failed to finish scan");
} }
ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) : ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
@ -130,9 +128,6 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
int level, int level,
const char* policy) const char* policy)
: Generation(rs, initial_size, level), : Generation(rs, initial_size, level),
_objs_with_preserved_marks(NULL),
_preserved_marks_of_objs(NULL),
_promo_failure_scan_stack(NULL),
_promo_failure_drain_in_progress(false), _promo_failure_drain_in_progress(false),
_should_allocate_from_space(false) _should_allocate_from_space(false)
{ {
@@ -604,12 +599,8 @@ void DefNewGeneration::collect(bool full,
   } else {
     assert(HandlePromotionFailure,
       "Should not be here unless promotion failure handling is on");
-    assert(_promo_failure_scan_stack != NULL &&
-      _promo_failure_scan_stack->length() == 0, "post condition");
-
-    // deallocate stack and it's elements
-    delete _promo_failure_scan_stack;
-    _promo_failure_scan_stack = NULL;
+    assert(_promo_failure_scan_stack.is_empty(), "post condition");
+    _promo_failure_scan_stack.clear(true); // Clear cached segments.

     remove_forwarding_pointers();
     if (PrintGCDetails) {
@@ -620,7 +611,7 @@ void DefNewGeneration::collect(bool full,
     // case there can be live objects in to-space
     // as a result of a partial evacuation of eden
     // and from-space.
-    swap_spaces();  // For the sake of uniformity wrt ParNewGeneration::collect().
+    swap_spaces();  // For uniformity wrt ParNewGeneration.
     from()->set_next_compaction_space(to());
     gch->set_incremental_collection_will_fail();
@@ -653,34 +644,23 @@ void DefNewGeneration::remove_forwarding_pointers() {
   RemoveForwardPointerClosure rspc;
   eden()->object_iterate(&rspc);
   from()->object_iterate(&rspc);

   // Now restore saved marks, if any.
-  if (_objs_with_preserved_marks != NULL) {
-    assert(_preserved_marks_of_objs != NULL, "Both or none.");
-    assert(_objs_with_preserved_marks->length() ==
-           _preserved_marks_of_objs->length(), "Both or none.");
-    for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
-      oop obj   = _objs_with_preserved_marks->at(i);
-      markOop m = _preserved_marks_of_objs->at(i);
-      obj->set_mark(m);
-    }
-    delete _objs_with_preserved_marks;
-    delete _preserved_marks_of_objs;
-    _objs_with_preserved_marks = NULL;
-    _preserved_marks_of_objs = NULL;
-  }
+  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
+         "should be the same");
+  while (!_objs_with_preserved_marks.is_empty()) {
+    oop obj   = _objs_with_preserved_marks.pop();
+    markOop m = _preserved_marks_of_objs.pop();
+    obj->set_mark(m);
+  }
+  _objs_with_preserved_marks.clear(true);
+  _preserved_marks_of_objs.clear(true);
 }
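
The two stacks form an object-to-mark map purely by position, so pushes and pops must stay in lockstep pairs; the assert above checks exactly that. A minimal sketch of the invariant (names shortened, not the actual fields):

    Stack<oop>     objs;    // i-th entry pairs with the i-th entry of marks
    Stack<markOop> marks;
    objs.push(obj); marks.push(obj->mark());  // always pushed together
    while (!objs.is_empty()) {
      oop o     = objs.pop();                 // popped together, so each pair
      markOop m = marks.pop();                // stays aligned; sizes stay equal
      o->set_mark(m);
    }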
 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
   if (m->must_be_preserved_for_promotion_failure(obj)) {
-    if (_objs_with_preserved_marks == NULL) {
-      assert(_preserved_marks_of_objs == NULL, "Both or none.");
-      _objs_with_preserved_marks = new (ResourceObj::C_HEAP)
-        GrowableArray<oop>(PreserveMarkStackSize, true);
-      _preserved_marks_of_objs = new (ResourceObj::C_HEAP)
-        GrowableArray<markOop>(PreserveMarkStackSize, true);
-    }
-    _objs_with_preserved_marks->push(obj);
-    _preserved_marks_of_objs->push(m);
+    _objs_with_preserved_marks.push(obj);
+    _preserved_marks_of_objs.push(m);
   }
 }
@@ -695,7 +675,7 @@ void DefNewGeneration::handle_promotion_failure(oop old) {
   old->forward_to(old);
   _promotion_failed = true;

-  push_on_promo_failure_scan_stack(old);
+  _promo_failure_scan_stack.push(old);

   if (!_promo_failure_drain_in_progress) {
     // prevent recursion in copy_to_survivor_space()
@@ -748,20 +728,9 @@ oop DefNewGeneration::copy_to_survivor_space(oop old) {
   return obj;
 }

-void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) {
-  if (_promo_failure_scan_stack == NULL) {
-    _promo_failure_scan_stack = new (ResourceObj::C_HEAP)
-      GrowableArray<oop>(40, true);
-  }
-  _promo_failure_scan_stack->push(obj);
-}
-
 void DefNewGeneration::drain_promo_failure_scan_stack() {
-  assert(_promo_failure_scan_stack != NULL, "precondition");
-
-  while (_promo_failure_scan_stack->length() > 0) {
-    oop obj = _promo_failure_scan_stack->pop();
+  while (!_promo_failure_scan_stack.is_empty()) {
+    oop obj = _promo_failure_scan_stack.pop();
     obj->oop_iterate(_promo_failure_scan_stack_closure);
   }
 }

View file

@@ -77,10 +77,10 @@ protected:
   // word being overwritten with a self-forwarding-pointer.
   void preserve_mark_if_necessary(oop obj, markOop m);

-  // When one is non-null, so is the other.  Together, each pair is
-  // an object with a preserved mark, and its mark value.
-  GrowableArray<oop>*     _objs_with_preserved_marks;
-  GrowableArray<markOop>* _preserved_marks_of_objs;
+  // Together, these keep <object with a preserved mark, mark value> pairs.
+  // They should always contain the same number of elements.
+  Stack<oop>     _objs_with_preserved_marks;
+  Stack<markOop> _preserved_marks_of_objs;

   // Returns true if the collection can be safely attempted.
   // If this method returns false, a collection is not
@@ -94,11 +94,7 @@ protected:
     _promo_failure_scan_stack_closure = scan_stack_closure;
   }

-  GrowableArray<oop>* _promo_failure_scan_stack;
-  GrowableArray<oop>* promo_failure_scan_stack() const {
-    return _promo_failure_scan_stack;
-  }
-  void push_on_promo_failure_scan_stack(oop);
+  Stack<oop> _promo_failure_scan_stack;
   void drain_promo_failure_scan_stack(void);
   bool _promo_failure_drain_in_progress;

@@ -184,8 +180,6 @@ protected:
     void do_void();
   };

-  class FastEvacuateFollowersClosure;
-  friend class FastEvacuateFollowersClosure;
   class FastEvacuateFollowersClosure: public VoidClosure {
     GenCollectedHeap* _gch;
     int _level;
@@ -336,6 +330,10 @@ protected:

   void verify(bool allow_dirty);

+  bool promo_failure_scan_is_complete() const {
+    return _promo_failure_scan_stack.is_empty();
+  }
+
 protected:
   // If clear_space is true, clear the survivor spaces.  Eden is
   // cleared if the minimum size of eden is 0.  If mangle_space

View file

@@ -161,17 +161,6 @@ void GenMarkSweep::allocate_stacks() {
   _preserved_marks = (PreservedMark*)scratch;
   _preserved_count = 0;

-  _preserved_mark_stack = NULL;
-  _preserved_oop_stack = NULL;
-
-  _marking_stack  = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
-  _objarray_stack = new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);
-
-  int size = SystemDictionary::number_of_classes() * 2;
-  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
-  // (#klass/k)^2 for k ~ 10 appears to be a better fit, but this will have to do for
-  // now until we have had a chance to investigate a more optimal setting.
-  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(2*size, true);
-
 #ifdef VALIDATE_MARK_SWEEP
   if (ValidateMarkSweep) {
@@ -206,17 +195,12 @@ void GenMarkSweep::deallocate_stacks() {
     gch->release_scratch();
   }

-  if (_preserved_oop_stack) {
-    delete _preserved_mark_stack;
-    _preserved_mark_stack = NULL;
-    delete _preserved_oop_stack;
-    _preserved_oop_stack = NULL;
-  }
-
-  delete _marking_stack;
-  delete _objarray_stack;
-  delete _revisit_klass_stack;
-  delete _revisit_mdo_stack;
+  _preserved_mark_stack.clear(true);
+  _preserved_oop_stack.clear(true);
+  _marking_stack.clear();
+  _objarray_stack.clear(true);
+  _revisit_klass_stack.clear(true);
+  _revisit_mdo_stack.clear(true);

 #ifdef VALIDATE_MARK_SWEEP
   if (ValidateMarkSweep) {
@@ -274,17 +258,17 @@ void GenMarkSweep::mark_sweep_phase1(int level,
   // Update subklass/sibling/implementor links of live klasses
   follow_weak_klass_links();
-  assert(_marking_stack->is_empty(), "just drained");
+  assert(_marking_stack.is_empty(), "just drained");

   // Visit memoized MDO's and clear any unmarked weak refs
   follow_mdo_weak_refs();
-  assert(_marking_stack->is_empty(), "just drained");
+  assert(_marking_stack.is_empty(), "just drained");

   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(&is_alive);
   StringTable::unlink(&is_alive);
-  assert(_marking_stack->is_empty(), "stack should be empty by now");
+  assert(_marking_stack.is_empty(), "stack should be empty by now");
 }

View file

@@ -641,6 +641,9 @@ class CommandLineFlags {
   develop(bool, ZapJNIHandleArea, trueInDebug,                \
           "Zap freed JNI handle space with 0xFEFEFEFE")       \
                                                               \
+  notproduct(bool, ZapStackSegments, trueInDebug,             \
+             "Zap allocated/freed Stack segments with 0xFADFADED") \
+                                                              \
   develop(bool, ZapUnusedHeapArea, trueInDebug,               \
           "Zap unused heap space with 0xBAADBABE")            \
                                                               \

View file

@@ -1073,6 +1073,7 @@ void WatcherThread::run() {
       }
     }

+#if 0
     if (is_error_reported()) {
       // A fatal error has happened, the error handler(VMError::report_and_die)
       // should abort JVM after creating an error log file. However in some
@@ -1100,6 +1101,7 @@ void WatcherThread::run() {
         os::sleep(this, 5 * 1000, false);
       }
     }
+#endif // #if 0

     PeriodicTask::real_time_tick(time_to_wait);

View file

@@ -0,0 +1,204 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
// Class Stack (below) grows and shrinks by linking together "segments" which
// are allocated on demand. Segments are arrays of the element type (E) plus an
// extra pointer-sized field to store the segment link. Recently emptied
// segments are kept in a cache and reused.
//
// Notes/caveats:
//
// The size of an element must either evenly divide the size of a pointer or be
// a multiple of the size of a pointer.
//
// Destructors are not called for elements popped off the stack, so element
// types which rely on destructors for things like reference counting will not
// work properly.
//
// Class Stack allocates segments from the C heap. However, two protected
// virtual methods are used to alloc/free memory which subclasses can override:
//
// virtual void* alloc(size_t bytes);
// virtual void free(void* addr, size_t bytes);
//
// The alloc() method must return storage aligned for any use. The
// implementation in class Stack assumes that alloc() will terminate the process
// if the allocation fails.
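//
// A minimal usage sketch (not part of this file's interface; it assumes only
// the defaults and public methods declared below):
//
//   Stack<void*> s;                  // default segment size, caches 4 segments
//   for (int i = 0; i < 1000; ++i) {
//     s.push((void*)(intptr_t)i);    // segments are allocated on demand
//   }
//   while (!s.is_empty()) {
//     void* p = s.pop();             // emptied segments go to the cache
//   }
//   s.clear(true);                   // also releases any cached segments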
template <class E> class StackIterator;
// StackBase holds common data/methods that don't depend on the element type,
// factored out to reduce template code duplication.
class StackBase
{
public:
size_t segment_size() const { return _seg_size; } // Elements per segment.
size_t max_size() const { return _max_size; } // Max elements allowed.
size_t max_cache_size() const { return _max_cache_size; } // Max segments
// allowed in cache.
size_t cache_size() const { return _cache_size; } // Segments in the cache.
protected:
// The ctor arguments correspond to the like-named functions above.
// segment_size: number of items per segment
// max_cache_size: maximum number of *segments* to cache
// max_size: maximum number of items allowed, rounded to a multiple of
// the segment size (0 == unlimited)
inline StackBase(size_t segment_size, size_t max_cache_size, size_t max_size);
// Round max_size to a multiple of the segment size. Treat 0 as unlimited.
static inline size_t adjust_max_size(size_t max_size, size_t seg_size);
protected:
const size_t _seg_size; // Number of items per segment.
const size_t _max_size; // Maximum number of items allowed in the stack.
const size_t _max_cache_size; // Maximum number of segments to cache.
size_t _cur_seg_size; // Number of items in the current segment.
size_t _full_seg_size; // Number of items in already-filled segments.
size_t _cache_size; // Number of segments in the cache.
};
#ifdef __GNUC__
#define inline
#endif // __GNUC__
template <class E>
class Stack: public StackBase
{
public:
friend class StackIterator<E>;
// segment_size: number of items per segment
// max_cache_size: maximum number of *segments* to cache
// max_size: maximum number of items allowed, rounded to a multiple of
// the segment size (0 == unlimited)
inline Stack(size_t segment_size = default_segment_size(),
size_t max_cache_size = 4, size_t max_size = 0);
inline ~Stack() { clear(true); }
inline bool is_empty() const { return _cur_seg == NULL; }
inline bool is_full() const { return _full_seg_size >= max_size(); }
// Performance sensitive code should use is_empty() instead of size() == 0 and
// is_full() instead of size() == max_size(). Using a conditional here allows
// just one var to be updated when pushing/popping elements instead of two;
// _full_seg_size is updated only when pushing/popping segments.
inline size_t size() const {
return is_empty() ? 0 : _full_seg_size + _cur_seg_size;
}
inline void push(E elem);
inline E pop();
// Clear everything from the stack, releasing the associated memory. If
// clear_cache is true, also release any cached segments.
void clear(bool clear_cache = false);
static inline size_t default_segment_size();
protected:
// Each segment includes space for _seg_size elements followed by a link
// (pointer) to the previous segment; the space is allocated as a single block
// of size segment_bytes(). _seg_size is rounded up if necessary so the link
// is properly aligned. The C struct for the layout would be:
//
// struct segment {
// E elements[_seg_size];
// E* link;
// };
// Round up seg_size to keep the link field aligned.
static inline size_t adjust_segment_size(size_t seg_size);
// Methods for allocation size and getting/setting the link.
inline size_t link_offset() const; // Byte offset of link field.
inline size_t segment_bytes() const; // Segment size in bytes.
inline E** link_addr(E* seg) const; // Address of the link field.
inline E* get_link(E* seg) const; // Extract the link from seg.
inline E* set_link(E* new_seg, E* old_seg); // new_seg.link = old_seg.
virtual E* alloc(size_t bytes);
virtual void free(E* addr, size_t bytes);
void push_segment();
void pop_segment();
void free_segments(E* seg); // Free all segments in the list.
inline void reset(bool reset_cache); // Reset all data fields.
DEBUG_ONLY(void verify(bool at_empty_transition) const;)
DEBUG_ONLY(void zap_segment(E* seg, bool zap_link_field) const;)
private:
E* _cur_seg; // Current segment.
E* _cache; // Segment cache to avoid ping-ponging.
};
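
// To make the layout concrete: on a 64-bit VM with E = oop (so sizeof(E) ==
// sizeof(E*) == 8) and _seg_size == N, a segment is one C-heap block of
// N * 8 + 8 bytes, and the accessors above reduce to:
//
//   char* base     = (char*)seg;
//   oop*  elements = (oop*)base;                      // elements[0 .. N-1]
//   oop** link     = (oop**)(base + N * sizeof(oop)); // -> previous segment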
template <class E> class ResourceStack: public Stack<E>, public ResourceObj
{
public:
// If this class becomes widely used, it may make sense to save the Thread
// and use it when allocating segments.
ResourceStack(size_t segment_size = Stack<E>::default_segment_size()):
Stack<E>(segment_size, max_uintx)
{ }
// Set the segment pointers to NULL so the parent dtor does not free them;
// that must be done by the ResourceMark code.
~ResourceStack() { Stack<E>::reset(true); }
protected:
virtual E* alloc(size_t bytes);
virtual void free(E* addr, size_t bytes);
private:
void clear(bool clear_cache = false);
};
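
// Intended usage, assuming ResourceObj's operator new places the
// ResourceStack itself in the current resource area (a sketch, not a
// prescribed pattern):
//
//   {
//     ResourceMark rm;
//     ResourceStack<oop>* rs = new ResourceStack<oop>();
//     rs->push(obj);   // segments come from the resource area
//     // ... use rs ...
//   }                  // rm reclaims the memory; the dtor only resets fields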
template <class E>
class StackIterator: public StackObj
{
public:
StackIterator(Stack<E>& stack): _stack(stack) { sync(); }
Stack<E>& stack() const { return _stack; }
bool is_empty() const { return _cur_seg == NULL; }
E next() { return *next_addr(); }
E* next_addr();
void sync(); // Sync the iterator's state to the stack's current state.
private:
Stack<E>& _stack;
size_t _cur_seg_size;
E* _cur_seg;
size_t _full_seg_size;
};
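
// Traversal sketch: a StackIterator visits elements in pop order (newest
// first) without modifying the stack:
//
//   for (StackIterator<oop> it(s); !it.is_empty(); ) {
//     oop o = it.next();   // reads without popping
//     // process o
//   }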
#ifdef __GNUC__
#undef inline
#endif // __GNUC__

View file

@@ -0,0 +1,273 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
StackBase::StackBase(size_t segment_size, size_t max_cache_size,
size_t max_size):
_seg_size(segment_size),
_max_cache_size(max_cache_size),
_max_size(adjust_max_size(max_size, segment_size))
{
assert(_max_size % _seg_size == 0, "not a multiple");
}
size_t StackBase::adjust_max_size(size_t max_size, size_t seg_size)
{
assert(seg_size > 0, "cannot be 0");
assert(max_size >= seg_size || max_size == 0, "max_size too small");
const size_t limit = max_uintx - (seg_size - 1);
if (max_size == 0 || max_size > limit) {
max_size = limit;
}
return (max_size + seg_size - 1) / seg_size * seg_size;
}
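
// Examples of the rounding, with seg_size == 100:
//   adjust_max_size(250, 100) == (250 + 99) / 100 * 100 == 300
//   adjust_max_size(300, 100) == 300 (already a multiple)
//   adjust_max_size(0,   100) == the largest multiple of 100 that fits under
//                                the clamped limit, i.e., effectively unlimited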
template <class E>
Stack<E>::Stack(size_t segment_size, size_t max_cache_size, size_t max_size):
StackBase(adjust_segment_size(segment_size), max_cache_size, max_size)
{
reset(true);
}
template <class E>
void Stack<E>::push(E item)
{
assert(!is_full(), "pushing onto a full stack");
if (_cur_seg_size == _seg_size) {
push_segment();
}
_cur_seg[_cur_seg_size] = item;
++_cur_seg_size;
}
template <class E>
E Stack<E>::pop()
{
assert(!is_empty(), "popping from an empty stack");
if (_cur_seg_size == 1) {
E tmp = _cur_seg[--_cur_seg_size];
pop_segment();
return tmp;
}
return _cur_seg[--_cur_seg_size];
}
template <class E>
void Stack<E>::clear(bool clear_cache)
{
free_segments(_cur_seg);
if (clear_cache) free_segments(_cache);
reset(clear_cache);
}
template <class E>
size_t Stack<E>::default_segment_size()
{
// Number of elements that fit in 4K bytes minus the size of two pointers
// (link field and malloc header).
return (4096 - 2 * sizeof(E*)) / sizeof(E);
}
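
// For example, on a 64-bit VM with E = oop (8 bytes):
//   default_segment_size() == (4096 - 2 * 8) / 8 == 510 elements
//   segment_bytes()        == 510 * 8 + 8        == 4088 bytes
// which leaves room for the malloc header within a 4K allocation.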
template <class E>
size_t Stack<E>::adjust_segment_size(size_t seg_size)
{
const size_t elem_sz = sizeof(E);
const size_t ptr_sz = sizeof(E*);
assert(elem_sz % ptr_sz == 0 || ptr_sz % elem_sz == 0, "bad element size");
if (elem_sz < ptr_sz) {
return align_size_up(seg_size * elem_sz, ptr_sz) / elem_sz;
}
return seg_size;
}
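
// For example, on a 64-bit VM with E = uint32_t and requested seg_size = 1021:
//   align_size_up(1021 * 4, 8) / 4 == 4088 / 4 == 1022
// and 1022 * 4 == 4088 is pointer-aligned, so the trailing link field is too.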
template <class E>
size_t Stack<E>::link_offset() const
{
return align_size_up(_seg_size * sizeof(E), sizeof(E*));
}
template <class E>
size_t Stack<E>::segment_bytes() const
{
return link_offset() + sizeof(E*);
}
template <class E>
E** Stack<E>::link_addr(E* seg) const
{
return (E**) ((char*)seg + link_offset());
}
template <class E>
E* Stack<E>::get_link(E* seg) const
{
return *link_addr(seg);
}
template <class E>
E* Stack<E>::set_link(E* new_seg, E* old_seg)
{
*link_addr(new_seg) = old_seg;
return new_seg;
}
template <class E>
E* Stack<E>::alloc(size_t bytes)
{
return (E*) NEW_C_HEAP_ARRAY(char, bytes);
}
template <class E>
void Stack<E>::free(E* addr, size_t bytes)
{
FREE_C_HEAP_ARRAY(char, (char*) addr);
}
template <class E>
void Stack<E>::push_segment()
{
assert(_cur_seg_size == _seg_size, "current segment is not full");
E* next;
if (_cache_size > 0) {
// Use a cached segment.
next = _cache;
_cache = get_link(_cache);
--_cache_size;
} else {
next = alloc(segment_bytes());
DEBUG_ONLY(zap_segment(next, true);)
}
const bool at_empty_transition = is_empty();
_cur_seg = set_link(next, _cur_seg);
_cur_seg_size = 0;
_full_seg_size += at_empty_transition ? 0 : _seg_size;
DEBUG_ONLY(verify(at_empty_transition);)
}
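
// A short trace of the bookkeeping, assuming _seg_size == 2 and an empty cache:
//   push(a): reset state looks 'full' -> push_segment(); the stack was empty,
//            so _full_seg_size stays 0; seg1 = [a .]
//   push(b): seg1 = [a b]; _cur_seg_size == 2
//   push(c): segment full -> push_segment(); _full_seg_size += 2;
//            seg2 = [c .]; link(seg2) == seg1
//   size() == _full_seg_size + _cur_seg_size == 2 + 1 == 3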
template <class E>
void Stack<E>::pop_segment()
{
assert(_cur_seg_size == 0, "current segment is not empty");
E* const prev = get_link(_cur_seg);
if (_cache_size < _max_cache_size) {
// Add the current segment to the cache.
DEBUG_ONLY(zap_segment(_cur_seg, false);)
_cache = set_link(_cur_seg, _cache);
++_cache_size;
} else {
DEBUG_ONLY(zap_segment(_cur_seg, true);)
free(_cur_seg, segment_bytes());
}
const bool at_empty_transition = prev == NULL;
_cur_seg = prev;
_cur_seg_size = _seg_size;
_full_seg_size -= at_empty_transition ? 0 : _seg_size;
DEBUG_ONLY(verify(at_empty_transition);)
}
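
// Popping reverses the trace above: when pop() empties the current segment,
// pop_segment() prefers caching the segment over freeing it, so steady-state
// push/pop cycles near a segment boundary avoid repeated malloc/free calls.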
template <class E>
void Stack<E>::free_segments(E* seg)
{
const size_t bytes = segment_bytes();
while (seg != NULL) {
E* const prev = get_link(seg);
free(seg, bytes);
seg = prev;
}
}
template <class E>
void Stack<E>::reset(bool reset_cache)
{
_cur_seg_size = _seg_size; // So push() will alloc a new segment.
_full_seg_size = 0;
_cur_seg = NULL;
if (reset_cache) {
_cache_size = 0;
_cache = NULL;
}
}
#ifdef ASSERT
template <class E>
void Stack<E>::verify(bool at_empty_transition) const
{
assert(size() <= max_size(), "stack exceeded bounds");
assert(cache_size() <= max_cache_size(), "cache exceeded bounds");
assert(_cur_seg_size <= segment_size(), "segment index exceeded bounds");
assert(_full_seg_size % _seg_size == 0, "not a multiple");
assert(at_empty_transition || is_empty() == (size() == 0), "mismatch");
assert((_cache == NULL) == (cache_size() == 0), "mismatch");
if (is_empty()) {
assert(_cur_seg_size == segment_size(), "sanity");
}
}
template <class E>
void Stack<E>::zap_segment(E* seg, bool zap_link_field) const
{
if (!ZapStackSegments) return;
const size_t zap_bytes = segment_bytes() - (zap_link_field ? 0 : sizeof(E*));
uint32_t* cur = (uint32_t*)seg;
const uint32_t* end = cur + zap_bytes / sizeof(uint32_t);
while (cur < end) {
*cur++ = 0xfadfaded;
}
}
#endif
template <class E>
E* ResourceStack<E>::alloc(size_t bytes)
{
return (E*) resource_allocate_bytes(bytes);
}
template <class E>
void ResourceStack<E>::free(E* addr, size_t bytes)
{
resource_free_bytes((char*) addr, bytes);
}
template <class E>
void StackIterator<E>::sync()
{
_full_seg_size = _stack._full_seg_size;
_cur_seg_size = _stack._cur_seg_size;
_cur_seg = _stack._cur_seg;
}
template <class E>
E* StackIterator<E>::next_addr()
{
assert(!is_empty(), "no items left");
if (_cur_seg_size == 1) {
E* addr = _cur_seg;
_cur_seg = _stack.get_link(_cur_seg);
_cur_seg_size = _stack.segment_size();
_full_seg_size -= _stack.segment_size();
return addr;
}
return _cur_seg + --_cur_seg_size;
}

View file

@@ -372,75 +372,47 @@ GenericTaskQueue<E, N>::~GenericTaskQueue() {
 // OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
 // elements that do not fit in the TaskQueue.
 //
-// Three methods from super classes are overridden:
+// This class hides two methods from super classes:
 //
-// initialize() - initialize the super classes and create the overflow stack
 // push() - push onto the task queue or, if that fails, onto the overflow stack
 // is_empty() - return true if both the TaskQueue and overflow stack are empty
 //
-// Note that size() is not overridden--it returns the number of elements in the
+// Note that size() is not hidden--it returns the number of elements in the
 // TaskQueue, and does not include the size of the overflow stack.  This
 // simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
 template<class E, unsigned int N = TASKQUEUE_SIZE>
 class OverflowTaskQueue: public GenericTaskQueue<E, N>
 {
 public:
-  typedef GrowableArray<E>       overflow_t;
+  typedef Stack<E>               overflow_t;
   typedef GenericTaskQueue<E, N> taskqueue_t;

   TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

-  OverflowTaskQueue();
-  ~OverflowTaskQueue();
-  void initialize();
-
-  inline overflow_t* overflow_stack() const { return _overflow_stack; }
-
   // Push task t onto the queue or onto the overflow stack.  Return true.
   inline bool push(E t);

   // Attempt to pop from the overflow stack; return true if anything was popped.
   inline bool pop_overflow(E& t);

+  inline overflow_t* overflow_stack() { return &_overflow_stack; }
+
   inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
-  inline bool overflow_empty()  const { return overflow_stack()->is_empty(); }
+  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
   inline bool is_empty()        const {
     return taskqueue_empty() && overflow_empty();
   }

 private:
-  overflow_t* _overflow_stack;
+  overflow_t _overflow_stack;
 };

-template <class E, unsigned int N>
-OverflowTaskQueue<E, N>::OverflowTaskQueue()
-{
-  _overflow_stack = NULL;
-}
-
-template <class E, unsigned int N>
-OverflowTaskQueue<E, N>::~OverflowTaskQueue()
-{
-  if (_overflow_stack != NULL) {
-    delete _overflow_stack;
-    _overflow_stack = NULL;
-  }
-}
-
-template <class E, unsigned int N>
-void OverflowTaskQueue<E, N>::initialize()
-{
-  taskqueue_t::initialize();
-  assert(_overflow_stack == NULL, "memory leak");
-  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<E>(10, true);
-}
-
 template <class E, unsigned int N>
 bool OverflowTaskQueue<E, N>::push(E t)
 {
   if (!taskqueue_t::push(t)) {
     overflow_stack()->push(t);
-    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->length()));
+    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
   }
   return true;
 }
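
A typical worker drain loop over such a queue, sketched with a hypothetical task type T (pop_local() is inherited from GenericTaskQueue and not shown in this hunk; after this change, initialize() also comes straight from the base class):

    OverflowTaskQueue<T> q;
    q.initialize();
    T t;
    while (q.pop_overflow(t) || q.pop_local(t)) {
      // process t; further q.push(...) calls spill into the Stack<T>
      // only when the fixed-size task queue is full
    }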