mirror of https://github.com/openjdk/jdk.git
synced 2025-09-21 03:24:38 +02:00

Commit c8006a68d5 (Merge)
19 changed files with 978 additions and 1827 deletions
@@ -1350,11 +1350,7 @@ bool nmethod::can_unload(BoolObjectClosure* is_alive,
       return false;
     }
   }
-  if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-    // Cannot do this test if verification of the UseParallelOldGC
-    // code using the PSMarkSweep code is being done.
-    assert(unloading_occurred, "Inconsistency in unloading");
-  }
+  assert(unloading_occurred, "Inconsistency in unloading");
   make_unloaded(is_alive, obj);
   return true;
 }
@@ -210,10 +210,6 @@ void ParallelScavengeHeap::post_initialize() {
   PSScavenge::initialize();
   if (UseParallelOldGC) {
     PSParallelCompact::post_initialize();
-    if (VerifyParallelOldWithMarkSweep) {
-      // Will be used for verification of par old.
-      PSMarkSweep::initialize();
-    }
   } else {
     PSMarkSweep::initialize();
   }
@@ -402,7 +398,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
     return result;
   }
   if (!is_tlab &&
-      size >= (young_gen()->eden_space()->capacity_in_words() / 2)) {
+      size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
     result = old_gen()->allocate(size, is_tlab);
     if (result != NULL) {
       return result;
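This hunk makes the large-object test NUMA-aware: eden capacity is now queried per allocating thread, so on a NUMA heap each thread compares the request against its own locality group's share of eden rather than the whole space. A worked example of the arithmetic, with made-up sizes (all constants below are illustrative, not from this commit):

#include <cstdio>
#include <cstddef>

// Illustrative arithmetic only; the sizes are invented for the example.
int main() {
  const size_t eden_words     = 512u * 1024 * 1024 / 8; // 512 MB eden, 8-byte words
  const size_t lgrp_count     = 4;                      // locality groups
  const size_t per_thread_cap = eden_words / lgrp_count;
  const size_t request        = 70u * 1024 * 1024 / 8;  // 70 MB request

  // Mirrors: size >= eden_space()->capacity_in_words(Thread::current()) / 2
  bool go_to_old_gen = request >= per_thread_cap / 2;
  printf("request %zu words, threshold %zu words -> old gen: %s\n",
         request, per_thread_cap / 2, go_to_old_gen ? "yes" : "no");
  return 0;
}

With a 128 MB per-thread share, any request of 64 MB or more bypasses eden and goes straight to the old generation, which is the behavior the one-line change above enables.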
@@ -146,7 +146,7 @@ void RefProcTaskExecutor::execute(ProcessTask& task)
 {
   ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
-  ChunkTaskQueueSet* qset = ParCompactionManager::chunk_array();
+  RegionTaskQueueSet* qset = ParCompactionManager::region_array();
   ParallelTaskTerminator terminator(parallel_gc_threads, qset);
   GCTaskQueue* q = GCTaskQueue::create();
   for(uint i=0; i<parallel_gc_threads; i++) {
@@ -205,38 +205,38 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
 }

 //
-// StealChunkCompactionTask
+// StealRegionCompactionTask
 //

-StealChunkCompactionTask::StealChunkCompactionTask(ParallelTaskTerminator* t) :
-  _terminator(t) {};
+StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t):
+  _terminator(t) {}

-void StealChunkCompactionTask::do_it(GCTaskManager* manager, uint which) {
+void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
   assert(Universe::heap()->is_gc_active(), "called outside gc");

-  NOT_PRODUCT(TraceTime tm("StealChunkCompactionTask",
+  NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));

   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);

-  // Has to drain stacks first because there may be chunks on
+  // Has to drain stacks first because there may be regions on
   // preloaded onto the stack and this thread may never have
   // done a draining task. Are the draining tasks needed?

-  cm->drain_chunk_stacks();
+  cm->drain_region_stacks();

-  size_t chunk_index = 0;
+  size_t region_index = 0;
   int random_seed = 17;

   // If we're the termination task, try 10 rounds of stealing before
   // setting the termination flag

   while(true) {
-    if (ParCompactionManager::steal(which, &random_seed, chunk_index)) {
-      PSParallelCompact::fill_and_update_chunk(cm, chunk_index);
-      cm->drain_chunk_stacks();
+    if (ParCompactionManager::steal(which, &random_seed, region_index)) {
+      PSParallelCompact::fill_and_update_region(cm, region_index);
+      cm->drain_region_stacks();
     } else {
       if (terminator()->offer_termination()) {
         break;
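The loop above is the standard steal-then-terminate protocol: a worker repeatedly tries to steal a region index from a random victim, processes it along with anything that processing pushes locally, and only offers termination after a steal fails; termination succeeds once every worker is simultaneously idle. A minimal sketch of that control flow follows, with TaskPool and Terminator as hypothetical single-threaded stand-ins for HotSpot's RegionTaskQueueSet and ParallelTaskTerminator:

#include <cstddef>
#include <optional>

// Hypothetical stand-ins; the real types are lock-free and multi-threaded.
struct TaskPool {
  std::optional<std::size_t> steal(int /*which*/, int* /*seed*/) { return std::nullopt; }
};
struct Terminator {
  bool offer_termination() { return true; }  // true once all workers are idle
};

template <typename Process>
void worker_loop(int which, TaskPool& pool, Terminator& term, Process process) {
  int random_seed = 17;                  // per-worker victim-selection seed
  while (true) {
    if (auto region = pool.steal(which, &random_seed)) {
      process(*region);                  // fill_and_update_region(...) in the real code
      // ...then drain any regions the processed one pushed locally...
    } else if (term.offer_termination()) {
      break;                             // every worker failed to steal: all done
    }                                    // otherwise retry; a peer may yet publish work
  }
}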
@@ -249,11 +249,10 @@ void StealChunkCompactionTask::do_it(GCTaskManager* manager, uint which) {

 UpdateDensePrefixTask::UpdateDensePrefixTask(
                                    PSParallelCompact::SpaceId space_id,
-                                   size_t chunk_index_start,
-                                   size_t chunk_index_end) :
-  _space_id(space_id), _chunk_index_start(chunk_index_start),
-  _chunk_index_end(chunk_index_end)
-{}
+                                   size_t region_index_start,
+                                   size_t region_index_end) :
+  _space_id(space_id), _region_index_start(region_index_start),
+  _region_index_end(region_index_end) {}

 void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
@@ -265,8 +264,8 @@ void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {

   PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
                                                          _space_id,
-                                                         _chunk_index_start,
-                                                         _chunk_index_end);
+                                                         _region_index_start,
+                                                         _region_index_end);
 }

 void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
@@ -278,6 +277,6 @@ void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);

-  // Process any chunks already in the compaction managers stacks.
-  cm->drain_chunk_stacks();
+  // Process any regions already in the compaction managers stacks.
+  cm->drain_region_stacks();
 }
@@ -188,18 +188,18 @@ class StealMarkingTask : public GCTask {
 };

 //
-// StealChunkCompactionTask
+// StealRegionCompactionTask
 //
 // This task is used to distribute work to idle threads.
 //

-class StealChunkCompactionTask : public GCTask {
+class StealRegionCompactionTask : public GCTask {
  private:
   ParallelTaskTerminator* const _terminator;
  public:
-  StealChunkCompactionTask(ParallelTaskTerminator* t);
+  StealRegionCompactionTask(ParallelTaskTerminator* t);

-  char* name() { return (char *)"steal-chunk-task"; }
+  char* name() { return (char *)"steal-region-task"; }
   ParallelTaskTerminator* terminator() { return _terminator; }

   virtual void do_it(GCTaskManager* manager, uint which);
@@ -215,15 +215,15 @@ class StealChunkCompactionTask : public GCTask {
 class UpdateDensePrefixTask : public GCTask {
  private:
   PSParallelCompact::SpaceId _space_id;
-  size_t _chunk_index_start;
-  size_t _chunk_index_end;
+  size_t _region_index_start;
+  size_t _region_index_end;

  public:
   char* name() { return (char *)"update-dense_prefix-task"; }

   UpdateDensePrefixTask(PSParallelCompact::SpaceId space_id,
-                        size_t chunk_index_start,
-                        size_t chunk_index_end);
+                        size_t region_index_start,
+                        size_t region_index_end);

   virtual void do_it(GCTaskManager* manager, uint which);
 };
@@ -231,17 +231,17 @@ class UpdateDensePrefixTask : public GCTask {
 //
 // DrainStacksCompactionTask
 //
-// This task processes chunks that have been added to the stacks of each
+// This task processes regions that have been added to the stacks of each
 // compaction manager.
 //
 // Trying to use one draining thread does not work because there are no
 // guarantees about which task will be picked up by which thread. For example,
-// if thread A gets all the preloaded chunks, thread A may not get a draining
+// if thread A gets all the preloaded regions, thread A may not get a draining
 // task (they may all be done by other threads).
 //

 class DrainStacksCompactionTask : public GCTask {
  public:
-  char* name() { return (char *)"drain-chunk-task"; }
+  char* name() { return (char *)"drain-region-task"; }
   virtual void do_it(GCTaskManager* manager, uint which);
 };
@@ -30,7 +30,7 @@ ParCompactionManager** ParCompactionManager::_manager_array = NULL;
 OopTaskQueueSet*      ParCompactionManager::_stack_array = NULL;
 ObjectStartArray*     ParCompactionManager::_start_array = NULL;
 ParMarkBitMap*        ParCompactionManager::_mark_bitmap = NULL;
-ChunkTaskQueueSet*    ParCompactionManager::_chunk_array = NULL;
+RegionTaskQueueSet*   ParCompactionManager::_region_array = NULL;

 ParCompactionManager::ParCompactionManager() :
     _action(CopyAndUpdate) {
@@ -46,13 +46,13 @@ ParCompactionManager::ParCompactionManager() :

   // We want the overflow stack to be permanent
   _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
-#ifdef USE_ChunkTaskQueueWithOverflow
-  chunk_stack()->initialize();
+#ifdef USE_RegionTaskQueueWithOverflow
+  region_stack()->initialize();
 #else
-  chunk_stack()->initialize();
+  region_stack()->initialize();

   // We want the overflow stack to be permanent
-  _chunk_overflow_stack =
+  _region_overflow_stack =
     new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
 #endif
@@ -86,18 +86,18 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {

   _stack_array = new OopTaskQueueSet(parallel_gc_threads);
   guarantee(_stack_array != NULL, "Count not initialize promotion manager");
-  _chunk_array = new ChunkTaskQueueSet(parallel_gc_threads);
-  guarantee(_chunk_array != NULL, "Count not initialize promotion manager");
+  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
+  guarantee(_region_array != NULL, "Count not initialize promotion manager");

   // Create and register the ParCompactionManager(s) for the worker threads.
   for(uint i=0; i<parallel_gc_threads; i++) {
     _manager_array[i] = new ParCompactionManager();
     guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
     stack_array()->register_queue(i, _manager_array[i]->marking_stack());
-#ifdef USE_ChunkTaskQueueWithOverflow
-    chunk_array()->register_queue(i, _manager_array[i]->chunk_stack()->task_queue());
+#ifdef USE_RegionTaskQueueWithOverflow
+    region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
 #else
-    chunk_array()->register_queue(i, _manager_array[i]->chunk_stack());
+    region_array()->register_queue(i, _manager_array[i]->region_stack());
 #endif
   }
@@ -153,31 +153,31 @@ oop ParCompactionManager::retrieve_for_scanning() {
   return NULL;
 }

-// Save chunk on a stack
-void ParCompactionManager::save_for_processing(size_t chunk_index) {
+// Save region on a stack
+void ParCompactionManager::save_for_processing(size_t region_index) {
 #ifdef ASSERT
   const ParallelCompactData& sd = PSParallelCompact::summary_data();
-  ParallelCompactData::ChunkData* const chunk_ptr = sd.chunk(chunk_index);
-  assert(chunk_ptr->claimed(), "must be claimed");
-  assert(chunk_ptr->_pushed++ == 0, "should only be pushed once");
+  ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
+  assert(region_ptr->claimed(), "must be claimed");
+  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
 #endif
-  chunk_stack_push(chunk_index);
+  region_stack_push(region_index);
 }

-void ParCompactionManager::chunk_stack_push(size_t chunk_index) {
+void ParCompactionManager::region_stack_push(size_t region_index) {

-#ifdef USE_ChunkTaskQueueWithOverflow
-  chunk_stack()->save(chunk_index);
+#ifdef USE_RegionTaskQueueWithOverflow
+  region_stack()->save(region_index);
 #else
-  if(!chunk_stack()->push(chunk_index)) {
-    chunk_overflow_stack()->push(chunk_index);
+  if(!region_stack()->push(region_index)) {
+    region_overflow_stack()->push(region_index);
   }
 #endif
 }

-bool ParCompactionManager::retrieve_for_processing(size_t& chunk_index) {
-#ifdef USE_ChunkTaskQueueWithOverflow
-  return chunk_stack()->retrieve(chunk_index);
+bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
+#ifdef USE_RegionTaskQueueWithOverflow
+  return region_stack()->retrieve(region_index);
 #else
   // Should not be used in the parallel case
   ShouldNotReachHere();
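region_stack_push() shows the push discipline used throughout this file: try the fixed-capacity, stealable task queue first, and spill to a growable, thread-private overflow stack when it is full, so a push can never fail. A simplified single-threaded sketch of that shape (my own toy template; HotSpot's GenericTaskQueue is a bounded lock-free deque, which std::deque only imitates at the interface level):

#include <cstddef>
#include <deque>
#include <vector>

template <typename T>
class OverflowTaskQueue {
  static const std::size_t kCapacity = 1024;
  std::deque<T>  _queue;     // stealable deque: thieves take the front
  std::vector<T> _overflow;  // private, unbounded spill area
 public:
  void push(T t) {                       // never fails
    if (_queue.size() < kCapacity) _queue.push_back(t);
    else                           _overflow.push_back(t);
  }
  bool pop_local(T& t) {                 // owner clears its private spill first,
    if (!_overflow.empty()) { t = _overflow.back(); _overflow.pop_back(); return true; }
    if (!_queue.empty())    { t = _queue.back();    _queue.pop_back();    return true; }
    return false;                        // ...leaving the deque visible to thieves
  }
  bool steal(T& t) {                     // thieves take the opposite end
    if (_queue.empty()) return false;
    t = _queue.front(); _queue.pop_front(); return true;
  }
  bool is_empty() const { return _queue.empty() && _overflow.empty(); }
};

Popping the overflow first is deliberate: spilled entries are invisible to other threads, so clearing them early keeps the stealable deque populated for idle workers. The drain loops below apply the same reasoning.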
@@ -230,14 +230,14 @@ void ParCompactionManager::drain_marking_stacks(OopClosure* blk) {
   assert(overflow_stack()->length() == 0, "Sanity");
 }

-void ParCompactionManager::drain_chunk_overflow_stack() {
-  size_t chunk_index = (size_t) -1;
-  while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
-    PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+void ParCompactionManager::drain_region_overflow_stack() {
+  size_t region_index = (size_t) -1;
+  while(region_stack()->retrieve_from_overflow(region_index)) {
+    PSParallelCompact::fill_and_update_region(this, region_index);
   }
 }

-void ParCompactionManager::drain_chunk_stacks() {
+void ParCompactionManager::drain_region_stacks() {
 #ifdef ASSERT
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
@@ -249,42 +249,42 @@ void ParCompactionManager::drain_chunk_stacks() {
 #if 1 // def DO_PARALLEL - the serial code hasn't been updated
   do {

-#ifdef USE_ChunkTaskQueueWithOverflow
+#ifdef USE_RegionTaskQueueWithOverflow
     // Drain overflow stack first, so other threads can steal from
     // claimed stack while we work.
-    size_t chunk_index = (size_t) -1;
-    while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
-      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+    size_t region_index = (size_t) -1;
+    while(region_stack()->retrieve_from_overflow(region_index)) {
+      PSParallelCompact::fill_and_update_region(this, region_index);
     }

-    while (chunk_stack()->retrieve_from_stealable_queue(chunk_index)) {
-      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+    while (region_stack()->retrieve_from_stealable_queue(region_index)) {
+      PSParallelCompact::fill_and_update_region(this, region_index);
     }
-  } while (!chunk_stack()->is_empty());
+  } while (!region_stack()->is_empty());
 #else
     // Drain overflow stack first, so other threads can steal from
     // claimed stack while we work.
-    while(!chunk_overflow_stack()->is_empty()) {
-      size_t chunk_index = chunk_overflow_stack()->pop();
-      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+    while(!region_overflow_stack()->is_empty()) {
+      size_t region_index = region_overflow_stack()->pop();
+      PSParallelCompact::fill_and_update_region(this, region_index);
     }

-    size_t chunk_index = -1;
+    size_t region_index = -1;
     // obj is a reference!!!
-    while (chunk_stack()->pop_local(chunk_index)) {
+    while (region_stack()->pop_local(region_index)) {
       // It would be nice to assert about the type of objects we might
       // pop, but they can come from anywhere, unfortunately.
-      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+      PSParallelCompact::fill_and_update_region(this, region_index);
     }
-  } while((chunk_stack()->size() != 0) ||
-          (chunk_overflow_stack()->length() != 0));
+  } while((region_stack()->size() != 0) ||
+          (region_overflow_stack()->length() != 0));
 #endif

-#ifdef USE_ChunkTaskQueueWithOverflow
-  assert(chunk_stack()->is_empty(), "Sanity");
+#ifdef USE_RegionTaskQueueWithOverflow
+  assert(region_stack()->is_empty(), "Sanity");
 #else
-  assert(chunk_stack()->size() == 0, "Sanity");
-  assert(chunk_overflow_stack()->length() == 0, "Sanity");
+  assert(region_stack()->size() == 0, "Sanity");
+  assert(region_overflow_stack()->length() == 0, "Sanity");
 #endif
 #else
   oop obj;
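The drain loop's comment states the ordering rule: empty the overflow stack first, because its contents are invisible to thieves, then the stealable queue, and re-check both, since filling a region can enqueue more regions. Distilled into a sketch (pop_overflow, pop_stealable, and is_empty are hypothetical names standing in for the calls above):

#include <cstddef>

// Q is any queue type exposing the three hypothetical calls named above.
template <typename Q, typename Process>
void drain_region_stacks_sketch(Q& q, Process process) {
  std::size_t region_index;
  do {
    while (q.pop_overflow(region_index))  process(region_index);  // private work first
    while (q.pop_stealable(region_index)) process(region_index);  // then the shared deque
  } while (!q.is_empty());  // processing may have pushed new regions; re-check
}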
@@ -52,7 +52,7 @@ class ParCompactionManager : public CHeapObj {
   friend class ParallelTaskTerminator;
   friend class ParMarkBitMap;
   friend class PSParallelCompact;
-  friend class StealChunkCompactionTask;
+  friend class StealRegionCompactionTask;
   friend class UpdateAndFillClosure;
   friend class RefProcTaskExecutor;
@@ -72,27 +72,27 @@ class ParCompactionManager : public CHeapObj {
 // ------------------------ End don't putback if not needed

  private:
-  static ParCompactionManager** _manager_array;
-  static OopTaskQueueSet*      _stack_array;
-  static ObjectStartArray*     _start_array;
-  static ChunkTaskQueueSet*    _chunk_array;
-  static PSOldGen*             _old_gen;
+  static ParCompactionManager** _manager_array;
+  static OopTaskQueueSet*       _stack_array;
+  static ObjectStartArray*      _start_array;
+  static RegionTaskQueueSet*    _region_array;
+  static PSOldGen*              _old_gen;

-  OopTaskQueue                 _marking_stack;
-  GrowableArray<oop>*          _overflow_stack;
+  OopTaskQueue                  _marking_stack;
+  GrowableArray<oop>*           _overflow_stack;
   // Is there a way to reuse the _marking_stack for the
-  // saving empty chunks? For now just create a different
+  // saving empty regions? For now just create a different
   // type of TaskQueue.

-#ifdef USE_ChunkTaskQueueWithOverflow
-  ChunkTaskQueueWithOverflow   _chunk_stack;
+#ifdef USE_RegionTaskQueueWithOverflow
+  RegionTaskQueueWithOverflow   _region_stack;
 #else
-  ChunkTaskQueue               _chunk_stack;
-  GrowableArray<size_t>*       _chunk_overflow_stack;
+  RegionTaskQueue               _region_stack;
+  GrowableArray<size_t>*        _region_overflow_stack;
 #endif

 #if 1 // does this happen enough to need a per thread stack?
-  GrowableArray<Klass*>*       _revisit_klass_stack;
+  GrowableArray<Klass*>*        _revisit_klass_stack;
 #endif
   static ParMarkBitMap* _mark_bitmap;
@@ -100,21 +100,22 @@ class ParCompactionManager : public CHeapObj {

   static PSOldGen* old_gen()             { return _old_gen; }
   static ObjectStartArray* start_array() { return _start_array; }
-  static OopTaskQueueSet* stack_array()  { return _stack_array; }
+  static OopTaskQueueSet* stack_array()   { return _stack_array; }

   static void initialize(ParMarkBitMap* mbm);

  protected:
   // Array of tasks. Needed by the ParallelTaskTerminator.
-  static ChunkTaskQueueSet* chunk_array() { return _chunk_array; }
-
-  OopTaskQueue* marking_stack()           { return &_marking_stack; }
-  GrowableArray<oop>* overflow_stack()    { return _overflow_stack; }
-#ifdef USE_ChunkTaskQueueWithOverflow
-  ChunkTaskQueueWithOverflow* chunk_stack() { return &_chunk_stack; }
+  static RegionTaskQueueSet* region_array() { return _region_array; }
+  OopTaskQueue* marking_stack()             { return &_marking_stack; }
+  GrowableArray<oop>* overflow_stack()      { return _overflow_stack; }
+#ifdef USE_RegionTaskQueueWithOverflow
+  RegionTaskQueueWithOverflow* region_stack() { return &_region_stack; }
 #else
-  ChunkTaskQueue* chunk_stack()           { return &_chunk_stack; }
-  GrowableArray<size_t>* chunk_overflow_stack() { return _chunk_overflow_stack; }
+  RegionTaskQueue* region_stack()           { return &_region_stack; }
+  GrowableArray<size_t>* region_overflow_stack() {
+    return _region_overflow_stack;
+  }
 #endif

   // Pushes onto the marking stack. If the marking stack is full,
@@ -123,9 +124,9 @@ class ParCompactionManager : public CHeapObj {
   // Do not implement an equivalent stack_pop. Deal with the
   // marking stack and overflow stack directly.

-  // Pushes onto the chunk stack. If the chunk stack is full,
-  // pushes onto the chunk overflow stack.
-  void chunk_stack_push(size_t chunk_index);
+  // Pushes onto the region stack. If the region stack is full,
+  // pushes onto the region overflow stack.
+  void region_stack_push(size_t region_index);
  public:

   Action action() { return _action; }
@@ -160,10 +161,10 @@ class ParCompactionManager : public CHeapObj {
   // Get a oop for scanning. If returns null, no oop were found.
   oop retrieve_for_scanning();

-  // Save chunk for later processing. Must not fail.
-  void save_for_processing(size_t chunk_index);
-  // Get a chunk for processing. If returns null, no chunk were found.
-  bool retrieve_for_processing(size_t& chunk_index);
+  // Save region for later processing. Must not fail.
+  void save_for_processing(size_t region_index);
+  // Get a region for processing. If returns null, no region were found.
+  bool retrieve_for_processing(size_t& region_index);

   // Access function for compaction managers
   static ParCompactionManager* gc_thread_compaction_manager(int index);
@@ -172,18 +173,18 @@ class ParCompactionManager : public CHeapObj {
     return stack_array()->steal(queue_num, seed, t);
   }

-  static bool steal(int queue_num, int* seed, ChunkTask& t) {
-    return chunk_array()->steal(queue_num, seed, t);
+  static bool steal(int queue_num, int* seed, RegionTask& t) {
+    return region_array()->steal(queue_num, seed, t);
   }

   // Process tasks remaining on any stack
   void drain_marking_stacks(OopClosure *blk);

   // Process tasks remaining on any stack
-  void drain_chunk_stacks();
+  void drain_region_stacks();

   // Process tasks remaining on any stack
-  void drain_chunk_overflow_stack();
+  void drain_region_overflow_stack();

   // Debugging support
 #ifdef ASSERT
@@ -35,9 +35,7 @@ void PSMarkSweep::initialize() {
   _ref_processor = new ReferenceProcessor(mr,
     true,   // atomic_discovery
     false); // mt_discovery
-  if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-    _counters = new CollectorCounters("PSMarkSweep", 1);
-  }
+  _counters = new CollectorCounters("PSMarkSweep", 1);
 }

 // This method contains all heap specific policy for invoking mark sweep.
@@ -518,9 +516,6 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   follow_stack();

   // Process reference objects found during marking
-
-  // Skipping the reference processing for VerifyParallelOldWithMarkSweep
-  // affects the marking (makes it different).
   {
     ReferencePolicy *soft_ref_policy;
     if (clear_all_softrefs) {
@@ -152,20 +152,15 @@ void PSMarkSweepDecorator::precompact() {
       oop(q)->forward_to(oop(compact_top));
       assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
     } else {
-      // Don't clear the mark since it's confuses parallel old
-      // verification.
-      if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-        // if the object isn't moving we can just set the mark to the default
-        // mark and handle it specially later on.
-        oop(q)->init_mark();
-      }
+      // if the object isn't moving we can just set the mark to the default
+      // mark and handle it specially later on.
+      oop(q)->init_mark();
       assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
     }

     // Update object start array
-    if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-      if (start_array)
-        start_array->allocate_block(compact_top);
+    if (start_array) {
+      start_array->allocate_block(compact_top);
     }

     VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size));
@@ -219,19 +214,14 @@ void PSMarkSweepDecorator::precompact() {
       assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
     } else {
       // if the object isn't moving we can just set the mark to the default
-      // Don't clear the mark since it's confuses parallel old
-      // verification.
-      if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-        // mark and handle it specially later on.
-        oop(q)->init_mark();
-      }
+      // mark and handle it specially later on.
+      oop(q)->init_mark();
       assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
     }

-    if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-      // Update object start array
-      if (start_array)
-        start_array->allocate_block(compact_top);
+    // Update object start array
+    if (start_array) {
+      start_array->allocate_block(compact_top);
     }

     VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz));
@@ -152,9 +152,7 @@ void PSOldGen::precompact() {
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

   // Reset start array first.
-  debug_only(if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {)
   start_array()->reset();
-  debug_only(})

   object_mark_sweep()->precompact();
(Two file diffs suppressed because they are too large.)
@@ -123,8 +123,6 @@ void PSPermGen::move_and_update(ParCompactionManager* cm) {

 void PSPermGen::precompact() {
   // Reset start array first.
-  debug_only(if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {)
   _start_array.reset();
-  debug_only(})
   object_mark_sweep()->precompact();
 }
@@ -50,7 +50,8 @@ class ImmutableSpace: public CHeapObj {
   size_t capacity_in_bytes() const { return capacity_in_words() * HeapWordSize; }

   // Size computations. Sizes are in heapwords.
-  size_t capacity_in_words() const            { return pointer_delta(end(), bottom()); }
+  size_t capacity_in_words() const                { return pointer_delta(end(), bottom()); }
+  virtual size_t capacity_in_words(Thread*) const { return capacity_in_words(); }

   // Iteration.
   virtual void oop_iterate(OopClosure* cl);
@@ -23,13 +23,6 @@
  */

 inline void MarkSweep::mark_object(oop obj) {
-#ifndef SERIALGC
-  if (UseParallelOldGC && VerifyParallelOldWithMarkSweep) {
-    assert(PSParallelCompact::mark_bitmap()->is_marked(obj),
-           "Should be marked in the marking bitmap");
-  }
-#endif // SERIALGC
-
   // some marks may contain information we need to preserve so we store them away
   // and overwrite the mark. We'll restore it at the end of markSweep.
   markOop mark = obj->mark();
@@ -181,6 +181,25 @@ size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
   return lgrp_spaces()->at(i)->space()->free_in_bytes();
 }

+
+size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
+  guarantee(thr != NULL, "No thread");
+  int lgrp_id = thr->lgrp_id();
+  if (lgrp_id == -1) {
+    if (lgrp_spaces()->length() > 0) {
+      return capacity_in_words() / lgrp_spaces()->length();
+    } else {
+      assert(false, "There should be at least one locality group");
+      return 0;
+    }
+  }
+  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
+  if (i == -1) {
+    return 0;
+  }
+  return lgrp_spaces()->at(i)->space()->capacity_in_words();
+}
+
 // Check if the NUMA topology has changed. Add and remove spaces if needed.
 // The update can be forced by setting the force parameter equal to true.
 bool MutableNUMASpace::update_layout(bool force) {
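The new method reports capacity per locality group: a thread not yet bound to a group is charged an even 1/N share, a bound thread gets its group's actual space, and a group that can no longer be found (say, after a topology change) reports zero. The decision tree, distilled into a free function with a toy signature (this assumes dense array indexing by group id, which the real code does not; it searches by id):

#include <cstddef>

// Toy distillation of the fallback logic above; not the HotSpot interface.
std::size_t numa_capacity_words(int lgrp_id, std::size_t total_words,
                                const std::size_t* group_words, int group_count) {
  if (lgrp_id == -1)                        // thread not yet bound to a group
    return group_count > 0 ? total_words / group_count : 0;
  if (lgrp_id < 0 || lgrp_id >= group_count)
    return 0;                               // group not found
  return group_words[lgrp_id];
}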
@@ -722,7 +741,8 @@ HeapWord* MutableNUMASpace::allocate(size_t size) {
     i = os::random() % lgrp_spaces()->length();
   }

-  MutableSpace *s = lgrp_spaces()->at(i)->space();
+  LGRPSpace* ls = lgrp_spaces()->at(i);
+  MutableSpace *s = ls->space();
   HeapWord *p = s->allocate(size);

   if (p != NULL) {
@@ -743,6 +763,9 @@ HeapWord* MutableNUMASpace::allocate(size_t size) {
       *(int*)i = 0;
     }
   }
+  if (p == NULL) {
+    ls->set_allocation_failed();
+  }
   return p;
 }
@@ -761,7 +784,8 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
   if (i == -1) {
     i = os::random() % lgrp_spaces()->length();
   }
-  MutableSpace *s = lgrp_spaces()->at(i)->space();
+  LGRPSpace *ls = lgrp_spaces()->at(i);
+  MutableSpace *s = ls->space();
   HeapWord *p = s->cas_allocate(size);
   if (p != NULL) {
     size_t remainder = pointer_delta(s->end(), p + size);
@@ -790,6 +814,9 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
       *(int*)i = 0;
     }
   }
+  if (p == NULL) {
+    ls->set_allocation_failed();
+  }
   return p;
 }
@@ -60,6 +60,7 @@ class MutableNUMASpace : public MutableSpace {
     MutableSpace* _space;
     MemRegion _invalid_region;
     AdaptiveWeightedAverage *_alloc_rate;
+    bool _allocation_failed;

     struct SpaceStats {
       size_t _local_space, _remote_space, _unbiased_space, _uncommited_space;
@@ -81,7 +82,7 @@ class MutableNUMASpace : public MutableSpace {
     char* last_page_scanned()           { return _last_page_scanned; }
     void set_last_page_scanned(char* p) { _last_page_scanned = p; }
    public:
-    LGRPSpace(int l) : _lgrp_id(l), _last_page_scanned(NULL) {
+    LGRPSpace(int l) : _lgrp_id(l), _last_page_scanned(NULL), _allocation_failed(false) {
       _space = new MutableSpace();
       _alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight);
     }
@@ -103,8 +104,21 @@ class MutableNUMASpace : public MutableSpace {
       return *(int*)lgrp_id_value == p->lgrp_id();
     }

+    // Report a failed allocation.
+    void set_allocation_failed() { _allocation_failed = true; }
+
     void sample() {
-      alloc_rate()->sample(space()->used_in_bytes());
+      // If there was a failed allocation make allocation rate equal
+      // to the size of the whole chunk. This ensures the progress of
+      // the adaptation process.
+      size_t alloc_rate_sample;
+      if (_allocation_failed) {
+        alloc_rate_sample = space()->capacity_in_bytes();
+        _allocation_failed = false;
+      } else {
+        alloc_rate_sample = space()->used_in_bytes();
+      }
+      alloc_rate()->sample(alloc_rate_sample);
     }

     MemRegion invalid_region() const { return _invalid_region; }
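sample() feeds an AdaptiveWeightedAverage that drives chunk resizing; substituting the full chunk capacity after a failed allocation biases the average sharply upward, so a starved locality group's share keeps growing instead of stalling at its small measured usage. A minimal weighted-average sketch, assuming the conventional new = (w * sample + (100 - w) * old) / 100 update (HotSpot's class is more elaborate than this):

#include <cstdio>

// Minimal exponentially weighted average; a toy, not HotSpot's
// AdaptiveWeightedAverage.
class WeightedAverage {
  double   _avg = 0.0;
  unsigned _weight;               // percentage given to the newest sample
 public:
  explicit WeightedAverage(unsigned weight) : _weight(weight) {}
  void sample(double s) { _avg = (_weight * s + (100 - _weight) * _avg) / 100.0; }
  double average() const { return _avg; }
};

int main() {
  WeightedAverage alloc_rate(20);           // a NUMAChunkResizeWeight-like knob
  for (int i = 0; i < 5; i++) alloc_rate.sample(1000.0);   // steady usage
  alloc_rate.sample(8000.0);  // failed allocation: sample = whole chunk size
  printf("biased average: %.0f\n", alloc_rate.average());  // jumps upward
  return 0;
}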
@@ -190,6 +204,9 @@ class MutableNUMASpace : public MutableSpace {
   virtual void ensure_parsability();
   virtual size_t used_in_words() const;
   virtual size_t free_in_words() const;
+
+  using MutableSpace::capacity_in_words;
+  virtual size_t capacity_in_words(Thread* thr) const;
   virtual size_t tlab_capacity(Thread* thr) const;
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
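One subtlety in this hunk: the using-declaration is required because C++ name lookup lets a derived-class overload hide every base-class overload of the same name. Without `using MutableSpace::capacity_in_words;`, calling the no-argument capacity_in_words() through a MutableNUMASpace would fail to compile. A self-contained demonstration of the rule (generic names, not the HotSpot types):

struct Base {
  int f() const              { return 1; }
  virtual int f(int x) const { return x; }
};

struct Derived : Base {
  using Base::f;                                 // re-expose Base's overloads;
  int f(int x) const override { return 2 * x; } // without the using, d.f()
};                                               // below would not compile

int main() {
  Derived d;
  return d.f() + d.f(3);  // 1 + 6 = 7: both overloads are visible
}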
@@ -1157,10 +1157,6 @@ class CommandLineFlags {
           "In the Parallel Old garbage collector use parallel dense"       \
           " prefix update")                                                \
                                                                            \
-  develop(bool, UseParallelOldGCChunkPointerCalc, true,                    \
-          "In the Parallel Old garbage collector use chucks to calculate"  \
-          " new object locations")                                         \
-                                                                           \
   product(uintx, HeapMaximumCompactionInterval, 20,                        \
           "How often should we maximally compact the heap (not allowing "  \
           "any dead space)")                                               \
@@ -1189,21 +1185,14 @@ class CommandLineFlags {
   product(uintx, ParallelCMSThreads, 0,                                    \
           "Max number of threads CMS will use for concurrent work")        \
                                                                            \
-  develop(bool, VerifyParallelOldWithMarkSweep, false,                     \
-          "Use the MarkSweep code to verify phases of Parallel Old")       \
-                                                                           \
-  develop(uintx, VerifyParallelOldWithMarkSweepInterval, 1,                \
-          "Interval at which the MarkSweep code is used to verify "        \
-          "phases of Parallel Old")                                        \
-                                                                           \
   develop(bool, ParallelOldMTUnsafeMarkBitMap, false,                      \
           "Use the Parallel Old MT unsafe in marking the bitmap")          \
                                                                            \
   develop(bool, ParallelOldMTUnsafeUpdateLiveData, false,                  \
           "Use the Parallel Old MT unsafe in update of live size")         \
                                                                            \
-  develop(bool, TraceChunkTasksQueuing, false,                             \
-          "Trace the queuing of the chunk tasks")                          \
+  develop(bool, TraceRegionTasksQueuing, false,                            \
+          "Trace the queuing of the region tasks")                         \
                                                                            \
   product(uintx, ParallelMarkingThreads, 0,                                \
           "Number of marking threads concurrent gc will use")              \
@@ -109,72 +109,72 @@ void ParallelTaskTerminator::reset_for_reuse() {
   }
 }

-bool ChunkTaskQueueWithOverflow::is_empty() {
-  return (_chunk_queue.size() == 0) &&
+bool RegionTaskQueueWithOverflow::is_empty() {
+  return (_region_queue.size() == 0) &&
     (_overflow_stack->length() == 0);
 }

-bool ChunkTaskQueueWithOverflow::stealable_is_empty() {
-  return _chunk_queue.size() == 0;
+bool RegionTaskQueueWithOverflow::stealable_is_empty() {
+  return _region_queue.size() == 0;
 }

-bool ChunkTaskQueueWithOverflow::overflow_is_empty() {
+bool RegionTaskQueueWithOverflow::overflow_is_empty() {
   return _overflow_stack->length() == 0;
 }

-void ChunkTaskQueueWithOverflow::initialize() {
-  _chunk_queue.initialize();
+void RegionTaskQueueWithOverflow::initialize() {
+  _region_queue.initialize();
   assert(_overflow_stack == 0, "Creating memory leak");
   _overflow_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<ChunkTask>(10, true);
+    new (ResourceObj::C_HEAP) GrowableArray<RegionTask>(10, true);
 }

-void ChunkTaskQueueWithOverflow::save(ChunkTask t) {
-  if (TraceChunkTasksQueuing && Verbose) {
+void RegionTaskQueueWithOverflow::save(RegionTask t) {
+  if (TraceRegionTasksQueuing && Verbose) {
     gclog_or_tty->print_cr("CTQ: save " PTR_FORMAT, t);
   }
-  if(!_chunk_queue.push(t)) {
+  if(!_region_queue.push(t)) {
     _overflow_stack->push(t);
   }
 }

-// Note that using this method will retrieve all chunks
+// Note that using this method will retrieve all regions
 // that have been saved but that it will always check
 // the overflow stack. It may be more efficient to
 // check the stealable queue and the overflow stack
 // separately.
-bool ChunkTaskQueueWithOverflow::retrieve(ChunkTask& chunk_task) {
-  bool result = retrieve_from_overflow(chunk_task);
+bool RegionTaskQueueWithOverflow::retrieve(RegionTask& region_task) {
+  bool result = retrieve_from_overflow(region_task);
   if (!result) {
-    result = retrieve_from_stealable_queue(chunk_task);
+    result = retrieve_from_stealable_queue(region_task);
   }
-  if (TraceChunkTasksQueuing && Verbose && result) {
+  if (TraceRegionTasksQueuing && Verbose && result) {
     gclog_or_tty->print_cr("  CTQ: retrieve " PTR_FORMAT, result);
   }
   return result;
 }

-bool ChunkTaskQueueWithOverflow::retrieve_from_stealable_queue(
-  ChunkTask& chunk_task) {
-  bool result = _chunk_queue.pop_local(chunk_task);
-  if (TraceChunkTasksQueuing && Verbose) {
-    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, chunk_task);
+bool RegionTaskQueueWithOverflow::retrieve_from_stealable_queue(
+  RegionTask& region_task) {
+  bool result = _region_queue.pop_local(region_task);
+  if (TraceRegionTasksQueuing && Verbose) {
+    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
   }
   return result;
 }

-bool ChunkTaskQueueWithOverflow::retrieve_from_overflow(
-  ChunkTask& chunk_task) {
+bool
+RegionTaskQueueWithOverflow::retrieve_from_overflow(RegionTask& region_task) {
   bool result;
   if (!_overflow_stack->is_empty()) {
-    chunk_task = _overflow_stack->pop();
+    region_task = _overflow_stack->pop();
     result = true;
   } else {
-    chunk_task = (ChunkTask) NULL;
+    region_task = (RegionTask) NULL;
     result = false;
   }
-  if (TraceChunkTasksQueuing && Verbose) {
-    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, chunk_task);
+  if (TraceRegionTasksQueuing && Verbose) {
+    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
   }
   return result;
 }
@@ -557,32 +557,32 @@ class StarTask {
 typedef GenericTaskQueue<StarTask>    OopStarTaskQueue;
 typedef GenericTaskQueueSet<StarTask> OopStarTaskQueueSet;

-typedef size_t ChunkTask;  // index for chunk
-typedef GenericTaskQueue<ChunkTask>    ChunkTaskQueue;
-typedef GenericTaskQueueSet<ChunkTask> ChunkTaskQueueSet;
+typedef size_t RegionTask;  // index for region
+typedef GenericTaskQueue<RegionTask>    RegionTaskQueue;
+typedef GenericTaskQueueSet<RegionTask> RegionTaskQueueSet;

-class ChunkTaskQueueWithOverflow: public CHeapObj {
+class RegionTaskQueueWithOverflow: public CHeapObj {
  protected:
-  ChunkTaskQueue             _chunk_queue;
-  GrowableArray<ChunkTask>*  _overflow_stack;
+  RegionTaskQueue            _region_queue;
+  GrowableArray<RegionTask>* _overflow_stack;

  public:
-  ChunkTaskQueueWithOverflow() : _overflow_stack(NULL) {}
+  RegionTaskQueueWithOverflow() : _overflow_stack(NULL) {}
   // Initialize both stealable queue and overflow
   void initialize();
   // Save first to stealable queue and then to overflow
-  void save(ChunkTask t);
+  void save(RegionTask t);
   // Retrieve first from overflow and then from stealable queue
-  bool retrieve(ChunkTask& chunk_index);
+  bool retrieve(RegionTask& region_index);
   // Retrieve from stealable queue
-  bool retrieve_from_stealable_queue(ChunkTask& chunk_index);
+  bool retrieve_from_stealable_queue(RegionTask& region_index);
   // Retrieve from overflow
-  bool retrieve_from_overflow(ChunkTask& chunk_index);
+  bool retrieve_from_overflow(RegionTask& region_index);
   bool is_empty();
   bool stealable_is_empty();
   bool overflow_is_empty();
-  juint stealable_size() { return _chunk_queue.size(); }
-  ChunkTaskQueue* task_queue() { return &_chunk_queue; }
+  juint stealable_size() { return _region_queue.size(); }
+  RegionTaskQueue* task_queue() { return &_region_queue; }
 };

-#define USE_ChunkTaskQueueWithOverflow
+#define USE_RegionTaskQueueWithOverflow