mirror of https://github.com/openjdk/jdk.git (synced 2025-09-20 19:14:38 +02:00)

Merge commit c8006a68d5
19 changed files with 978 additions and 1827 deletions
@@ -1350,11 +1350,7 @@ bool nmethod::can_unload(BoolObjectClosure* is_alive,
       return false;
     }
   }
-  if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-    // Cannot do this test if verification of the UseParallelOldGC
-    // code using the PSMarkSweep code is being done.
-    assert(unloading_occurred, "Inconsistency in unloading");
-  }
+  assert(unloading_occurred, "Inconsistency in unloading");
   make_unloaded(is_alive, obj);
   return true;
 }
@@ -210,10 +210,6 @@ void ParallelScavengeHeap::post_initialize() {
   PSScavenge::initialize();
   if (UseParallelOldGC) {
     PSParallelCompact::post_initialize();
-    if (VerifyParallelOldWithMarkSweep) {
-      // Will be used for verification of par old.
-      PSMarkSweep::initialize();
-    }
   } else {
     PSMarkSweep::initialize();
   }
@@ -402,7 +398,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
       return result;
     }
     if (!is_tlab &&
-        size >= (young_gen()->eden_space()->capacity_in_words() / 2)) {
+        size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
       result = old_gen()->allocate(size, is_tlab);
       if (result != NULL) {
         return result;
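Note: the hunk above makes the half-of-eden test thread-aware. On a NUMA heap each thread effectively owns only its locality group's slice of eden (see the MutableNUMASpace hunks below), so capacity_in_words(Thread::current()) returns that slice rather than the whole space. A minimal standalone sketch of the policy with invented names -- not HotSpot code:

// Illustrative sketch, not HotSpot code: the "send big requests straight
// to the old generation" policy shaped like ParallelScavengeHeap::mem_allocate().
#include <cstddef>

// eden_capacity_words: what this thread can see of eden -- the whole space
// on a flat heap, or only its locality group's share on a NUMA heap.
bool should_try_old_gen_first(size_t request_words,
                              size_t eden_capacity_words,
                              bool is_tlab) {
  // TLAB refills always stay in the young generation; only large ordinary
  // requests (at least half of the visible eden) bypass it.
  return !is_tlab && request_words >= eden_capacity_words / 2;
}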
@@ -146,7 +146,7 @@ void RefProcTaskExecutor::execute(ProcessTask& task)
 {
   ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
-  ChunkTaskQueueSet* qset = ParCompactionManager::chunk_array();
+  RegionTaskQueueSet* qset = ParCompactionManager::region_array();
   ParallelTaskTerminator terminator(parallel_gc_threads, qset);
   GCTaskQueue* q = GCTaskQueue::create();
   for(uint i=0; i<parallel_gc_threads; i++) {
@@ -205,38 +205,38 @@ void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
 }
 
 //
-// StealChunkCompactionTask
+// StealRegionCompactionTask
 //
 
 
-StealChunkCompactionTask::StealChunkCompactionTask(ParallelTaskTerminator* t) :
-  _terminator(t) {};
+StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t):
+  _terminator(t) {}
 
-void StealChunkCompactionTask::do_it(GCTaskManager* manager, uint which) {
+void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
-  NOT_PRODUCT(TraceTime tm("StealChunkCompactionTask",
+  NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask",
     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
 
-  // Has to drain stacks first because there may be chunks on
+  // Has to drain stacks first because there may be regions on
   // preloaded onto the stack and this thread may never have
   // done a draining task.  Are the draining tasks needed?
 
-  cm->drain_chunk_stacks();
+  cm->drain_region_stacks();
 
-  size_t chunk_index = 0;
+  size_t region_index = 0;
   int random_seed = 17;
 
   // If we're the termination task, try 10 rounds of stealing before
   // setting the termination flag
 
   while(true) {
-    if (ParCompactionManager::steal(which, &random_seed, chunk_index)) {
-      PSParallelCompact::fill_and_update_chunk(cm, chunk_index);
-      cm->drain_chunk_stacks();
+    if (ParCompactionManager::steal(which, &random_seed, region_index)) {
+      PSParallelCompact::fill_and_update_region(cm, region_index);
+      cm->drain_region_stacks();
     } else {
       if (terminator()->offer_termination()) {
         break;
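The loop above is the standard drain-then-steal shape: finish the regions already on your own stacks, then steal region indices from other workers until every worker has offered termination. A self-contained sketch of that control flow under simplifying assumptions -- one mutex-guarded shared queue stands in for the per-thread stealable deques and for ParallelTaskTerminator, and every name is invented:

// Illustrative sketch only -- not HotSpot code.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

struct WorkPool {
  std::mutex mu;
  std::condition_variable cv;
  std::queue<size_t> regions;   // region indices, like RegionTask
  int idle = 0;
  int nworkers;

  explicit WorkPool(int n) : nworkers(n) {}

  // Returns false only when all workers are idle and no work remains,
  // mirroring the role of ParallelTaskTerminator::offer_termination().
  bool get(size_t* out) {
    std::unique_lock<std::mutex> lock(mu);
    ++idle;
    while (regions.empty() && idle < nworkers) cv.wait(lock);
    if (regions.empty()) { cv.notify_all(); return false; }  // quiescent
    --idle;
    *out = regions.front();
    regions.pop();
    return true;
  }
};

int main() {
  WorkPool pool(4);
  for (size_t i = 0; i < 64; ++i) pool.regions.push(i);
  std::vector<std::thread> workers;
  for (int w = 0; w < 4; ++w) {
    workers.emplace_back([&pool, w] {
      size_t region;
      while (pool.get(&region)) {
        // stand-in for PSParallelCompact::fill_and_update_region()
        std::printf("worker %d fills region %zu\n", w, region);
      }
    });
  }
  for (auto& t : workers) t.join();
}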
@@ -249,11 +249,10 @@ void StealChunkCompactionTask::do_it(GCTaskManager* manager, uint which) {
 
 UpdateDensePrefixTask::UpdateDensePrefixTask(
                                    PSParallelCompact::SpaceId space_id,
-                                   size_t chunk_index_start,
-                                   size_t chunk_index_end) :
-  _space_id(space_id), _chunk_index_start(chunk_index_start),
-  _chunk_index_end(chunk_index_end)
-{}
+                                   size_t region_index_start,
+                                   size_t region_index_end) :
+  _space_id(space_id), _region_index_start(region_index_start),
+  _region_index_end(region_index_end) {}
 
 void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
 
@@ -265,8 +264,8 @@ void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
 
   PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
                                                          _space_id,
-                                                         _chunk_index_start,
-                                                         _chunk_index_end);
+                                                         _region_index_start,
+                                                         _region_index_end);
 }
 
 void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
@@ -278,6 +277,6 @@ void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
 
-  // Process any chunks already in the compaction managers stacks.
-  cm->drain_chunk_stacks();
+  // Process any regions already in the compaction managers stacks.
+  cm->drain_region_stacks();
 }
@@ -188,18 +188,18 @@ class StealMarkingTask : public GCTask {
 };
 
 //
-// StealChunkCompactionTask
+// StealRegionCompactionTask
 //
 // This task is used to distribute work to idle threads.
 //
 
-class StealChunkCompactionTask : public GCTask {
+class StealRegionCompactionTask : public GCTask {
  private:
   ParallelTaskTerminator* const _terminator;
  public:
-  StealChunkCompactionTask(ParallelTaskTerminator* t);
+  StealRegionCompactionTask(ParallelTaskTerminator* t);
 
-  char* name() { return (char *)"steal-chunk-task"; }
+  char* name() { return (char *)"steal-region-task"; }
   ParallelTaskTerminator* terminator() { return _terminator; }
 
   virtual void do_it(GCTaskManager* manager, uint which);
@@ -215,15 +215,15 @@ class StealChunkCompactionTask : public GCTask {
 class UpdateDensePrefixTask : public GCTask {
  private:
   PSParallelCompact::SpaceId _space_id;
-  size_t _chunk_index_start;
-  size_t _chunk_index_end;
+  size_t _region_index_start;
+  size_t _region_index_end;
 
  public:
   char* name() { return (char *)"update-dense_prefix-task"; }
 
   UpdateDensePrefixTask(PSParallelCompact::SpaceId space_id,
-                        size_t chunk_index_start,
-                        size_t chunk_index_end);
+                        size_t region_index_start,
+                        size_t region_index_end);
 
   virtual void do_it(GCTaskManager* manager, uint which);
 };
@@ -231,17 +231,17 @@ class UpdateDensePrefixTask : public GCTask {
 //
 // DrainStacksCompactionTask
 //
-// This task processes chunks that have been added to the stacks of each
+// This task processes regions that have been added to the stacks of each
 // compaction manager.
 //
 // Trying to use one draining thread does not work because there are no
 // guarantees about which task will be picked up by which thread.  For example,
-// if thread A gets all the preloaded chunks, thread A may not get a draining
+// if thread A gets all the preloaded regions, thread A may not get a draining
 // task (they may all be done by other threads).
 //
 
 class DrainStacksCompactionTask : public GCTask {
  public:
-  char* name() { return (char *)"drain-chunk-task"; }
+  char* name() { return (char *)"drain-region-task"; }
   virtual void do_it(GCTaskManager* manager, uint which);
 };
@@ -30,7 +30,7 @@ ParCompactionManager** ParCompactionManager::_manager_array = NULL;
 OopTaskQueueSet*      ParCompactionManager::_stack_array = NULL;
 ObjectStartArray*     ParCompactionManager::_start_array = NULL;
 ParMarkBitMap*        ParCompactionManager::_mark_bitmap = NULL;
-ChunkTaskQueueSet*    ParCompactionManager::_chunk_array = NULL;
+RegionTaskQueueSet*   ParCompactionManager::_region_array = NULL;
 
 ParCompactionManager::ParCompactionManager() :
     _action(CopyAndUpdate) {
|
||||||
|
|
||||||
// We want the overflow stack to be permanent
|
// We want the overflow stack to be permanent
|
||||||
_overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
|
_overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
|
||||||
#ifdef USE_ChunkTaskQueueWithOverflow
|
#ifdef USE_RegionTaskQueueWithOverflow
|
||||||
chunk_stack()->initialize();
|
region_stack()->initialize();
|
||||||
#else
|
#else
|
||||||
chunk_stack()->initialize();
|
region_stack()->initialize();
|
||||||
|
|
||||||
// We want the overflow stack to be permanent
|
// We want the overflow stack to be permanent
|
||||||
_chunk_overflow_stack =
|
_region_overflow_stack =
|
||||||
new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
|
new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@@ -86,18 +86,18 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
 
   _stack_array = new OopTaskQueueSet(parallel_gc_threads);
   guarantee(_stack_array != NULL, "Count not initialize promotion manager");
-  _chunk_array = new ChunkTaskQueueSet(parallel_gc_threads);
-  guarantee(_chunk_array != NULL, "Count not initialize promotion manager");
+  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
+  guarantee(_region_array != NULL, "Count not initialize promotion manager");
 
   // Create and register the ParCompactionManager(s) for the worker threads.
   for(uint i=0; i<parallel_gc_threads; i++) {
     _manager_array[i] = new ParCompactionManager();
     guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
     stack_array()->register_queue(i, _manager_array[i]->marking_stack());
-#ifdef USE_ChunkTaskQueueWithOverflow
-    chunk_array()->register_queue(i, _manager_array[i]->chunk_stack()->task_queue());
+#ifdef USE_RegionTaskQueueWithOverflow
+    region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
 #else
-    chunk_array()->register_queue(i, _manager_array[i]->chunk_stack());
+    region_array()->register_queue(i, _manager_array[i]->region_stack());
 #endif
   }
 
@@ -153,31 +153,31 @@ oop ParCompactionManager::retrieve_for_scanning() {
   return NULL;
 }
 
-// Save chunk on a stack
-void ParCompactionManager::save_for_processing(size_t chunk_index) {
+// Save region on a stack
+void ParCompactionManager::save_for_processing(size_t region_index) {
 #ifdef ASSERT
   const ParallelCompactData& sd = PSParallelCompact::summary_data();
-  ParallelCompactData::ChunkData* const chunk_ptr = sd.chunk(chunk_index);
-  assert(chunk_ptr->claimed(), "must be claimed");
-  assert(chunk_ptr->_pushed++ == 0, "should only be pushed once");
+  ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
+  assert(region_ptr->claimed(), "must be claimed");
+  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
 #endif
-  chunk_stack_push(chunk_index);
+  region_stack_push(region_index);
 }
 
-void ParCompactionManager::chunk_stack_push(size_t chunk_index) {
+void ParCompactionManager::region_stack_push(size_t region_index) {
 
-#ifdef USE_ChunkTaskQueueWithOverflow
-  chunk_stack()->save(chunk_index);
+#ifdef USE_RegionTaskQueueWithOverflow
+  region_stack()->save(region_index);
 #else
-  if(!chunk_stack()->push(chunk_index)) {
-    chunk_overflow_stack()->push(chunk_index);
+  if(!region_stack()->push(region_index)) {
+    region_overflow_stack()->push(region_index);
   }
 #endif
 }
 
-bool ParCompactionManager::retrieve_for_processing(size_t& chunk_index) {
-#ifdef USE_ChunkTaskQueueWithOverflow
-  return chunk_stack()->retrieve(chunk_index);
+bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
+#ifdef USE_RegionTaskQueueWithOverflow
+  return region_stack()->retrieve(region_index);
 #else
   // Should not be used in the parallel case
   ShouldNotReachHere();
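save_for_processing() above guards against double-queuing a region with a debug-only counter that is incremented inside the assert itself, so the bookkeeping vanishes entirely in product builds. A standalone sketch of that idiom, with stand-in types rather than HotSpot's:

// Illustrative sketch, not HotSpot code.
#include <cassert>
#include <cstddef>
#include <vector>

struct RegionData {
  bool claimed = false;
#ifndef NDEBUG
  int pushed = 0;      // debug-only bookkeeping, like _pushed
#endif
};

void save_for_processing(std::vector<RegionData>& summary,
                         std::vector<size_t>& region_stack,
                         size_t region_index) {
  RegionData& rd = summary.at(region_index);
  assert(rd.claimed && "must be claimed");
#ifndef NDEBUG
  // The increment lives inside the assert on purpose: a second push of the
  // same region index trips it, and release builds pay nothing.
  assert(rd.pushed++ == 0 && "should only be pushed once");
#endif
  region_stack.push_back(region_index);
}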
@@ -230,14 +230,14 @@ void ParCompactionManager::drain_marking_stacks(OopClosure* blk) {
   assert(overflow_stack()->length() == 0, "Sanity");
 }
 
-void ParCompactionManager::drain_chunk_overflow_stack() {
-  size_t chunk_index = (size_t) -1;
-  while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
-    PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+void ParCompactionManager::drain_region_overflow_stack() {
+  size_t region_index = (size_t) -1;
+  while(region_stack()->retrieve_from_overflow(region_index)) {
+    PSParallelCompact::fill_and_update_region(this, region_index);
   }
 }
 
-void ParCompactionManager::drain_chunk_stacks() {
+void ParCompactionManager::drain_region_stacks() {
 #ifdef ASSERT
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
@@ -249,42 +249,42 @@ void ParCompactionManager::drain_region_stacks() {
 #if 1 // def DO_PARALLEL - the serial code hasn't been updated
   do {
 
-#ifdef USE_ChunkTaskQueueWithOverflow
+#ifdef USE_RegionTaskQueueWithOverflow
     // Drain overflow stack first, so other threads can steal from
     // claimed stack while we work.
-    size_t chunk_index = (size_t) -1;
-    while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
-      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+    size_t region_index = (size_t) -1;
+    while(region_stack()->retrieve_from_overflow(region_index)) {
+      PSParallelCompact::fill_and_update_region(this, region_index);
     }
 
-    while (chunk_stack()->retrieve_from_stealable_queue(chunk_index)) {
-      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+    while (region_stack()->retrieve_from_stealable_queue(region_index)) {
+      PSParallelCompact::fill_and_update_region(this, region_index);
     }
-  } while (!chunk_stack()->is_empty());
+  } while (!region_stack()->is_empty());
 #else
     // Drain overflow stack first, so other threads can steal from
     // claimed stack while we work.
-    while(!chunk_overflow_stack()->is_empty()) {
-      size_t chunk_index = chunk_overflow_stack()->pop();
-      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+    while(!region_overflow_stack()->is_empty()) {
+      size_t region_index = region_overflow_stack()->pop();
+      PSParallelCompact::fill_and_update_region(this, region_index);
     }
 
-    size_t chunk_index = -1;
+    size_t region_index = -1;
     // obj is a reference!!!
-    while (chunk_stack()->pop_local(chunk_index)) {
+    while (region_stack()->pop_local(region_index)) {
       // It would be nice to assert about the type of objects we might
       // pop, but they can come from anywhere, unfortunately.
-      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
+      PSParallelCompact::fill_and_update_region(this, region_index);
     }
-  } while((chunk_stack()->size() != 0) ||
-          (chunk_overflow_stack()->length() != 0));
+  } while((region_stack()->size() != 0) ||
+          (region_overflow_stack()->length() != 0));
 #endif
 
-#ifdef USE_ChunkTaskQueueWithOverflow
-  assert(chunk_stack()->is_empty(), "Sanity");
+#ifdef USE_RegionTaskQueueWithOverflow
+  assert(region_stack()->is_empty(), "Sanity");
 #else
-  assert(chunk_stack()->size() == 0, "Sanity");
-  assert(chunk_overflow_stack()->length() == 0, "Sanity");
+  assert(region_stack()->size() == 0, "Sanity");
+  assert(region_overflow_stack()->length() == 0, "Sanity");
 #endif
 #else
   oop obj;
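The push above (region_stack_push) and the drain here follow one discipline: a bounded stealable queue is primary, an unbounded private overflow stack is the fallback, and draining empties the overflow first so the stealable queue stays visible to thieves. A minimal sketch of that pair of operations, all types invented stand-ins:

// Illustrative sketch, not HotSpot code.
#include <cstddef>
#include <vector>

template <size_t N>
class BoundedTaskQueue {
  size_t _elems[N];
  size_t _size = 0;
public:
  bool push(size_t t) {            // fails instead of blocking, in the
    if (_size == N) return false;  // spirit of a fixed-size task queue
    _elems[_size++] = t;
    return true;
  }
  bool pop(size_t* t) {
    if (_size == 0) return false;
    *t = _elems[--_size];
    return true;
  }
};

class RegionStack {
  BoundedTaskQueue<1024> _queue;   // stealable part
  std::vector<size_t> _overflow;   // private, unbounded fallback
public:
  void push(size_t region_index) {
    if (!_queue.push(region_index)) {
      _overflow.push_back(region_index);   // never lose a region
    }
  }
  // Drain the overflow first so the stealable queue keeps work available
  // for other threads, echoing the comment in drain_region_stacks().
  bool pop(size_t* region_index) {
    if (!_overflow.empty()) {
      *region_index = _overflow.back();
      _overflow.pop_back();
      return true;
    }
    return _queue.pop(region_index);
  }
};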
@@ -52,7 +52,7 @@ class ParCompactionManager : public CHeapObj {
   friend class ParallelTaskTerminator;
   friend class ParMarkBitMap;
   friend class PSParallelCompact;
-  friend class StealChunkCompactionTask;
+  friend class StealRegionCompactionTask;
   friend class UpdateAndFillClosure;
   friend class RefProcTaskExecutor;
 
@@ -72,27 +72,27 @@ class ParCompactionManager : public CHeapObj {
   // ------------------------  End don't putback if not needed
 
  private:
   static ParCompactionManager** _manager_array;
   static OopTaskQueueSet*       _stack_array;
   static ObjectStartArray*      _start_array;
-  static ChunkTaskQueueSet*     _chunk_array;
+  static RegionTaskQueueSet*    _region_array;
   static PSOldGen*              _old_gen;
 
   OopTaskQueue                  _marking_stack;
   GrowableArray<oop>*           _overflow_stack;
   // Is there a way to reuse the _marking_stack for the
-  // saving empty chunks?  For now just create a different
+  // saving empty regions?  For now just create a different
   // type of TaskQueue.
 
-#ifdef USE_ChunkTaskQueueWithOverflow
-  ChunkTaskQueueWithOverflow    _chunk_stack;
+#ifdef USE_RegionTaskQueueWithOverflow
+  RegionTaskQueueWithOverflow   _region_stack;
 #else
-  ChunkTaskQueue                _chunk_stack;
-  GrowableArray<size_t>*        _chunk_overflow_stack;
+  RegionTaskQueue               _region_stack;
+  GrowableArray<size_t>*        _region_overflow_stack;
 #endif
 
 #if 1  // does this happen enough to need a per thread stack?
   GrowableArray<Klass*>*        _revisit_klass_stack;
 #endif
   static ParMarkBitMap* _mark_bitmap;
 
@@ -100,21 +100,22 @@ class ParCompactionManager : public CHeapObj {
 
   static PSOldGen* old_gen()             { return _old_gen; }
   static ObjectStartArray* start_array() { return _start_array; }
   static OopTaskQueueSet* stack_array()  { return _stack_array; }
 
   static void initialize(ParMarkBitMap* mbm);
 
  protected:
   // Array of tasks.  Needed by the ParallelTaskTerminator.
-  static ChunkTaskQueueSet* chunk_array() { return _chunk_array; }
+  static RegionTaskQueueSet* region_array() { return _region_array; }
 
   OopTaskQueue* marking_stack()          { return &_marking_stack; }
   GrowableArray<oop>* overflow_stack()   { return _overflow_stack; }
-#ifdef USE_ChunkTaskQueueWithOverflow
-  ChunkTaskQueueWithOverflow* chunk_stack() { return &_chunk_stack; }
+#ifdef USE_RegionTaskQueueWithOverflow
+  RegionTaskQueueWithOverflow* region_stack() { return &_region_stack; }
 #else
-  ChunkTaskQueue* chunk_stack() { return &_chunk_stack; }
-  GrowableArray<size_t>* chunk_overflow_stack() { return _chunk_overflow_stack; }
+  RegionTaskQueue* region_stack() { return &_region_stack; }
+  GrowableArray<size_t>* region_overflow_stack() {
+    return _region_overflow_stack;
+  }
 #endif
 
   // Pushes onto the marking stack.  If the marking stack is full,
@@ -123,9 +124,9 @@ class ParCompactionManager : public CHeapObj {
   // Do not implement an equivalent stack_pop.  Deal with the
   // marking stack and overflow stack directly.
 
-  // Pushes onto the chunk stack.  If the chunk stack is full,
-  // pushes onto the chunk overflow stack.
-  void chunk_stack_push(size_t chunk_index);
+  // Pushes onto the region stack.  If the region stack is full,
+  // pushes onto the region overflow stack.
+  void region_stack_push(size_t region_index);
  public:
 
   Action action() { return _action; }
@@ -160,10 +161,10 @@ class ParCompactionManager : public CHeapObj {
   // Get a oop for scanning.  If returns null, no oop were found.
   oop retrieve_for_scanning();
 
-  // Save chunk for later processing.  Must not fail.
-  void save_for_processing(size_t chunk_index);
-  // Get a chunk for processing.  If returns null, no chunk were found.
-  bool retrieve_for_processing(size_t& chunk_index);
+  // Save region for later processing.  Must not fail.
+  void save_for_processing(size_t region_index);
+  // Get a region for processing.  If returns null, no region were found.
+  bool retrieve_for_processing(size_t& region_index);
 
   // Access function for compaction managers
   static ParCompactionManager* gc_thread_compaction_manager(int index);
@@ -172,18 +173,18 @@ class ParCompactionManager : public CHeapObj {
     return stack_array()->steal(queue_num, seed, t);
   }
 
-  static bool steal(int queue_num, int* seed, ChunkTask& t) {
-    return chunk_array()->steal(queue_num, seed, t);
+  static bool steal(int queue_num, int* seed, RegionTask& t) {
+    return region_array()->steal(queue_num, seed, t);
   }
 
   // Process tasks remaining on any stack
   void drain_marking_stacks(OopClosure *blk);
 
   // Process tasks remaining on any stack
-  void drain_chunk_stacks();
+  void drain_region_stacks();
 
   // Process tasks remaining on any stack
-  void drain_chunk_overflow_stack();
+  void drain_region_overflow_stack();
 
   // Debugging support
 #ifdef ASSERT
@@ -35,9 +35,7 @@ void PSMarkSweep::initialize() {
   _ref_processor = new ReferenceProcessor(mr,
                                           true,    // atomic_discovery
                                           false);  // mt_discovery
-  if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-    _counters = new CollectorCounters("PSMarkSweep", 1);
-  }
+  _counters = new CollectorCounters("PSMarkSweep", 1);
 }
 
 // This method contains all heap specific policy for invoking mark sweep.
@@ -518,9 +516,6 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   follow_stack();
 
   // Process reference objects found during marking
-
-  // Skipping the reference processing for VerifyParallelOldWithMarkSweep
-  // affects the marking (makes it different).
   {
     ReferencePolicy *soft_ref_policy;
     if (clear_all_softrefs) {
@@ -152,20 +152,15 @@ void PSMarkSweepDecorator::precompact() {
       oop(q)->forward_to(oop(compact_top));
       assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
     } else {
-      // Don't clear the mark since it's confuses parallel old
-      // verification.
-      if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-        // if the object isn't moving we can just set the mark to the default
-        // mark and handle it specially later on.
-        oop(q)->init_mark();
-      }
+      // if the object isn't moving we can just set the mark to the default
+      // mark and handle it specially later on.
+      oop(q)->init_mark();
       assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
     }
 
     // Update object start array
-    if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-      if (start_array)
-        start_array->allocate_block(compact_top);
+    if (start_array) {
+      start_array->allocate_block(compact_top);
     }
 
     VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), size));
@@ -219,19 +214,14 @@ void PSMarkSweepDecorator::precompact() {
       assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
     } else {
       // if the object isn't moving we can just set the mark to the default
-      // Don't clear the mark since it's confuses parallel old
-      // verification.
-      if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-        // mark and handle it specially later on.
-        oop(q)->init_mark();
-      }
+      // mark and handle it specially later on.
+      oop(q)->init_mark();
       assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
     }
 
-    if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
-      // Update object start array
-      if (start_array)
-        start_array->allocate_block(compact_top);
+    // Update object start array
+    if (start_array) {
+      start_array->allocate_block(compact_top);
     }
 
     VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(oop(q), sz));
@@ -152,9 +152,7 @@ void PSOldGen::precompact() {
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   // Reset start array first.
-  debug_only(if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {)
   start_array()->reset();
-  debug_only(})
 
   object_mark_sweep()->precompact();
 
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -123,8 +123,6 @@ void PSPermGen::move_and_update(ParCompactionManager* cm) {
 
 void PSPermGen::precompact() {
   // Reset start array first.
-  debug_only(if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {)
   _start_array.reset();
-  debug_only(})
   object_mark_sweep()->precompact();
 }
@@ -50,7 +50,8 @@ class ImmutableSpace: public CHeapObj {
   size_t capacity_in_bytes() const { return capacity_in_words() * HeapWordSize; }
 
   // Size computations.  Sizes are in heapwords.
   size_t capacity_in_words() const { return pointer_delta(end(), bottom()); }
+  virtual size_t capacity_in_words(Thread*) const { return capacity_in_words(); }
 
   // Iteration.
   virtual void oop_iterate(OopClosure* cl);
@@ -23,13 +23,6 @@
  */
 
 inline void MarkSweep::mark_object(oop obj) {
-#ifndef SERIALGC
-  if (UseParallelOldGC && VerifyParallelOldWithMarkSweep) {
-    assert(PSParallelCompact::mark_bitmap()->is_marked(obj),
-           "Should be marked in the marking bitmap");
-  }
-#endif // SERIALGC
-
   // some marks may contain information we need to preserve so we store them away
   // and overwrite the mark.  We'll restore it at the end of markSweep.
   markOop mark = obj->mark();
@@ -181,6 +181,25 @@ size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
   return lgrp_spaces()->at(i)->space()->free_in_bytes();
 }
 
+
+size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
+  guarantee(thr != NULL, "No thread");
+  int lgrp_id = thr->lgrp_id();
+  if (lgrp_id == -1) {
+    if (lgrp_spaces()->length() > 0) {
+      return capacity_in_words() / lgrp_spaces()->length();
+    } else {
+      assert(false, "There should be at least one locality group");
+      return 0;
+    }
+  }
+  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
+  if (i == -1) {
+    return 0;
+  }
+  return lgrp_spaces()->at(i)->space()->capacity_in_words();
+}
+
 // Check if the NUMA topology has changed. Add and remove spaces if needed.
 // The update can be forced by setting the force parameter equal to true.
 bool MutableNUMASpace::update_layout(bool force) {
|
||||||
i = os::random() % lgrp_spaces()->length();
|
i = os::random() % lgrp_spaces()->length();
|
||||||
}
|
}
|
||||||
|
|
||||||
MutableSpace *s = lgrp_spaces()->at(i)->space();
|
LGRPSpace* ls = lgrp_spaces()->at(i);
|
||||||
|
MutableSpace *s = ls->space();
|
||||||
HeapWord *p = s->allocate(size);
|
HeapWord *p = s->allocate(size);
|
||||||
|
|
||||||
if (p != NULL) {
|
if (p != NULL) {
|
||||||
|
@ -743,6 +763,9 @@ HeapWord* MutableNUMASpace::allocate(size_t size) {
|
||||||
*(int*)i = 0;
|
*(int*)i = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (p == NULL) {
|
||||||
|
ls->set_allocation_failed();
|
||||||
|
}
|
||||||
return p;
|
return p;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -761,7 +784,8 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
   if (i == -1) {
     i = os::random() % lgrp_spaces()->length();
   }
-  MutableSpace *s = lgrp_spaces()->at(i)->space();
+  LGRPSpace *ls = lgrp_spaces()->at(i);
+  MutableSpace *s = ls->space();
   HeapWord *p = s->cas_allocate(size);
   if (p != NULL) {
     size_t remainder = pointer_delta(s->end(), p + size);
@@ -790,6 +814,9 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
       *(int*)i = 0;
     }
   }
+  if (p == NULL) {
+    ls->set_allocation_failed();
+  }
   return p;
 }
 
@@ -60,6 +60,7 @@ class MutableNUMASpace : public MutableSpace {
     MutableSpace* _space;
     MemRegion _invalid_region;
     AdaptiveWeightedAverage *_alloc_rate;
+    bool _allocation_failed;
 
     struct SpaceStats {
       size_t _local_space, _remote_space, _unbiased_space, _uncommited_space;
|
||||||
char* last_page_scanned() { return _last_page_scanned; }
|
char* last_page_scanned() { return _last_page_scanned; }
|
||||||
void set_last_page_scanned(char* p) { _last_page_scanned = p; }
|
void set_last_page_scanned(char* p) { _last_page_scanned = p; }
|
||||||
public:
|
public:
|
||||||
LGRPSpace(int l) : _lgrp_id(l), _last_page_scanned(NULL) {
|
LGRPSpace(int l) : _lgrp_id(l), _last_page_scanned(NULL), _allocation_failed(false) {
|
||||||
_space = new MutableSpace();
|
_space = new MutableSpace();
|
||||||
_alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight);
|
_alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight);
|
||||||
}
|
}
|
||||||
|
@@ -103,8 +104,21 @@ class MutableNUMASpace : public MutableSpace {
       return *(int*)lgrp_id_value == p->lgrp_id();
     }
 
+    // Report a failed allocation.
+    void set_allocation_failed() { _allocation_failed = true; }
+
     void sample() {
-      alloc_rate()->sample(space()->used_in_bytes());
+      // If there was a failed allocation make allocation rate equal
+      // to the size of the whole chunk. This ensures the progress of
+      // the adaptation process.
+      size_t alloc_rate_sample;
+      if (_allocation_failed) {
+        alloc_rate_sample = space()->capacity_in_bytes();
+        _allocation_failed = false;
+      } else {
+        alloc_rate_sample = space()->used_in_bytes();
+      }
+      alloc_rate()->sample(alloc_rate_sample);
     }
 
     MemRegion invalid_region() const                { return _invalid_region; }
|
||||||
virtual void ensure_parsability();
|
virtual void ensure_parsability();
|
||||||
virtual size_t used_in_words() const;
|
virtual size_t used_in_words() const;
|
||||||
virtual size_t free_in_words() const;
|
virtual size_t free_in_words() const;
|
||||||
|
|
||||||
|
using MutableSpace::capacity_in_words;
|
||||||
|
virtual size_t capacity_in_words(Thread* thr) const;
|
||||||
virtual size_t tlab_capacity(Thread* thr) const;
|
virtual size_t tlab_capacity(Thread* thr) const;
|
||||||
virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
|
virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
|
||||||
|
|
||||||
|
|
|
@@ -1157,10 +1157,6 @@ class CommandLineFlags {
           "In the Parallel Old garbage collector use parallel dense"        \
           " prefix update")                                                 \
                                                                             \
-  develop(bool, UseParallelOldGCChunkPointerCalc, true,                     \
-          "In the Parallel Old garbage collector use chucks to calculate"   \
-          " new object locations")                                          \
-                                                                            \
   product(uintx, HeapMaximumCompactionInterval, 20,                         \
           "How often should we maximally compact the heap (not allowing "   \
           "any dead space)")                                                \
@@ -1189,21 +1185,14 @@ class CommandLineFlags {
   product(uintx, ParallelCMSThreads, 0,                                     \
           "Max number of threads CMS will use for concurrent work")         \
                                                                             \
-  develop(bool, VerifyParallelOldWithMarkSweep, false,                      \
-          "Use the MarkSweep code to verify phases of Parallel Old")        \
-                                                                            \
-  develop(uintx, VerifyParallelOldWithMarkSweepInterval, 1,                 \
-          "Interval at which the MarkSweep code is used to verify "         \
-          "phases of Parallel Old")                                         \
-                                                                            \
   develop(bool, ParallelOldMTUnsafeMarkBitMap, false,                       \
           "Use the Parallel Old MT unsafe in marking the bitmap")           \
                                                                             \
   develop(bool, ParallelOldMTUnsafeUpdateLiveData, false,                   \
           "Use the Parallel Old MT unsafe in update of live size")          \
                                                                             \
-  develop(bool, TraceChunkTasksQueuing, false,                              \
-          "Trace the queuing of the chunk tasks")                           \
+  develop(bool, TraceRegionTasksQueuing, false,                             \
+          "Trace the queuing of the region tasks")                          \
                                                                             \
   product(uintx, ParallelMarkingThreads, 0,                                 \
           "Number of marking threads concurrent gc will use")               \
@@ -109,72 +109,72 @@ void ParallelTaskTerminator::reset_for_reuse() {
   }
 }
 
-bool ChunkTaskQueueWithOverflow::is_empty() {
-  return (_chunk_queue.size() == 0) &&
+bool RegionTaskQueueWithOverflow::is_empty() {
+  return (_region_queue.size() == 0) &&
          (_overflow_stack->length() == 0);
 }
 
-bool ChunkTaskQueueWithOverflow::stealable_is_empty() {
-  return _chunk_queue.size() == 0;
+bool RegionTaskQueueWithOverflow::stealable_is_empty() {
+  return _region_queue.size() == 0;
 }
 
-bool ChunkTaskQueueWithOverflow::overflow_is_empty() {
+bool RegionTaskQueueWithOverflow::overflow_is_empty() {
   return _overflow_stack->length() == 0;
 }
 
-void ChunkTaskQueueWithOverflow::initialize() {
-  _chunk_queue.initialize();
+void RegionTaskQueueWithOverflow::initialize() {
+  _region_queue.initialize();
   assert(_overflow_stack == 0, "Creating memory leak");
   _overflow_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<ChunkTask>(10, true);
+    new (ResourceObj::C_HEAP) GrowableArray<RegionTask>(10, true);
 }
 
-void ChunkTaskQueueWithOverflow::save(ChunkTask t) {
-  if (TraceChunkTasksQueuing && Verbose) {
+void RegionTaskQueueWithOverflow::save(RegionTask t) {
+  if (TraceRegionTasksQueuing && Verbose) {
     gclog_or_tty->print_cr("CTQ: save " PTR_FORMAT, t);
   }
-  if(!_chunk_queue.push(t)) {
+  if(!_region_queue.push(t)) {
     _overflow_stack->push(t);
   }
 }
 
-// Note that using this method will retrieve all chunks
+// Note that using this method will retrieve all regions
 // that have been saved but that it will always check
 // the overflow stack.  It may be more efficient to
 // check the stealable queue and the overflow stack
 // separately.
-bool ChunkTaskQueueWithOverflow::retrieve(ChunkTask& chunk_task) {
-  bool result = retrieve_from_overflow(chunk_task);
+bool RegionTaskQueueWithOverflow::retrieve(RegionTask& region_task) {
+  bool result = retrieve_from_overflow(region_task);
   if (!result) {
-    result = retrieve_from_stealable_queue(chunk_task);
+    result = retrieve_from_stealable_queue(region_task);
   }
-  if (TraceChunkTasksQueuing && Verbose && result) {
+  if (TraceRegionTasksQueuing && Verbose && result) {
    gclog_or_tty->print_cr("  CTQ: retrieve " PTR_FORMAT, result);
  }
  return result;
}
 
-bool ChunkTaskQueueWithOverflow::retrieve_from_stealable_queue(
-                                   ChunkTask& chunk_task) {
-  bool result = _chunk_queue.pop_local(chunk_task);
-  if (TraceChunkTasksQueuing && Verbose) {
-    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, chunk_task);
+bool RegionTaskQueueWithOverflow::retrieve_from_stealable_queue(
+                                   RegionTask& region_task) {
+  bool result = _region_queue.pop_local(region_task);
+  if (TraceRegionTasksQueuing && Verbose) {
+    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
   }
   return result;
 }
 
-bool ChunkTaskQueueWithOverflow::retrieve_from_overflow(
-                                   ChunkTask& chunk_task) {
+bool
+RegionTaskQueueWithOverflow::retrieve_from_overflow(RegionTask& region_task) {
   bool result;
   if (!_overflow_stack->is_empty()) {
-    chunk_task = _overflow_stack->pop();
+    region_task = _overflow_stack->pop();
     result = true;
   } else {
-    chunk_task = (ChunkTask) NULL;
+    region_task = (RegionTask) NULL;
    result = false;
  }
-  if (TraceChunkTasksQueuing && Verbose) {
-    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, chunk_task);
+  if (TraceRegionTasksQueuing && Verbose) {
+    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
   }
   return result;
 }
@@ -557,32 +557,32 @@ class StarTask {
 typedef GenericTaskQueue<StarTask>    OopStarTaskQueue;
 typedef GenericTaskQueueSet<StarTask> OopStarTaskQueueSet;
 
-typedef size_t ChunkTask;  // index for chunk
-typedef GenericTaskQueue<ChunkTask>    ChunkTaskQueue;
-typedef GenericTaskQueueSet<ChunkTask> ChunkTaskQueueSet;
+typedef size_t RegionTask;  // index for region
+typedef GenericTaskQueue<RegionTask>    RegionTaskQueue;
+typedef GenericTaskQueueSet<RegionTask> RegionTaskQueueSet;
 
-class ChunkTaskQueueWithOverflow: public CHeapObj {
+class RegionTaskQueueWithOverflow: public CHeapObj {
  protected:
-  ChunkTaskQueue              _chunk_queue;
-  GrowableArray<ChunkTask>*   _overflow_stack;
+  RegionTaskQueue             _region_queue;
+  GrowableArray<RegionTask>*  _overflow_stack;
 
  public:
-  ChunkTaskQueueWithOverflow() : _overflow_stack(NULL) {}
+  RegionTaskQueueWithOverflow() : _overflow_stack(NULL) {}
   // Initialize both stealable queue and overflow
   void initialize();
   // Save first to stealable queue and then to overflow
-  void save(ChunkTask t);
+  void save(RegionTask t);
   // Retrieve first from overflow and then from stealable queue
-  bool retrieve(ChunkTask& chunk_index);
+  bool retrieve(RegionTask& region_index);
   // Retrieve from stealable queue
-  bool retrieve_from_stealable_queue(ChunkTask& chunk_index);
+  bool retrieve_from_stealable_queue(RegionTask& region_index);
   // Retrieve from overflow
-  bool retrieve_from_overflow(ChunkTask& chunk_index);
+  bool retrieve_from_overflow(RegionTask& region_index);
   bool is_empty();
   bool stealable_is_empty();
   bool overflow_is_empty();
-  juint stealable_size() { return _chunk_queue.size(); }
-  ChunkTaskQueue* task_queue() { return &_chunk_queue; }
+  juint stealable_size() { return _region_queue.size(); }
+  RegionTaskQueue* task_queue() { return &_region_queue; }
 };
 
-#define USE_ChunkTaskQueueWithOverflow
+#define USE_RegionTaskQueueWithOverflow
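For context: a RegionTask is just a size_t region index, and a GenericTaskQueue is a bounded double-ended queue in which the owning thread works one end while thieves take the other. A deliberately simplified, single-threaded toy showing only that end-discipline -- the real queue is lock-free, and this sketch makes no concurrency claims:

// Illustrative sketch, not HotSpot code.
#include <cstddef>
#include <deque>

typedef size_t RegionTask;   // index for region, as in taskqueue.hpp

class ToyRegionTaskQueue {
  std::deque<RegionTask> _d;
public:
  void push(RegionTask t) { _d.push_back(t); }   // owner end
  bool pop_local(RegionTask& t) {                // owner end (LIFO)
    if (_d.empty()) return false;
    t = _d.back(); _d.pop_back();
    return true;
  }
  bool steal(RegionTask& t) {                    // thief end (FIFO)
    if (_d.empty()) return false;
    t = _d.front(); _d.pop_front();
    return true;
  }
  size_t size() const { return _d.size(); }
};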