Mirror of https://github.com/openjdk/jdk.git, synced 2025-08-27 06:45:07 +02:00
6977804: G1: remove the zero-filling thread
This changeset removes the zero-filling thread from G1 and collapses the two free region lists we had before (the "free" and "unclean" lists) into one. The new free list uses the new heap region sets / lists abstractions that we'll ultimately use to keep track of all regions in the heap. A heap region set was also introduced for the humongous regions. Finally, this change increases the concurrency between the thread that completes freeing regions (after a cleanup pause) and the rest of the system (before, we'd have to wait for said thread to complete before allocating a new region). The changeset also includes a lot of refactoring and code simplification.
Reviewed-by: jcoomes, johnc
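The central behavioral change described above is the hand-off between the concurrent cleanup work and allocating threads: freed regions are appended to a shared secondary free list in batches, waiting allocators are notified, and allocation no longer has to wait for the whole cleanup pass to finish. Below is a minimal, self-contained sketch of that pattern in plain C++ using standard-library primitives. It is not HotSpot code: the Region, SecondaryFreeList, append_batch and take_one names are invented for illustration; the real change uses FreeRegionList, SecondaryFreeList_lock, notify_all() and G1SecondaryFreeListAppendLength, as the diff below shows.

// Sketch only: a cleanup thread drains its private list and hands regions to
// allocators in batches, instead of making them wait for the whole pass.
#include <condition_variable>
#include <deque>
#include <mutex>
#include <optional>

struct Region { int id; };

class SecondaryFreeList {
 public:
  // Called by the concurrent cleanup thread: append a whole batch at once,
  // then wake up any allocator that is waiting for a free region.
  void append_batch(std::deque<Region>& batch) {
    std::lock_guard<std::mutex> lock(_mutex);
    for (Region& r : batch) _regions.push_back(r);
    batch.clear();
    _cv.notify_all();
  }

  // Called once the cleanup thread has pushed its last batch.
  void set_no_more_regions_coming() {
    std::lock_guard<std::mutex> lock(_mutex);
    _more_coming = false;
    _cv.notify_all();
  }

  // Called by an allocating thread: take one region, waiting only while the
  // list is empty and more regions are still expected.
  std::optional<Region> take_one() {
    std::unique_lock<std::mutex> lock(_mutex);
    _cv.wait(lock, [&] { return !_regions.empty() || !_more_coming; });
    if (_regions.empty()) return std::nullopt;
    Region r = _regions.front();
    _regions.pop_front();
    return r;
  }

 private:
  std::mutex _mutex;
  std::condition_variable _cv;
  std::deque<Region> _regions;
  bool _more_coming = true;  // analogous in spirit to "free regions coming"
};

An allocating thread calls take_one() and blocks only until the next batch arrives, or until it learns that no more regions are coming. This mirrors how, after this change, mutators can take regions off the secondary free list while the cleanup list is still being drained concurrently, with the batch size playing the role that G1SecondaryFreeListAppendLength plays in the real code.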
parent 9c7b5257f8
commit 8bce4a6620
24 changed files with 2187 additions and 1919 deletions
@@ -458,6 +458,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
   _marking_task_overhead(1.0),
   _cleanup_sleep_factor(0.0),
   _cleanup_task_overhead(1.0),
+  _cleanup_list("Cleanup List"),
   _region_bm(max_regions, false /* in_resource_area*/),
   _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
@@ -521,12 +522,6 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
   satb_qs.set_buffer_size(G1SATBBufferSize);
 
-  int size = (int) MAX2(ParallelGCThreads, (size_t)1);
-  _par_cleanup_thread_state = NEW_C_HEAP_ARRAY(ParCleanupThreadState*, size);
-  for (int i = 0 ; i < size; i++) {
-    _par_cleanup_thread_state[i] = new ParCleanupThreadState;
-  }
-
   _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num);
   _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num);
 
@@ -711,11 +706,6 @@ void ConcurrentMark::set_non_marking_state() {
 }
 
 ConcurrentMark::~ConcurrentMark() {
-  int size = (int) MAX2(ParallelGCThreads, (size_t)1);
-  for (int i = 0; i < size; i++) delete _par_cleanup_thread_state[i];
-  FREE_C_HEAP_ARRAY(ParCleanupThreadState*,
-                    _par_cleanup_thread_state);
-
   for (int i = 0; i < (int) _max_task_num; ++i) {
     delete _task_queues->queue(i);
     delete _tasks[i];
@@ -1510,21 +1500,20 @@ class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
   size_t _max_live_bytes;
   size_t _regions_claimed;
   size_t _freed_bytes;
-  size_t _cleared_h_regions;
-  size_t _freed_regions;
-  UncleanRegionList* _unclean_region_list;
+  FreeRegionList _local_cleanup_list;
+  HumongousRegionSet _humongous_proxy_set;
   double _claimed_region_time;
   double _max_region_time;
 
 public:
   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
-                             UncleanRegionList* list,
                              int worker_num);
   size_t freed_bytes() { return _freed_bytes; }
-  size_t cleared_h_regions() { return _cleared_h_regions; }
-  size_t freed_regions() { return _freed_regions; }
-  UncleanRegionList* unclean_region_list() {
-    return _unclean_region_list;
+  FreeRegionList* local_cleanup_list() {
+    return &_local_cleanup_list;
   }
+  HumongousRegionSet* humongous_proxy_set() {
+    return &_humongous_proxy_set;
+  }
 
   bool doHeapRegion(HeapRegion *r);
@@ -1537,25 +1526,22 @@ public:
 
 class G1ParNoteEndTask: public AbstractGangTask {
   friend class G1NoteEndOfConcMarkClosure;
 
 protected:
   G1CollectedHeap* _g1h;
   size_t _max_live_bytes;
   size_t _freed_bytes;
-  ConcurrentMark::ParCleanupThreadState** _par_cleanup_thread_state;
+  FreeRegionList* _cleanup_list;
+
 public:
   G1ParNoteEndTask(G1CollectedHeap* g1h,
-                   ConcurrentMark::ParCleanupThreadState**
-                   par_cleanup_thread_state) :
+                   FreeRegionList* cleanup_list) :
     AbstractGangTask("G1 note end"), _g1h(g1h),
-    _max_live_bytes(0), _freed_bytes(0),
-    _par_cleanup_thread_state(par_cleanup_thread_state)
-  {}
+    _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
+
   void work(int i) {
     double start = os::elapsedTime();
-    G1NoteEndOfConcMarkClosure g1_note_end(_g1h,
-                                           &_par_cleanup_thread_state[i]->list,
-                                           i);
+    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i);
     if (G1CollectedHeap::use_parallel_gc_threads()) {
       _g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
                                             HeapRegion::NoteEndClaimValue);
@@ -1564,14 +1550,18 @@ public:
     }
     assert(g1_note_end.complete(), "Shouldn't have yielded!");
 
-    // Now finish up freeing the current thread's regions.
-    _g1h->finish_free_region_work(g1_note_end.freed_bytes(),
-                                  g1_note_end.cleared_h_regions(),
-                                  0, NULL);
+    // Now update the lists
+    _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(),
+                                            NULL /* free_list */,
+                                            g1_note_end.humongous_proxy_set(),
+                                            true /* par */);
     {
       MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
       _max_live_bytes += g1_note_end.max_live_bytes();
       _freed_bytes += g1_note_end.freed_bytes();
+
+      _cleanup_list->add_as_tail(g1_note_end.local_cleanup_list());
+      assert(g1_note_end.local_cleanup_list()->is_empty(), "post-condition");
     }
     double end = os::elapsedTime();
     if (G1PrintParCleanupStats) {
@@ -1612,30 +1602,28 @@ public:
 
 G1NoteEndOfConcMarkClosure::
 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
-                           UncleanRegionList* list,
                            int worker_num)
   : _g1(g1), _worker_num(worker_num),
     _max_live_bytes(0), _regions_claimed(0),
-    _freed_bytes(0), _cleared_h_regions(0), _freed_regions(0),
+    _freed_bytes(0),
     _claimed_region_time(0.0), _max_region_time(0.0),
-    _unclean_region_list(list)
-{}
+    _local_cleanup_list("Local Cleanup List"),
+    _humongous_proxy_set("Local Cleanup Humongous Proxy Set") { }
 
-bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *r) {
+bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *hr) {
   // We use a claim value of zero here because all regions
   // were claimed with value 1 in the FinalCount task.
-  r->reset_gc_time_stamp();
-  if (!r->continuesHumongous()) {
+  hr->reset_gc_time_stamp();
+  if (!hr->continuesHumongous()) {
     double start = os::elapsedTime();
     _regions_claimed++;
-    r->note_end_of_marking();
-    _max_live_bytes += r->max_live_bytes();
-    _g1->free_region_if_totally_empty_work(r,
-                                           _freed_bytes,
-                                           _cleared_h_regions,
-                                           _freed_regions,
-                                           _unclean_region_list,
-                                           true /*par*/);
+    hr->note_end_of_marking();
+    _max_live_bytes += hr->max_live_bytes();
+    _g1->free_region_if_totally_empty(hr,
+                                      &_freed_bytes,
+                                      &_local_cleanup_list,
+                                      &_humongous_proxy_set,
+                                      true /* par */);
     double region_time = (os::elapsedTime() - start);
     _claimed_region_time += region_time;
     if (region_time > _max_region_time) _max_region_time = region_time;
@@ -1655,6 +1643,8 @@ void ConcurrentMark::cleanup() {
     return;
   }
 
+  g1h->verify_region_sets_optional();
+
   if (VerifyDuringGC) {
     HandleMark hm; // handle scope
     gclog_or_tty->print(" VerifyDuringGC:(before)");
@@ -1719,7 +1709,7 @@ void ConcurrentMark::cleanup() {
 
   // Note end of marking in all heap regions.
   double note_end_start = os::elapsedTime();
-  G1ParNoteEndTask g1_par_note_end_task(g1h, _par_cleanup_thread_state);
+  G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     int n_workers = g1h->workers()->total_workers();
     g1h->set_par_threads(n_workers);
@@ -1731,9 +1721,14 @@ void ConcurrentMark::cleanup() {
   } else {
     g1_par_note_end_task.work(0);
   }
-  g1h->set_unclean_regions_coming(true);
+
+  if (!cleanup_list_is_empty()) {
+    // The cleanup list is not empty, so we'll have to process it
+    // concurrently. Notify anyone else that might be wanting free
+    // regions that there will be more free regions coming soon.
+    g1h->set_free_regions_coming();
+  }
   double note_end_end = os::elapsedTime();
-  // Tell the mutators that there might be unclean regions coming...
   if (G1PrintParCleanupStats) {
     gclog_or_tty->print_cr(" note end of marking: %8.3f ms.",
                            (note_end_end - note_end_start)*1000.0);
@@ -1799,33 +1794,63 @@ void ConcurrentMark::cleanup() {
                      /* silent */ false,
                      /* prev marking */ true);
   }
+
+  g1h->verify_region_sets_optional();
 }
 
 void ConcurrentMark::completeCleanup() {
   // A full collection intervened.
   if (has_aborted()) return;
 
-  int first = 0;
-  int last = (int)MAX2(ParallelGCThreads, (size_t)1);
-  for (int t = 0; t < last; t++) {
-    UncleanRegionList* list = &_par_cleanup_thread_state[t]->list;
-    assert(list->well_formed(), "Inv");
-    HeapRegion* hd = list->hd();
-    while (hd != NULL) {
-      // Now finish up the other stuff.
-      hd->rem_set()->clear();
-      HeapRegion* next_hd = hd->next_from_unclean_list();
-      (void)list->pop();
-      assert(list->hd() == next_hd, "how not?");
-      _g1h->put_region_on_unclean_list(hd);
-      if (!hd->isHumongous()) {
-        // Add this to the _free_regions count by 1.
-        _g1h->finish_free_region_work(0, 0, 1, NULL);
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  _cleanup_list.verify_optional();
+  FreeRegionList local_free_list("Local Cleanup List");
+
+  if (G1ConcRegionFreeingVerbose) {
+    gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
+                           "cleanup list has "SIZE_FORMAT" entries",
+                           _cleanup_list.length());
+  }
+
+  // Noone else should be accessing the _cleanup_list at this point,
+  // so it's not necessary to take any locks
+  while (!_cleanup_list.is_empty()) {
+    HeapRegion* hr = _cleanup_list.remove_head();
+    assert(hr != NULL, "the list was not empty");
+    hr->rem_set()->clear();
+    local_free_list.add_as_tail(hr);
+
+    // Instead of adding one region at a time to the secondary_free_list,
+    // we accumulate them in the local list and move them a few at a
+    // time. This also cuts down on the number of notify_all() calls
+    // we do during this process. We'll also append the local list when
+    // _cleanup_list is empty (which means we just removed the last
+    // region from the _cleanup_list).
+    if ((local_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
+        _cleanup_list.is_empty()) {
+      if (G1ConcRegionFreeingVerbose) {
+        gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
+                               "appending "SIZE_FORMAT" entries to the "
+                               "secondary_free_list, clean list still has "
+                               SIZE_FORMAT" entries",
+                               local_free_list.length(),
+                               _cleanup_list.length());
+      }
+
+      {
+        MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
+        g1h->secondary_free_list_add_as_tail(&local_free_list);
+        SecondaryFreeList_lock->notify_all();
+      }
+
+      if (G1StressConcRegionFreeing) {
+        for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
+          os::sleep(Thread::current(), (jlong) 1, false);
+        }
       }
-      hd = list->hd();
-      assert(hd == next_hd, "how not?");
     }
   }
+  assert(local_free_list.is_empty(), "post-condition");
 }
 
 bool G1CMIsAliveClosure::do_object_b(oop obj) {
@@ -2897,9 +2922,9 @@ public:
   virtual void do_oop( oop* p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T* p) {
-    assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
-    assert(!_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(),
-           "invariant");
+    assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
+    assert(!_g1h->is_on_free_list(
+              _g1h->heap_region_containing((HeapWord*) p)), "invariant");
 
     oop obj = oopDesc::load_decode_heap_oop(p);
     if (_cm->verbose_high())
@@ -3119,8 +3144,8 @@ void CMTask::deal_with_reference(oop obj) {
 void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
-  assert(!_g1h->heap_region_containing(objAddr)->is_on_free_list(),
-         "invariant");
+  assert(!_g1h->is_on_free_list(
+            _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
   assert(!_g1h->is_obj_ill(obj), "invariant");
   assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
 
@@ -3365,8 +3390,8 @@ void CMTask::drain_local_queue(bool partially) {
                              (void*) obj);
 
       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
-      assert(!_g1h->heap_region_containing(obj)->is_on_free_list(),
-             "invariant");
+      assert(!_g1h->is_on_free_list(
+                _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
 
       scan_object(obj);
 