6604422: G1: re-use half-promoted regions
6728271: G1: Cleanup G1CollectedHeap::get_gc_alloc_regions()

This allows the last half-full region that was allocated to during a GC
to be reused during the next GC.

Reviewed-by: apetrusenko, jcoomes
commit ecdb99412d
parent 21b3d4aea0

3 changed files with 126 additions and 24 deletions
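The mechanism is small: at the end of an evacuation pause, instead of untagging and forgetting every GC alloc region, the collector may park the last, still half-full region per allocation purpose in _retained_gc_alloc_regions[] and hand it back out at the start of the next pause. Below is a minimal, self-contained sketch of that lifecycle, not HotSpot code; Region and AllocManager are hypothetical stand-ins for HeapRegion and the G1CollectedHeap bookkeeping this diff touches.

#include <cassert>
#include <cstddef>

struct Region {
  size_t capacity = 1024;
  size_t used     = 0;
  bool   in_cset  = false;      // stand-in for in_collection_set()
  bool is_empty() const { return used == 0; }
  bool is_full()  const { return used == capacity; }
};

struct AllocManager {
  Region* current       = nullptr; // models _gc_alloc_regions[ap]
  Region* retained      = nullptr; // models _retained_gc_alloc_regions[ap]
  bool    retain_policy = true;    // models _retain_gc_alloc_region[ap]

  // Models get_gc_alloc_regions(): prefer the retained region unless it
  // is unusable (in the collection set, full, or empty).
  Region* acquire(Region* fresh) {
    Region* r = retained;
    retained = nullptr;
    if (r != nullptr && (r->in_cset || r->is_full() || r->is_empty())) {
      r = nullptr;   // discard the retained region
    }
    if (r == nullptr) {
      r = fresh;     // models newAllocRegionWithExpansion(ap, 0)
    }
    current = r;
    return r;
  }

  // Models release_gc_alloc_regions(bool totally): keep a half-full
  // region for the next GC unless told to release everything.
  void release(bool totally) {
    Region* r = current;
    current = nullptr;
    if (r != nullptr && !r->is_empty() && retain_policy && !totally) {
      retained = r;
    }
  }
};

int main() {
  AllocManager mgr;
  Region a;
  mgr.acquire(&a);
  a.used = 512;                    // a half-promoted region
  mgr.release(false /* totally */);
  assert(mgr.retained == &a);      // remembered for the next GC
  Region b;
  assert(mgr.acquire(&b) == &a);   // the retained region is picked first
  mgr.release(true /* totally */);
  assert(mgr.retained == nullptr); // full GC / shrink: nothing retained
  return 0;
}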
g1CollectedHeap.cpp

@@ -786,6 +786,12 @@ void G1CollectedHeap::abandon_cur_alloc_region() {
   }
 }
 
+void G1CollectedHeap::abandon_gc_alloc_regions() {
+  // first, make sure that the GC alloc region list is empty (it should!)
+  assert(_gc_alloc_region_list == NULL, "invariant");
+  release_gc_alloc_regions(true /* totally */);
+}
+
 class PostMCRemSetClearClosure: public HeapRegionClosure {
   ModRefBarrierSet* _mr_bs;
 public:
@@ -914,6 +920,7 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
 
   // Make sure we'll choose a new allocation region afterwards.
   abandon_cur_alloc_region();
+  abandon_gc_alloc_regions();
   assert(_cur_alloc_region == NULL, "Invariant.");
   g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
   tear_down_region_lists();
@@ -1306,7 +1313,7 @@ void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
 }
 
 void G1CollectedHeap::shrink(size_t shrink_bytes) {
-  release_gc_alloc_regions();
+  release_gc_alloc_regions(true /* totally */);
   tear_down_region_lists();  // We will rebuild them in a moment.
   shrink_helper(shrink_bytes);
   rebuild_region_lists();
@@ -1345,8 +1352,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   _gc_time_stamp(0),
   _surviving_young_words(NULL),
   _in_cset_fast_test(NULL),
-  _in_cset_fast_test_base(NULL)
-{
+  _in_cset_fast_test_base(NULL) {
   _g1h = this; // To catch bugs.
   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
     vm_exit_during_initialization("Failed necessary allocation.");
@@ -1371,9 +1377,19 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   }
 
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
     _gc_alloc_regions[ap] = NULL;
     _gc_alloc_region_counts[ap] = 0;
+    _retained_gc_alloc_regions[ap] = NULL;
+    // by default, we do not retain a GC alloc region for each ap;
+    // we'll override this, when appropriate, below
+    _retain_gc_alloc_region[ap] = false;
   }
+
+  // We will try to remember the last half-full tenured region we
+  // allocated to at the end of a collection so that we can re-use it
+  // during the next collection.
+  _retain_gc_alloc_region[GCAllocForTenured] = true;
 
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 }
@@ -2644,7 +2660,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
     popular_region->set_popular_pending(false);
   }
 
-  release_gc_alloc_regions();
+  release_gc_alloc_regions(false /* totally */);
 
   cleanup_surviving_young_words();
 
@@ -2735,6 +2751,10 @@ G1CollectedHeap::do_collection_pause_at_safepoint(HeapRegion* popular_region) {
 
 void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
+  // make sure we don't call set_gc_alloc_region() multiple times on
+  // the same region
+  assert(r == NULL || !r->is_gc_alloc_region(),
+         "shouldn't already be a GC alloc region");
   HeapWord* original_top = NULL;
   if (r != NULL)
     original_top = r->top();
@@ -2851,23 +2871,55 @@ bool G1CollectedHeap::check_gc_alloc_regions() {
 }
 
 void G1CollectedHeap::get_gc_alloc_regions() {
+  // First, let's check that the GC alloc region list is empty (it should)
+  assert(_gc_alloc_region_list == NULL, "invariant");
+
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+    assert(_gc_alloc_regions[ap] == NULL, "invariant");
+
     // Create new GC alloc regions.
-    HeapRegion* alloc_region = _gc_alloc_regions[ap];
-    // Clear this alloc region, so that in case it turns out to be
-    // unacceptable, we end up with no allocation region, rather than a bad
-    // one.
-    _gc_alloc_regions[ap] = NULL;
-    if (alloc_region == NULL || alloc_region->in_collection_set()) {
-      // Can't re-use old one.  Allocate a new one.
+    HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
+    _retained_gc_alloc_regions[ap] = NULL;
+
+    if (alloc_region != NULL) {
+      assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
+
+      // let's make sure that the GC alloc region is not tagged as such
+      // outside a GC operation
+      assert(!alloc_region->is_gc_alloc_region(), "sanity");
+
+      if (alloc_region->in_collection_set() ||
+          alloc_region->top() == alloc_region->end() ||
+          alloc_region->top() == alloc_region->bottom()) {
+        // we will discard the current GC alloc region if it's in the
+        // collection set (it can happen!), if it's already full (no
+        // point in using it), or if it's empty (this means that it
+        // was emptied during a cleanup and it should be on the free
+        // list now).
+
+        alloc_region = NULL;
+      }
+    }
+
+    if (alloc_region == NULL) {
+      // we will get a new GC alloc region
       alloc_region = newAllocRegionWithExpansion(ap, 0);
     }
+
     if (alloc_region != NULL) {
+      assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
       set_gc_alloc_region(ap, alloc_region);
     }
+
+    assert(_gc_alloc_regions[ap] == NULL ||
+           _gc_alloc_regions[ap]->is_gc_alloc_region(),
+           "the GC alloc region should be tagged as such");
+    assert(_gc_alloc_regions[ap] == NULL ||
+           _gc_alloc_regions[ap] == _gc_alloc_region_list,
+           "the GC alloc region should be the same as the GC alloc list head");
   }
   // Set alternative regions for allocation purposes that have reached
-  // thier limit.
+  // their limit.
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
     GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
     if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
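Restated compactly, a retained region is reusable only if it is outside the collection set and neither full nor empty. A hedged sketch of that test, with a hypothetical Region type standing in for HeapRegion:

#include <cassert>
#include <cstddef>

// Hypothetical stand-in for HeapRegion; bottom/top/end model the region's
// allocation cursor the way the checks in the hunk above use them.
struct Region {
  bool   in_collection_set;
  size_t bottom, top, end;
};

// Mirrors the discard conditions in get_gc_alloc_regions(): discard if the
// region is in the collection set, already full (top == end), or empty
// (top == bottom, i.e. it was emptied during a cleanup).
bool reusable(const Region& r) {
  return !r.in_collection_set && r.top != r.end && r.top != r.bottom;
}

int main() {
  Region half  = { false, 0, 512,  1024 };
  Region full  = { false, 0, 1024, 1024 };
  Region empty = { false, 0, 0,    1024 };
  assert(reusable(half) && !reusable(full) && !reusable(empty));
  return 0;
}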
@@ -2877,28 +2929,56 @@ void G1CollectedHeap::get_gc_alloc_regions() {
   assert(check_gc_alloc_regions(), "alloc regions messed up");
 }
 
-void G1CollectedHeap::release_gc_alloc_regions() {
+void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
   // We keep a separate list of all regions that have been alloc regions in
-  // the current collection pause. Forget that now.
+  // the current collection pause. Forget that now. This method will
+  // untag the GC alloc regions and tear down the GC alloc region
+  // list. It's desirable that no regions are tagged as GC alloc
+  // outside GCs.
   forget_alloc_region_list();
 
   // The current alloc regions contain objs that have survived
   // collection. Make them no longer GC alloc regions.
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
     HeapRegion* r = _gc_alloc_regions[ap];
-    if (r != NULL && r->is_empty()) {
-      {
+    _retained_gc_alloc_regions[ap] = NULL;
+
+    if (r != NULL) {
+      // we retain nothing on _gc_alloc_regions between GCs
+      set_gc_alloc_region(ap, NULL);
+      _gc_alloc_region_counts[ap] = 0;
+
+      if (r->is_empty()) {
+        // we didn't actually allocate anything in it; let's just put
+        // it on the free list
         MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
         r->set_zero_fill_complete();
         put_free_region_on_list_locked(r);
+      } else if (_retain_gc_alloc_region[ap] && !totally) {
+        // retain it so that we can use it at the beginning of the next GC
+        _retained_gc_alloc_regions[ap] = r;
       }
     }
-    // set_gc_alloc_region will also NULLify all aliases to the region
-    set_gc_alloc_region(ap, NULL);
-    _gc_alloc_region_counts[ap] = 0;
   }
 }
+
+#ifndef PRODUCT
+// Useful for debugging
+
+void G1CollectedHeap::print_gc_alloc_regions() {
+  gclog_or_tty->print_cr("GC alloc regions");
+  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+    HeapRegion* r = _gc_alloc_regions[ap];
+    if (r == NULL) {
+      gclog_or_tty->print_cr("  %2d : "PTR_FORMAT, ap, NULL);
+    } else {
+      gclog_or_tty->print_cr("  %2d : "PTR_FORMAT" "SIZE_FORMAT,
+                             ap, r->bottom(), r->used());
+    }
+  }
+}
+#endif // PRODUCT
 
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
   set_evac_failure_closure(cl);
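Note the split of responsibilities this hunk creates: release_gc_alloc_regions() untags every region, puts empty ones back on the free list, and retains any non-empty one when the per-purpose policy allows and totally is false; the full/in-collection-set checks happen later, in get_gc_alloc_regions(). A small hedged model of the three release outcomes (the enum and function are illustrative, not HotSpot API):

#include <cassert>
#include <cstddef>

enum Outcome { FREED, RETAINED, UNTAGGED };

// used: bytes allocated in the region; retain: _retain_gc_alloc_region[ap];
// totally: the new parameter to release_gc_alloc_regions().
Outcome release_one(size_t used, bool retain, bool totally) {
  if (used == 0)          return FREED;    // empty: back on the free list
  if (retain && !totally) return RETAINED; // kept for the next pause
  return UNTAGGED;                         // survives as an ordinary region
}

int main() {
  assert(release_one(0,   true,  false) == FREED);
  assert(release_one(512, true,  false) == RETAINED); // evacuation pause
  assert(release_one(512, true,  true)  == UNTAGGED); // full GC / shrink
  assert(release_one(512, false, false) == UNTAGGED); // retention disabled
  return 0;
}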
g1CollectedHeap.hpp

@@ -172,7 +172,6 @@ private:
   NumAPIs = HeapRegion::MaxAge
 };
 
-
 // The one and only G1CollectedHeap, so static functions can find it.
 static G1CollectedHeap* _g1h;
 
@@ -217,11 +216,20 @@ private:
 
   // Postcondition: cur_alloc_region == NULL.
   void abandon_cur_alloc_region();
+  void abandon_gc_alloc_regions();
 
   // The to-space memory regions into which objects are being copied during
   // a GC.
   HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
   size_t _gc_alloc_region_counts[GCAllocPurposeCount];
+  // These are the regions, one per GCAllocPurpose, that are half-full
+  // at the end of a collection and that we want to reuse during the
+  // next collection.
+  HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount];
+  // This specifies whether we will keep the last half-full region at
+  // the end of a collection so that it can be reused during the next
+  // collection (this is specified per GCAllocPurpose)
+  bool _retain_gc_alloc_region[GCAllocPurposeCount];
 
   // A list of the regions that have been set to be alloc regions in the
   // current collection.
@@ -589,8 +597,21 @@ protected:
 
   // Ensure that the relevant gc_alloc regions are set.
   void get_gc_alloc_regions();
-  // We're done with GC alloc regions; release them, as appropriate.
-  void release_gc_alloc_regions();
+  // We're done with GC alloc regions. We are going to tear down the
+  // gc alloc list and remove the gc alloc tag from all the regions on
+  // that list. However, we will also retain the last (i.e., the one
+  // that is half-full) GC alloc region, per GCAllocPurpose, for
+  // possible reuse during the next collection, provided
+  // _retain_gc_alloc_region[] indicates that it should be the
+  // case. Said regions are kept in the _retained_gc_alloc_regions[]
+  // array. If the parameter totally is set, we will not retain any
+  // regions, irrespective of what _retain_gc_alloc_region[]
+  // indicates.
+  void release_gc_alloc_regions(bool totally);
+#ifndef PRODUCT
+  // Useful for debugging.
+  void print_gc_alloc_regions();
+#endif // !PRODUCT
 
   // ("Weak") Reference processing support
   ReferenceProcessor* _ref_processor;
g1CollectorPolicy.cpp

@@ -1087,6 +1087,7 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
 
   assert(_g1->used_regions() == _g1->recalculate_used_regions(),
          "sanity");
+  assert(_g1->used() == _g1->recalculate_used(), "sanity");
 
   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
   _all_stop_world_times_ms->add(s_w_t_ms);
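The new assert cross-checks G1's incrementally maintained used-bytes counter against a full recomputation over all regions, the same pattern as the used_regions() check above it. A hedged sketch of the idea (names are illustrative, not the HotSpot interfaces):

#include <cassert>
#include <cstddef>
#include <vector>

struct Heap {
  std::vector<size_t> region_used;
  size_t cached_used = 0;                   // updated incrementally

  void allocate(size_t region, size_t bytes) {
    region_used[region] += bytes;
    cached_used += bytes;                   // incremental bookkeeping
  }
  size_t recalculate_used() const {         // full recomputation
    size_t sum = 0;
    for (size_t u : region_used) sum += u;
    return sum;
  }
};

int main() {
  Heap h;
  h.region_used.resize(4);
  h.allocate(1, 100);
  h.allocate(3, 50);
  assert(h.cached_used == h.recalculate_used()); // the invariant asserted above
  return 0;
}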