8151178: Move the collection set out of the G1 collector policy

Create a G1CollectionSet class

Reviewed-by: jwilhelm, tbenson, tschatzl
Mikael Gerdin 2016-03-07 17:23:59 +01:00
parent c5a4113326
commit b7486d2544
10 changed files with 700 additions and 560 deletions
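For orientation before the hunks: the patch makes G1CollectedHeap own the new collection set by value and gives the policy a back-pointer, wired up during initialization. A minimal sketch of that ownership, with simplified stand-in types (only set_policy() mirrors a name from the patch; the rest is illustrative):

#include <cstddef>

class G1CollectionSet;

class G1CollectorPolicy {
  G1CollectionSet* _collection_set;  // back-pointer, set once at init time
public:
  G1CollectorPolicy() : _collection_set(nullptr) {}
  void set_collection_set(G1CollectionSet* cs) { _collection_set = cs; }
};

class G1CollectionSet {
  G1CollectorPolicy* _policy;
public:
  G1CollectionSet() : _policy(nullptr) {}
  void set_policy(G1CollectorPolicy* p) { _policy = p; }  // as in the patch
};

class G1CollectedHeap {
  G1CollectorPolicy* _g1_policy;
  G1CollectionSet _collection_set;  // owned by value, lives as long as the heap
public:
  explicit G1CollectedHeap(G1CollectorPolicy* p) : _g1_policy(p) {
    _collection_set.set_policy(p);
    p->set_collection_set(&_collection_set);
  }
  G1CollectionSet* collection_set() { return &_collection_set; }
};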

gc/g1/g1CollectedHeap.cpp

@@ -34,6 +34,7 @@
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1Allocator.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1EvacStats.inline.hpp"
@@ -1302,9 +1303,9 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
   // set between the last GC or pause and now. We need to clear the
   // incremental collection set and then start rebuilding it afresh
   // after this full GC.
-  abandon_collection_set(g1_policy()->inc_cset_head());
-  g1_policy()->clear_incremental_cset();
-  g1_policy()->stop_incremental_cset_building();
+  abandon_collection_set(collection_set()->inc_head());
+  collection_set()->clear_incremental();
+  collection_set()->stop_incremental_building();

   tear_down_region_sets(false /* free_list_only */);
   collector_state()->set_gcs_are_young(true);
@@ -1426,8 +1427,8 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
   _verifier->check_bitmaps("Full GC End");

   // Start a new incremental collection set for the next pause
-  assert(g1_policy()->collection_set() == NULL, "must be");
-  g1_policy()->start_incremental_cset_building();
+  assert(collection_set()->head() == NULL, "must be");
+  collection_set()->start_incremental_building();

   clear_cset_fast_test();
@@ -1741,6 +1742,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   CollectedHeap(),
   _g1_policy(policy_),
+  _collection_set(this),
   _dirty_card_queue_set(false),
   _is_alive_closure_cm(this),
   _is_alive_closure_stw(this),
@@ -2545,8 +2547,8 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
   // p threads
   // Then thread t will start at region floor ((t * n) / p)

-  result = g1_policy()->collection_set();
-  uint cs_size = g1_policy()->cset_region_length();
+  result = collection_set()->head();
+  uint cs_size = collection_set()->region_length();
   uint active_workers = workers()->active_workers();

   uint end_ind = (cs_size * worker_i) / active_workers;
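A quick numeric check of the floor((t * n) / p) partitioning referenced in this hunk: with n = 10 collection-set regions and p = 4 workers, the start indices are 0, 2, 5, 7, so the regions split into chunks of sizes 2, 3, 2, 3. A standalone sketch of the same arithmetic (example values only, not from the patch):

#include <cstdio>

// Start index of worker t over n regions and p workers: floor((t * n) / p).
// Worker t then scans the half-open range [start(t), start(t + 1)).
static unsigned start_index(unsigned t, unsigned n, unsigned p) {
  return (t * n) / p;  // integer division is floor() for non-negative values
}

int main() {
  const unsigned n = 10, p = 4;  // example values
  for (unsigned t = 0; t < p; t++) {
    printf("worker %u: regions [%u, %u)\n",
           t, start_index(t, n, p), start_index(t + 1, n, p));
  }
  return 0;
}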
@@ -2577,7 +2579,7 @@ HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
 }

 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
-  HeapRegion* r = g1_policy()->collection_set();
+  HeapRegion* r = collection_set()->head();
   while (r != NULL) {
     HeapRegion* next = r->next_in_collection_set();
     if (cl->doHeapRegion(r)) {
@@ -2606,7 +2608,7 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
     }
     cur = next;
   }
-  cur = g1_policy()->collection_set();
+  cur = collection_set()->head();
   while (cur != r) {
     HeapRegion* next = cur->next_in_collection_set();
     if (cl->doHeapRegion(cur) && false) {
@@ -3336,10 +3338,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         concurrent_mark()->checkpointRootsInitialPre();
       }

-      double time_remaining_ms = g1_policy()->finalize_young_cset_part(target_pause_time_ms);
-      g1_policy()->finalize_old_cset_part(time_remaining_ms);
+      g1_policy()->finalize_collection_set(target_pause_time_ms);

-      evacuation_info.set_collectionset_regions(g1_policy()->cset_region_length());
+      evacuation_info.set_collectionset_regions(collection_set()->region_length());

       // Make sure the remembered sets are up to date. This needs to be
       // done before register_humongous_regions_with_cset(), because the
@@ -3358,7 +3359,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
       _cm->verify_no_cset_oops();

       if (_hr_printer.is_active()) {
-        HeapRegion* hr = g1_policy()->collection_set();
+        HeapRegion* hr = collection_set()->head();
         while (hr != NULL) {
           _hr_printer.cset(hr);
           hr = hr->next_in_collection_set();
@@ -3373,7 +3374,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
       // Initialize the GC alloc regions.
       _allocator->init_gc_alloc_regions(evacuation_info);

-      G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), g1_policy()->young_cset_region_length());
+      G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
       pre_evacuate_collection_set();

       // Actually do the work...
@@ -3382,18 +3383,18 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
       post_evacuate_collection_set(evacuation_info, &per_thread_states);

       const size_t* surviving_young_words = per_thread_states.surviving_young_words();
-      free_collection_set(g1_policy()->collection_set(), evacuation_info, surviving_young_words);
+      free_collection_set(collection_set()->head(), evacuation_info, surviving_young_words);

       eagerly_reclaim_humongous_regions();

-      g1_policy()->clear_collection_set();
+      collection_set()->clear_head();

       record_obj_copy_mem_stats();
       _survivor_evac_stats.adjust_desired_plab_sz();
       _old_evac_stats.adjust_desired_plab_sz();

       // Start a new incremental collection set for the next pause.
-      g1_policy()->start_incremental_cset_building();
+      collection_set()->start_incremental_building();

       clear_cset_fast_test();
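Read together, the hunks in do_collection_pause_at_safepoint() trace the collection set's per-pause lifecycle: finalize the incremental set into head(), evacuate, free the regions behind head(), clear_head(), then immediately restart incremental building for the next pause. A toy state machine of just that Active/Inactive cycle (names simplified from the patch, purely illustrative):

#include <cassert>
#include <cstdio>

// Toy model of the incremental-build lifecycle visible in this function:
// Active while mutators retire regions into the set, Inactive during the
// pause after finalize, reactivated before the pause returns.
enum class BuildState { Inactive, Active };

struct CSetLifecycle {
  BuildState state = BuildState::Inactive;

  void start_incremental_building() {
    assert(state == BuildState::Inactive);
    state = BuildState::Active;
  }
  void finalize() {  // stands in for finalize_young_part()/finalize_old_part()
    assert(state == BuildState::Active);
    state = BuildState::Inactive;  // i.e. stop_incremental_building()
  }
};

int main() {
  CSetLifecycle cs;
  cs.start_incremental_building();  // at heap init / end of the previous pause
  cs.finalize();                    // at the start of an evacuation pause
  cs.start_incremental_building();  // restarted before the pause returns
  puts("lifecycle ok");
  return 0;
}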
@@ -3468,7 +3469,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         size_t total_cards_scanned = per_thread_states.total_cards_scanned();
         g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);

-        evacuation_info.set_collectionset_used_before(g1_policy()->collection_set_bytes_used_before());
+        evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
         evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());

         MemoryService::track_memory_usage();
@@ -4909,7 +4910,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& e
     if (cur->is_young()) {
       int index = cur->young_index_in_cset();
       assert(index != -1, "invariant");
-      assert((uint) index < policy->young_cset_region_length(), "invariant");
+      assert((uint) index < collection_set()->young_region_length(), "invariant");
       size_t words_survived = surviving_young_words[index];
       cur->record_surv_words_in_group(words_survived);
@@ -5382,7 +5383,7 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");

-  g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
+  collection_set()->add_eden_region(alloc_region);
   increase_used(allocated_bytes);
   _hr_printer.retire(alloc_region);

   // We update the eden sizes here, when the region is retired,

gc/g1/g1CollectedHeap.hpp

@@ -28,6 +28,7 @@
 #include "gc/g1/evacuationInfo.hpp"
 #include "gc/g1/g1AllocationContext.hpp"
 #include "gc/g1/g1BiasedArray.hpp"
+#include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1HRPrinter.hpp"
@@ -65,6 +66,7 @@ class ObjectClosure;
 class SpaceClosure;
 class CompactibleSpaceClosure;
 class Space;
+class G1CollectionSet;
 class G1CollectorPolicy;
 class G1RemSet;
 class HeapRegionRemSetIterator;
@@ -363,6 +365,8 @@ protected:
   // The current policy object for the collector.
   G1CollectorPolicy* _g1_policy;

+  G1CollectionSet _collection_set;
+
   // This is the second level of trying to allocate a new region. If
   // new_region() didn't find a region on the free_list, this call will
   // check whether there's anything available on the
@@ -985,6 +989,9 @@ public:
   // The current policy object for the collector.
   G1CollectorPolicy* g1_policy() const { return _g1_policy; }

+  const G1CollectionSet* collection_set() const { return &_collection_set; }
+  G1CollectionSet* collection_set() { return &_collection_set; }
+
   virtual CollectorPolicy* collector_policy() const;

   // Adaptive size policy. No such thing for g1.
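The paired accessors added above follow the usual const-overload idiom: const callers get a pointer-to-const view, mutating callers get the plain pointer. A self-contained stand-in (stub types, not the HotSpot classes):

#include <cstdio>

struct CollectionSetStub { int regions = 0; };  // stand-in for G1CollectionSet

class HeapStub {
  CollectionSetStub _cs;
public:
  const CollectionSetStub* collection_set() const { return &_cs; }  // read-only view
  CollectionSetStub* collection_set() { return &_cs; }              // mutable view
};

int main() {
  HeapStub h;
  h.collection_set()->regions = 3;               // non-const overload
  const HeapStub& ch = h;
  printf("%d\n", ch.collection_set()->regions);  // const overload
  return 0;
}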

gc/g1/g1CollectionSet.cpp (new file)

@@ -0,0 +1,426 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "utilities/debug.hpp"
G1CollectorState* G1CollectionSet::collector_state() {
  return _g1->collector_state();
}

G1GCPhaseTimes* G1CollectionSet::phase_times() {
  return _policy->phase_times();
}

CollectionSetChooser* G1CollectionSet::cset_chooser() {
  return _cset_chooser;
}

double G1CollectionSet::predict_region_elapsed_time_ms(HeapRegion* hr) {
  return _policy->predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
}
G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h) :
  _g1(g1h),
  _policy(NULL),
  _cset_chooser(new CollectionSetChooser()),
  _eden_region_length(0),
  _survivor_region_length(0),
  _old_region_length(0),
  _head(NULL),
  _bytes_used_before(0),
  _recorded_rs_lengths(0),
  // Incremental CSet attributes
  _inc_build_state(Inactive),
  _inc_head(NULL),
  _inc_tail(NULL),
  _inc_bytes_used_before(0),
  _inc_recorded_rs_lengths(0),
  _inc_recorded_rs_lengths_diffs(0),
  _inc_predicted_elapsed_time_ms(0.0),
  _inc_predicted_elapsed_time_ms_diffs(0.0) {}

G1CollectionSet::~G1CollectionSet() {
  delete _cset_chooser;
}
void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
                                          uint survivor_cset_region_length) {
  _eden_region_length = eden_cset_region_length;
  _survivor_region_length = survivor_cset_region_length;
  _old_region_length = 0;
}

void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}
// Add the heap region at the head of the non-incremental collection set
void G1CollectionSet::add_old_region(HeapRegion* hr) {
  assert(_inc_build_state == Active, "Precondition");
  assert(hr->is_old(), "the region should be old");
  assert(!hr->in_collection_set(), "should not already be in the CSet");

  _g1->register_old_region_with_cset(hr);
  hr->set_next_in_collection_set(_head);
  _head = hr;
  _bytes_used_before += hr->used();
  size_t rs_length = hr->rem_set()->occupied();
  _recorded_rs_lengths += rs_length;
  _old_region_length += 1;
}
// Initialize the per-collection-set information
void G1CollectionSet::start_incremental_building() {
  assert(_inc_build_state == Inactive, "Precondition");

  _inc_head = NULL;
  _inc_tail = NULL;
  _inc_bytes_used_before = 0;

  _inc_recorded_rs_lengths = 0;
  _inc_recorded_rs_lengths_diffs = 0;
  _inc_predicted_elapsed_time_ms = 0.0;
  _inc_predicted_elapsed_time_ms_diffs = 0.0;
  _inc_build_state = Active;
}
void G1CollectionSet::finalize_incremental_building() {
  assert(_inc_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_recorded_rs_lengths and
  // _inc_predicted_elapsed_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diffs fields. Here we add the diffs to
  // the "main" fields.

  if (_inc_recorded_rs_lengths_diffs >= 0) {
    _inc_recorded_rs_lengths += _inc_recorded_rs_lengths_diffs;
  } else {
    // This is defensive. The diff should in theory be always positive
    // as RSets can only grow between GCs. However, given that we
    // sample their size concurrently with other threads updating them
    // it's possible that we might get the wrong size back, which
    // could make the calculations somewhat inaccurate.
    size_t diffs = (size_t) (-_inc_recorded_rs_lengths_diffs);
    if (_inc_recorded_rs_lengths >= diffs) {
      _inc_recorded_rs_lengths -= diffs;
    } else {
      _inc_recorded_rs_lengths = 0;
    }
  }
  _inc_predicted_elapsed_time_ms += _inc_predicted_elapsed_time_ms_diffs;

  _inc_recorded_rs_lengths_diffs = 0;
  _inc_predicted_elapsed_time_ms_diffs = 0.0;
}
void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint");

  // We could have updated _inc_recorded_rs_lengths and
  // _inc_predicted_elapsed_time_ms directly but we'd need to do
  // that atomically, as this code is executed by a concurrent
  // refinement thread, potentially concurrently with a mutator thread
  // allocating a new region and also updating the same fields. To
  // avoid the atomic operations we accumulate these updates on two
  // separate fields (*_diffs) and we'll just add them to the "main"
  // fields at the start of a GC.

  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  _inc_recorded_rs_lengths_diffs += rs_lengths_diff;

  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;

  hr->set_recorded_rs_length(new_rs_length);
  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}
void G1CollectionSet::add_young_region_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(hr->young_index_in_cset() > -1, "should have already been set");
  assert(_inc_build_state == Active, "Precondition");

  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause or
  // * adding the current allocation region to the incremental cset
  //   when it is retired.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region)
  // We need to clear and set the cached recorded/cached collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.

  size_t rs_length = hr->rem_set()->occupied();
  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // rset sampling code
  hr->set_recorded_rs_length(rs_length);
  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);

  size_t used_bytes = hr->used();
  _inc_recorded_rs_lengths += rs_length;
  _inc_predicted_elapsed_time_ms += region_elapsed_time_ms;
  _inc_bytes_used_before += used_bytes;

  assert(!hr->in_collection_set(), "invariant");
  _g1->register_young_region_with_cset(hr);

  assert(hr->next_in_collection_set() == NULL, "invariant");
}
// Add the region at the RHS of the incremental cset
void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
  // We should only ever be appending survivors at the end of a pause
  assert(hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_young_region_common(hr);

  // Now add the region at the right hand side
  if (_inc_tail == NULL) {
    assert(_inc_head == NULL, "invariant");
    _inc_head = hr;
  } else {
    _inc_tail->set_next_in_collection_set(hr);
  }
  _inc_tail = hr;
}
// Add the region to the LHS of the incremental cset
void G1CollectionSet::add_eden_region(HeapRegion* hr) {
  // Survivors should be added to the RHS at the end of a pause
  assert(hr->is_eden(), "Logic");

  // Do the 'common' stuff
  add_young_region_common(hr);

  // Add the region at the left hand side
  hr->set_next_in_collection_set(_inc_head);
  if (_inc_head == NULL) {
    assert(_inc_tail == NULL, "Invariant");
    _inc_tail = hr;
  }
  _inc_head = hr;
}
#ifndef PRODUCT
void G1CollectionSet::print(HeapRegion* list_head, outputStream* st) {
  assert(list_head == inc_head() || list_head == head(), "must be");

  st->print_cr("\nCollection_set:");

  HeapRegion* csr = list_head;
  while (csr != NULL) {
    HeapRegion* next = csr->next_in_collection_set();
    assert(csr->in_collection_set(), "bad CS");
    st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
                 HR_FORMAT_PARAMS(csr),
                 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
                 csr->age_in_surv_rate_group_cond());
    csr = next;
  }
}
#endif // !PRODUCT
double G1CollectionSet::finalize_young_part(double target_pause_time_ms) {
  double young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
  finalize_incremental_building();

  guarantee(target_pause_time_ms > 0.0,
            "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
  guarantee(_head == NULL, "Precondition");

  size_t pending_cards = _policy->pending_cards();
  double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
                            pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

  collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());

  // The young list is laid with the survivor regions from the previous
  // pause are appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  uint survivor_region_length = young_list->survivor_length();
  uint eden_region_length = young_list->eden_length();
  init_region_lengths(eden_region_length, survivor_region_length);

  HeapRegion* hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    // There is a convention that all the young regions in the CSet
    // are tagged as "eden", so we do this for the survivors here. We
    // use the special set_eden_pre_gc() as it doesn't check that the
    // region is free (which is not the case here).
    hr->set_eden_pre_gc();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  _head = _inc_head;
  _bytes_used_before = _inc_bytes_used_before;
  time_remaining_ms = MAX2(time_remaining_ms - _inc_predicted_elapsed_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
                            eden_region_length, survivor_region_length, _inc_predicted_elapsed_time_ms, target_pause_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_lengths(_inc_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);

  return time_remaining_ms;
}
void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
  double non_young_start_time_sec = os::elapsedTime();
  double predicted_old_time_ms = 0.0;

  if (!collector_state()->gcs_are_young()) {
    cset_chooser()->verify();
    const uint min_old_cset_length = _policy->calc_min_old_cset_length();
    const uint max_old_cset_length = _policy->calc_max_old_cset_length();

    uint expensive_region_num = 0;
    bool check_time_remaining = _policy->adaptive_young_list_length();

    HeapRegion* hr = cset_chooser()->peek();
    while (hr != NULL) {
      if (old_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
                                  old_region_length(), max_old_cset_length);
        break;
      }

      // Stop adding regions if the remaining reclaimable space is
      // not above G1HeapWastePercent.
      size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
      double reclaimable_perc = _policy->reclaimable_bytes_perc(reclaimable_bytes);
      double threshold = (double) G1HeapWastePercent;
      if (reclaimable_perc <= threshold) {
        // We've added enough old regions that the amount of uncollected
        // reclaimable space is at or below the waste threshold. Stop
        // adding old regions to the CSet.
        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
                                  "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
                                  old_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
        break;
      }

      double predicted_time_ms = predict_region_elapsed_time_ms(hr);
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.
          if (old_region_length() >= min_old_cset_length) {
            // We have added the minimum number of old regions to the CSet,
            // we are done with this CSet.
            log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
                                      "predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions",
                                      predicted_time_ms, time_remaining_ms, old_region_length(), min_old_cset_length);
            break;
          }

          // We'll add it anyway given that we haven't reached the
          // minimum number of old regions.
          expensive_region_num += 1;
        }
      } else {
        if (old_region_length() >= min_old_cset_length) {
          // In the non-auto-tuning case, we'll finish adding regions
          // to the CSet if we reach the minimum.
          log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
                                    old_region_length(), min_old_cset_length);
          break;
        }
      }

      // We will add this region to the CSet.
      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
      predicted_old_time_ms += predicted_time_ms;
      cset_chooser()->pop(); // already have region via peek()
      _g1->old_set_remove(hr);
      add_old_region(hr);

      hr = cset_chooser()->peek();
    }
    if (hr == NULL) {
      log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
    }

    if (expensive_region_num > 0) {
      // We print the information once here at the end, predicated on
      // whether we added any apparently expensive regions or not, to
      // avoid generating output per region.
      log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
                                "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
                                old_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
    }

    cset_chooser()->verify();
  }

  stop_incremental_building();

  log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
                            old_region_length(), predicted_old_time_ms, time_remaining_ms);

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
}
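The *_diffs fields in this new file implement a pattern worth calling out: the single concurrent refinement thread accumulates corrections in separate diff fields, and finalize_incremental_building() folds them into the main counters only at a safepoint, so neither writer needs atomics. A reduced sketch of the idea (illustrative names, not the HotSpot code):

#include <cstddef>

// Single-writer-per-field counter: main_value is written only by the
// allocating thread, pending_diff only by the sampling thread, and the
// two are reconciled at a safepoint.
struct SampledCounter {
  size_t    main_value;
  ptrdiff_t pending_diff;

  // Called concurrently by the refinement/sampling thread.
  void record_sample(size_t old_len, size_t new_len) {
    pending_diff += (ptrdiff_t)new_len - (ptrdiff_t)old_len;
  }

  // Called at a safepoint, when no sampler can be running.
  void fold() {
    if (pending_diff >= 0) {
      main_value += (size_t)pending_diff;
    } else {
      // Defensive, mirroring finalize_incremental_building(): a stale
      // concurrent sample can make the accumulated diff (wrongly) negative.
      size_t d = (size_t)(-pending_diff);
      main_value = (main_value >= d) ? main_value - d : 0;
    }
    pending_diff = 0;
  }
};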

gc/g1/g1CollectionSet.hpp (new file)

@@ -0,0 +1,207 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
#define SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
#include "gc/g1/collectionSetChooser.hpp"
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
class G1CollectedHeap;
class G1CollectorPolicy;
class G1CollectorState;
class G1GCPhaseTimes;
class HeapRegion;
class G1CollectionSet VALUE_OBJ_CLASS_SPEC {
  G1CollectedHeap* _g1;
  G1CollectorPolicy* _policy;

  CollectionSetChooser* _cset_chooser;

  uint _eden_region_length;
  uint _survivor_region_length;
  uint _old_region_length;

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _head;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_old_part() when adding old regions
  // (if any) to the collection set.
  size_t _bytes_used_before;

  size_t _recorded_rs_lengths;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_tail;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_bytes_used_before;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_recorded_rs_lengths;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate them in this field and add it to
  // _inc_recorded_rs_lengths_diffs at the start of a GC.
  ssize_t _inc_recorded_rs_lengths_diffs;

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_predicted_elapsed_time_ms;

  // See the comment for _inc_recorded_rs_lengths_diffs.
  double _inc_predicted_elapsed_time_ms_diffs;

  G1CollectorState* collector_state();
  G1GCPhaseTimes* phase_times();

  double predict_region_elapsed_time_ms(HeapRegion* hr);

public:
  G1CollectionSet(G1CollectedHeap* g1h);
  ~G1CollectionSet();

  void set_policy(G1CollectorPolicy* g1p) {
    assert(_policy == NULL, "should only initialize once");
    _policy = g1p;
  }

  CollectionSetChooser* cset_chooser();

  void init_region_lengths(uint eden_cset_region_length,
                           uint survivor_cset_region_length);

  void set_recorded_rs_lengths(size_t rs_lengths);

  uint region_length() const       { return young_region_length() +
                                            old_region_length(); }
  uint young_region_length() const { return eden_region_length() +
                                            survivor_region_length(); }

  uint eden_region_length() const     { return _eden_region_length;     }
  uint survivor_region_length() const { return _survivor_region_length; }
  uint old_region_length() const      { return _old_region_length;      }

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_head() { return _inc_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_tail() { return _inc_tail; }

  // Initialize incremental collection set info.
  void start_incremental_building();

  // Perform any final calculations on the incremental CSet fields
  // before we can use them.
  void finalize_incremental_building();

  void clear_incremental() {
    _inc_head = NULL;
    _inc_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_building() { _inc_build_state = Inactive; }

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* head() { return _head; }

  void clear_head() { _head = NULL; }

  size_t recorded_rs_lengths() { return _recorded_rs_lengths; }

  size_t bytes_used_before() const {
    return _bytes_used_before;
  }

  void reset_bytes_used_before() {
    _bytes_used_before = 0;
  }

  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // the collection set are available via access methods.
  double finalize_young_part(double target_pause_time_ms);
  void finalize_old_part(double time_remaining_ms);

  // Add old region "hr" to the CSet.
  void add_old_region(HeapRegion* hr);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);

  // Add hr to the LHS of the incremental collection set.
  void add_eden_region(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_survivor_regions(HeapRegion* hr);

#ifndef PRODUCT
  void print(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_young_region_common(HeapRegion* hr);
};

#endif // SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
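The inc_head/inc_tail pair declared above maintains an intrusive singly linked list: eden regions are pushed on the left as they are retired, survivors are appended on the right at the end of a pause, so the finalized list reads [eden regions | survivors]. A toy version of the two insertions (illustrative types only, not the HotSpot classes):

#include <cassert>
#include <cstdio>

// Toy intrusive list mirroring add_eden_region() / add_survivor_regions().
struct Region {
  const char* name;
  Region* next;
};

struct IncCSet {
  Region* head = nullptr;
  Region* tail = nullptr;

  void add_eden(Region* r) {       // prepend at the LHS
    r->next = head;
    if (head == nullptr) {
      assert(tail == nullptr);
      tail = r;
    }
    head = r;
  }

  void add_survivor(Region* r) {   // append at the RHS
    r->next = nullptr;
    if (tail == nullptr) {
      assert(head == nullptr);
      head = r;
    } else {
      tail->next = r;
    }
    tail = r;
  }
};

int main() {
  Region s1 = {"survivor1", nullptr};
  Region e1 = {"eden1", nullptr};
  Region e2 = {"eden2", nullptr};
  IncCSet cs;
  cs.add_survivor(&s1);  // survivors are appended at the end of a pause
  cs.add_eden(&e1);      // eden regions are prepended as they are retired
  cs.add_eden(&e2);
  for (Region* r = cs.head; r != nullptr; r = r->next) {
    printf("%s\n", r->name);  // prints eden2, eden1, survivor1
  }
  return 0;
}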

gc/g1/g1CollectorPolicy.cpp

@@ -26,6 +26,7 @@
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1IHOPControl.hpp"
@@ -115,23 +116,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _rs_lengths_prediction(0),
   _max_survivor_regions(0),

-  _eden_cset_region_length(0),
-  _survivor_cset_region_length(0),
-  _old_cset_region_length(0),
-
-  _collection_set(NULL),
-  _collection_set_bytes_used_before(0),
-
-  // Incremental CSet attributes
-  _inc_cset_build_state(Inactive),
-  _inc_cset_head(NULL),
-  _inc_cset_tail(NULL),
-  _inc_cset_bytes_used_before(0),
-  _inc_cset_recorded_rs_lengths(0),
-  _inc_cset_recorded_rs_lengths_diffs(0),
-  _inc_cset_predicted_elapsed_time_ms(0.0),
-  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
-
   // add here any more surv rate groups
   _recorded_survivor_regions(0),
   _recorded_survivor_head(NULL),
@@ -268,8 +252,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   // for the first time during initialization.
   _reserve_regions = 0;

-  _cset_chooser = new CollectionSetChooser();
-
   _ihop_control = create_ihop_control();
 }
@@ -489,6 +471,8 @@ void G1CollectorPolicy::initialize_flags() {
 void G1CollectorPolicy::init() {
   // Set aside an initial future to_space.
   _g1 = G1CollectedHeap::heap();
+  _collection_set = _g1->collection_set();
+  _collection_set->set_policy(this);

   assert(Heap_lock->owned_by_self(), "Locking discipline.");
@@ -504,7 +488,7 @@ void G1CollectorPolicy::init() {
   update_young_list_max_and_target_length();
   // We may immediately start allocating regions and placing them on the
   // collection set list. Initialize the per-collection set info
-  start_incremental_cset_building();
+  _collection_set->start_incremental_building();
 }

 void G1CollectorPolicy::note_gc_start(uint num_active_workers) {
@@ -913,7 +897,7 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
   phase_times()->record_cur_collection_start_sec(start_time_sec);
   _pending_cards = _g1->pending_card_num();

-  _collection_set_bytes_used_before = 0;
+  _collection_set->reset_bytes_used_before();
   _bytes_copied_during_gc = 0;

   collector_state()->set_last_gc_was_young(false);
@@ -988,6 +972,10 @@ double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
   return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
 }

+CollectionSetChooser* G1CollectorPolicy::cset_chooser() const {
+  return _collection_set->cset_chooser();
+}
+
 bool G1CollectorPolicy::about_to_start_mixed_phase() const {
   return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
 }
@@ -1053,7 +1041,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
     // given that humongous object allocations do not really affect
     // either the pause's duration nor when the next pause will take
    // place we can safely ignore them here.
-    uint regions_allocated = eden_cset_region_length();
+    uint regions_allocated = _collection_set->eden_region_length();
     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
     _alloc_rate_ms_seq->add(alloc_rate_ms);
@@ -1163,13 +1151,14 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
   // say, it's in mid-coarsening). So I'll leave in the defensive
   // conditional below just in case.
   size_t rs_length_diff = 0;
-  if (_max_rs_lengths > _recorded_rs_lengths) {
-    rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
+  size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths();
+  if (_max_rs_lengths > recorded_rs_lengths) {
+    rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
   }
   _rs_length_diff_seq->add((double) rs_length_diff);

   size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
-  size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
+  size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
   double cost_per_byte_ms = 0.0;

   if (copied_bytes > 0) {
@@ -1181,14 +1170,14 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
     }
   }

-  if (young_cset_region_length() > 0) {
+  if (_collection_set->young_region_length() > 0) {
     _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
-                                             young_cset_region_length());
+                                             _collection_set->young_region_length());
   }

-  if (old_cset_region_length() > 0) {
+  if (_collection_set->old_region_length() > 0) {
     _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
-                                                 old_cset_region_length());
+                                                 _collection_set->old_region_length());
   }

   _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
@@ -1501,17 +1490,6 @@ double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
   return region_elapsed_time_ms;
 }

-void G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
-                                                 uint survivor_cset_region_length) {
-  _eden_cset_region_length = eden_cset_region_length;
-  _survivor_cset_region_length = survivor_cset_region_length;
-  _old_cset_region_length = 0;
-}
-
-void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
-  _recorded_rs_lengths = rs_lengths;
-}
-
 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                                double elapsed_ms) {
   _recent_gc_times_ms->add(elapsed_ms);
@@ -1818,198 +1796,6 @@ void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
 }

-// Add the heap region at the head of the non-incremental collection set
-void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
-  assert(_inc_cset_build_state == Active, "Precondition");
-  assert(hr->is_old(), "the region should be old");
-  assert(!hr->in_collection_set(), "should not already be in the CSet");
-
-  _g1->register_old_region_with_cset(hr);
-  hr->set_next_in_collection_set(_collection_set);
-  _collection_set = hr;
-  _collection_set_bytes_used_before += hr->used();
-  size_t rs_length = hr->rem_set()->occupied();
-  _recorded_rs_lengths += rs_length;
-  _old_cset_region_length += 1;
-}
-
-// Initialize the per-collection-set information
-void G1CollectorPolicy::start_incremental_cset_building() {
-  assert(_inc_cset_build_state == Inactive, "Precondition");
-
-  _inc_cset_head = NULL;
-  _inc_cset_tail = NULL;
-  _inc_cset_bytes_used_before = 0;
-
-  _inc_cset_recorded_rs_lengths = 0;
-  _inc_cset_recorded_rs_lengths_diffs = 0;
-  _inc_cset_predicted_elapsed_time_ms = 0.0;
-  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
-  _inc_cset_build_state = Active;
-}
-
-void G1CollectorPolicy::finalize_incremental_cset_building() {
-  assert(_inc_cset_build_state == Active, "Precondition");
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
-
-  // The two "main" fields, _inc_cset_recorded_rs_lengths and
-  // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
-  // that adds a new region to the CSet. Further updates by the
-  // concurrent refinement thread that samples the young RSet lengths
-  // are accumulated in the *_diffs fields. Here we add the diffs to
-  // the "main" fields.
-
-  if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
-    _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
-  } else {
-    // This is defensive. The diff should in theory be always positive
-    // as RSets can only grow between GCs. However, given that we
-    // sample their size concurrently with other threads updating them
-    // it's possible that we might get the wrong size back, which
-    // could make the calculations somewhat inaccurate.
-    size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
-    if (_inc_cset_recorded_rs_lengths >= diffs) {
-      _inc_cset_recorded_rs_lengths -= diffs;
-    } else {
-      _inc_cset_recorded_rs_lengths = 0;
-    }
-  }
-  _inc_cset_predicted_elapsed_time_ms +=
-    _inc_cset_predicted_elapsed_time_ms_diffs;
-
-  _inc_cset_recorded_rs_lengths_diffs = 0;
-  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
-}
-
-void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
-  // This routine is used when:
-  // * adding survivor regions to the incremental cset at the end of an
-  //   evacuation pause,
-  // * adding the current allocation region to the incremental cset
-  //   when it is retired, and
-  // * updating existing policy information for a region in the
-  //   incremental cset via young list RSet sampling.
-  // Therefore this routine may be called at a safepoint by the
-  // VM thread, or in-between safepoints by mutator threads (when
-  // retiring the current allocation region) or a concurrent
-  // refine thread (RSet sampling).
-
-  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
-  size_t used_bytes = hr->used();
-  _inc_cset_recorded_rs_lengths += rs_length;
-  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
-  _inc_cset_bytes_used_before += used_bytes;
-
-  // Cache the values we have added to the aggregated information
-  // in the heap region in case we have to remove this region from
-  // the incremental collection set, or it is updated by the
-  // rset sampling code
-  hr->set_recorded_rs_length(rs_length);
-  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
-}
-
-void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
-                                                     size_t new_rs_length) {
-  // Update the CSet information that is dependent on the new RS length
-  assert(hr->is_young(), "Precondition");
-  assert(!SafepointSynchronize::is_at_safepoint(),
-         "should not be at a safepoint");
-
-  // We could have updated _inc_cset_recorded_rs_lengths and
-  // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
-  // that atomically, as this code is executed by a concurrent
-  // refinement thread, potentially concurrently with a mutator thread
-  // allocating a new region and also updating the same fields. To
-  // avoid the atomic operations we accumulate these updates on two
-  // separate fields (*_diffs) and we'll just add them to the "main"
-  // fields at the start of a GC.
-
-  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
-  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
-  _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
-
-  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
-  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
-  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
-  _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
-
-  hr->set_recorded_rs_length(new_rs_length);
-  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
-}
-
-void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
-  assert(hr->is_young(), "invariant");
-  assert(hr->young_index_in_cset() > -1, "should have already been set");
-  assert(_inc_cset_build_state == Active, "Precondition");
-
-  // We need to clear and set the cached recorded/cached collection set
-  // information in the heap region here (before the region gets added
-  // to the collection set). An individual heap region's cached values
-  // are calculated, aggregated with the policy collection set info,
-  // and cached in the heap region here (initially) and (subsequently)
-  // by the Young List sampling code.
-
-  size_t rs_length = hr->rem_set()->occupied();
-  add_to_incremental_cset_info(hr, rs_length);
-
-  assert(!hr->in_collection_set(), "invariant");
-  _g1->register_young_region_with_cset(hr);
-  assert(hr->next_in_collection_set() == NULL, "invariant");
-}
-
-// Add the region at the RHS of the incremental cset
-void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
-  // We should only ever be appending survivors at the end of a pause
-  assert(hr->is_survivor(), "Logic");
-
-  // Do the 'common' stuff
-  add_region_to_incremental_cset_common(hr);
-
-  // Now add the region at the right hand side
-  if (_inc_cset_tail == NULL) {
-    assert(_inc_cset_head == NULL, "invariant");
-    _inc_cset_head = hr;
-  } else {
-    _inc_cset_tail->set_next_in_collection_set(hr);
-  }
-  _inc_cset_tail = hr;
-}
-
-// Add the region to the LHS of the incremental cset
-void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
-  // Survivors should be added to the RHS at the end of a pause
-  assert(hr->is_eden(), "Logic");
-
-  // Do the 'common' stuff
-  add_region_to_incremental_cset_common(hr);
-
-  // Add the region at the left hand side
-  hr->set_next_in_collection_set(_inc_cset_head);
-  if (_inc_cset_head == NULL) {
-    assert(_inc_cset_tail == NULL, "Invariant");
-    _inc_cset_tail = hr;
-  }
-  _inc_cset_head = hr;
-}
-
-#ifndef PRODUCT
-void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
-  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
-
-  st->print_cr("\nCollection_set:");
-  HeapRegion* csr = list_head;
-  while (csr != NULL) {
-    HeapRegion* next = csr->next_in_collection_set();
-    assert(csr->in_collection_set(), "bad CS");
-    st->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
-                 HR_FORMAT_PARAMS(csr),
-                 p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
-                 csr->age_in_surv_rate_group_cond());
-    csr = next;
-  }
-}
-#endif // !PRODUCT
-
 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
   // Returns the given amount of reclaimable bytes (that represents
   // the amount of reclaimable space still to be collected) as a
@ -2139,161 +1925,8 @@ uint G1CollectorPolicy::calc_max_old_cset_length() const {
return (uint) result; return (uint) result;
} }
void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) {
double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) { double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
double young_start_time_sec = os::elapsedTime(); _collection_set->finalize_old_part(time_remaining_ms);
YoungList* young_list = _g1->young_list();
finalize_incremental_cset_building();
guarantee(target_pause_time_ms > 0.0,
"target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
guarantee(_collection_set == NULL, "Precondition");
double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
_pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
// The young list is laid with the survivor regions from the previous
// pause are appended to the RHS of the young list, i.e.
// [Newly Young Regions ++ Survivors from last pause].
uint survivor_region_length = young_list->survivor_length();
uint eden_region_length = young_list->eden_length();
init_cset_region_lengths(eden_region_length, survivor_region_length);
HeapRegion* hr = young_list->first_survivor_region();
while (hr != NULL) {
assert(hr->is_survivor(), "badly formed young list");
// There is a convention that all the young regions in the CSet
// are tagged as "eden", so we do this for the survivors here. We
// use the special set_eden_pre_gc() as it doesn't check that the
// region is free (which is not the case here).
hr->set_eden_pre_gc();
hr = hr->get_next_young_region();
} }
// Clear the fields that point to the survivor list - they are all young now.
young_list->clear_survivors();
_collection_set = _inc_cset_head;
_collection_set_bytes_used_before = _inc_cset_bytes_used_before;
time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
eden_region_length, survivor_region_length, _inc_cset_predicted_elapsed_time_ms, target_pause_time_ms);
// The number of recorded young regions is the incremental
// collection set's current size
set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
double young_end_time_sec = os::elapsedTime();
phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
return time_remaining_ms;
}
void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
double non_young_start_time_sec = os::elapsedTime();
double predicted_old_time_ms = 0.0;
if (!collector_state()->gcs_are_young()) {
cset_chooser()->verify();
const uint min_old_cset_length = calc_min_old_cset_length();
const uint max_old_cset_length = calc_max_old_cset_length();
uint expensive_region_num = 0;
bool check_time_remaining = adaptive_young_list_length();
HeapRegion* hr = cset_chooser()->peek();
while (hr != NULL) {
if (old_cset_region_length() >= max_old_cset_length) {
// Added maximum number of old regions to the CSet.
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
old_cset_region_length(), max_old_cset_length);
break;
}
// Stop adding regions if the remaining reclaimable space is
// not above G1HeapWastePercent.
size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
double threshold = (double) G1HeapWastePercent;
if (reclaimable_perc <= threshold) {
// We've added enough old regions that the amount of uncollected
// reclaimable space is at or below the waste threshold. Stop
// adding old regions to the CSet.
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
"old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
old_cset_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
break;
}
double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
if (check_time_remaining) {
if (predicted_time_ms > time_remaining_ms) {
// Too expensive for the current CSet.
if (old_cset_region_length() >= min_old_cset_length) {
// We have added the minimum number of old regions to the CSet,
// we are done with this CSet.
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
"predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions",
predicted_time_ms, time_remaining_ms, old_cset_region_length(), min_old_cset_length);
break;
}
// We'll add it anyway given that we haven't reached the
// minimum number of old regions.
expensive_region_num += 1;
}
} else {
if (old_cset_region_length() >= min_old_cset_length) {
// In the non-auto-tuning case, we'll finish adding regions
// to the CSet if we reach the minimum.
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
old_cset_region_length(), min_old_cset_length);
break;
}
}
// We will add this region to the CSet.
time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
predicted_old_time_ms += predicted_time_ms;
cset_chooser()->pop(); // already have region via peek()
_g1->old_set_remove(hr);
add_old_region_to_cset(hr);
hr = cset_chooser()->peek();
}
if (hr == NULL) {
log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
}
if (expensive_region_num > 0) {
// We print the information once here at the end, predicated on
// whether we added any apparently expensive regions or not, to
// avoid generating output per region.
log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
"old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
}
cset_chooser()->verify();
}
stop_incremental_cset_building();
log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);
double non_young_end_time_sec = os::elapsedTime();
phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
}
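
The selection loop above combines three stopping rules: a hard cap on the number of old regions, the G1HeapWastePercent floor on remaining reclaimable space, and the pause-time budget, which only applies once the minimum number of old regions has been added. The following self-contained sketch distills that policy; Region, select_old_regions and every constant in main are hypothetical illustrations, not JDK code.

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for a HeapRegion candidate; only the two values
// the selection policy reads are modeled.
struct Region {
  double predicted_time_ms;  // predicted evacuation time for this region
  size_t reclaimable_bytes;  // garbage reclaimed by collecting it
};

// Mirrors the loop above: consume candidates until the maximum region
// count, the waste threshold, or the pause-time budget stops us, but
// never stop for cost reasons before the minimum region count.
static size_t select_old_regions(const std::vector<Region>& candidates,
                                 size_t min_regions, size_t max_regions,
                                 double time_budget_ms,
                                 size_t heap_bytes, double waste_percent) {
  size_t remaining_reclaimable = 0;
  for (const Region& r : candidates) {
    remaining_reclaimable += r.reclaimable_bytes;
  }
  size_t taken = 0;
  for (const Region& r : candidates) {
    if (taken >= max_regions) break;  // old CSet region num reached max
    double reclaimable_perc = 100.0 * (double)remaining_reclaimable / (double)heap_bytes;
    if (reclaimable_perc <= waste_percent) break;  // leftover garbage is tolerable
    if (r.predicted_time_ms > time_budget_ms && taken >= min_regions) break;
    // Below the minimum we add the region even though it looks expensive.
    time_budget_ms = (time_budget_ms > r.predicted_time_ms) ? time_budget_ms - r.predicted_time_ms : 0.0;
    remaining_reclaimable -= r.reclaimable_bytes;
    taken++;
  }
  return taken;
}

int main() {
  std::vector<Region> candidates = {{2.0, 8 << 20}, {3.0, 6 << 20}, {5.0, 1 << 20}};
  printf("selected %zu old regions\n",
         select_old_regions(candidates, 1, 3, 6.0, 64 << 20, 5.0));
  return 0;
}

Letting the minimum override the budget is deliberate: mixed pauses must make forward progress through the candidate list even when every remaining region is predicted to be expensive, which is exactly what the expensive_region_num logging above reports.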


@@ -25,7 +25,6 @@
#ifndef SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
#include "gc/g1/collectionSetChooser.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1InCSetState.hpp"
@@ -41,6 +40,7 @@
// * when to collect.
class HeapRegion;
class G1CollectionSet;
class CollectionSetChooser;
class G1IHOPControl;
class G1YoungGenSizer;
@@ -66,8 +66,6 @@ class G1CollectorPolicy: public CollectorPolicy {
  void initialize_alignments();
  void initialize_flags();
  CollectionSetChooser* _cset_chooser;
  double _full_collection_start_sec;
  // These exclude marking times.
@@ -128,20 +126,8 @@ class G1CollectorPolicy: public CollectorPolicy {
  G1YoungGenSizer* _young_gen_sizer;
  uint _eden_cset_region_length;
  uint _survivor_cset_region_length;
  uint _old_cset_region_length;
  void init_cset_region_lengths(uint eden_cset_region_length,
                                uint survivor_cset_region_length);
  uint eden_cset_region_length() const { return _eden_cset_region_length; }
  uint survivor_cset_region_length() const { return _survivor_cset_region_length; }
  uint old_cset_region_length() const { return _old_cset_region_length; }
  uint _free_regions_at_end_of_collection;
  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;
  size_t _rs_lengths_prediction;
@@ -229,13 +215,6 @@ public:
  size_t predict_bytes_to_copy(HeapRegion* hr) const;
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;
  void set_recorded_rs_lengths(size_t rs_lengths);
  uint cset_region_length() const { return young_cset_region_length() +
                                    old_cset_region_length(); }
  uint young_cset_region_length() const { return eden_cset_region_length() +
                                          survivor_cset_region_length(); }
  double predict_survivor_regions_evac_time() const;
  bool should_update_surv_rate_group_predictors() {
@@ -274,6 +253,7 @@ public:
  double accum_yg_surv_rate_pred(int age) const;
protected:
  G1CollectionSet* _collection_set;
  virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
  virtual double other_time_ms(double pause_time_ms) const;
@@ -281,10 +261,7 @@ protected:
  double non_young_other_time_ms() const;
  double constant_other_time_ms(double pause_time_ms) const;
  CollectionSetChooser* cset_chooser() const {
    return _cset_chooser;
  }
  CollectionSetChooser* cset_chooser() const;
private:
  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
@@ -292,65 +269,9 @@ private:
  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);
  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;
  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_old_cset_part() when adding old regions
  // (if any) to the collection set.
  size_t _collection_set_bytes_used_before;
  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;
  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.
  enum CSetBuildType {
    Active,   // We are actively building the collection set
    Inactive  // We are not actively building the collection set
  };
  CSetBuildType _inc_cset_build_state;
  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;
  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;
  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;
  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_cset_recorded_rs_lengths;
  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_cset_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate them in this field and add it to
  // _inc_cset_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_cset_recorded_rs_lengths_diffs;
  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;
  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;
  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;
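
The pair of *_diffs fields above documents a small lock-free hand-off idiom: the single thread that adds CSet regions owns the main counters, while the concurrent sampling thread accumulates its updates in a separate diffs cell that is folded into the main counter at the start of a GC, when the world is stopped. Here is a minimal sketch of that idiom under hypothetical names; it uses std::atomic where the HotSpot code can instead rely on safepoint scheduling, so treat it as an illustration rather than a faithful port.

#include <atomic>
#include <cstddef>
#include <cstdio>

class RsLengthsTracker {
  size_t _recorded_rs_lengths = 0;         // single-writer, no locking needed
  std::atomic<long> _rs_lengths_diffs{0};  // written by the sampling thread

 public:
  void add_region(size_t rs_length) {      // CSet-building thread only
    _recorded_rs_lengths += rs_length;
  }
  void sample_grew_by(long delta) {        // concurrent sampling thread
    _rs_lengths_diffs.fetch_add(delta, std::memory_order_relaxed);
  }
  size_t finalize_at_gc_start() {          // at a safepoint; sampler is quiescent
    long diffs = _rs_lengths_diffs.exchange(0, std::memory_order_relaxed);
    _recorded_rs_lengths += diffs;         // may shrink as well as grow
    return _recorded_rs_lengths;
  }
};

int main() {
  RsLengthsTracker tracker;
  tracker.add_region(100);                 // region added under the Heap_lock
  tracker.sample_grew_by(+7);              // concurrent RSet growth observed
  printf("rs lengths at pause start: %zu\n", tracker.finalize_at_gc_start());
  return 0;
}

The exchange-and-fold at a pause boundary is what lets the sampler run without taking the Heap_lock, at the cost of the main counter being slightly stale between pauses.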
@@ -424,6 +345,9 @@ private:
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms) const;
public:
  size_t pending_cards() const { return _pending_cards; }
  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length() const;
@@ -436,6 +360,7 @@ private:
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes) const;
private:
  // Sets up marking if proper conditions are met.
  void maybe_start_marking();
@@ -520,83 +445,20 @@ public:
    return _bytes_copied_during_gc;
  }
  size_t collection_set_bytes_used_before() const {
    return _collection_set_bytes_used_before;
  }
  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str) const;
  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // the collection set are available via access methods.
  double finalize_young_cset_part(double target_pause_time_ms);
  virtual void finalize_old_cset_part(double time_remaining_ms);
  virtual void finalize_collection_set(double target_pause_time_ms);
  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }
  void clear_collection_set() { _collection_set = NULL; }
  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);
  // Incremental CSet Support
  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }
  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }
  // Initialize incremental collection set info.
  void start_incremental_cset_building();
  // Perform any final calculations on the incremental CSet fields
  // before we can use them.
  void finalize_incremental_cset_building();
  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }
  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
  // Add information about hr to the aggregated information for the
  // incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);
  // Set the state to start a concurrent marking cycle and clear
  // _initiate_conc_mark_if_possible because it has now been
  // acted on.
  void initiate_conc_mark();
public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);
  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);
#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT
  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
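
The incremental-CSet API being deleted here (it reappears as G1CollectionSet::start_incremental_building() and friends) follows an Active/Inactive lifecycle: start building after a pause finishes, add regions as they retire, freeze the state at the next pause, then clear the list and begin again. A toy model of just the lifecycle and linked-list mechanics, with hypothetical names and none of the recorded statistics:

#include <cassert>
#include <cstdio>

struct Region { Region* next_in_collection_set = nullptr; };

class IncrementalCSet {
  enum BuildState { Active, Inactive } _state = Inactive;
  Region* _head = nullptr;
  Region* _tail = nullptr;

 public:
  void start_building() { assert(_state == Inactive); _state = Active; }
  void stop_building()  { _state = Inactive; }
  void clear()          { _head = _tail = nullptr; }

  // "lhs" add: regions are prepended at the head as they retire.
  void add_lhs(Region* r) {
    assert(_state == Active && r->next_in_collection_set == nullptr);
    r->next_in_collection_set = _head;
    _head = r;
    if (_tail == nullptr) _tail = r;
  }

  Region* head() const { return _head; }
};

int main() {
  IncrementalCSet cset;
  Region r1, r2;
  cset.start_building();  // after the previous pause finished
  cset.add_lhs(&r1);      // regions retire into the set over time
  cset.add_lhs(&r2);
  cset.stop_building();   // at pause start, the set is frozen
  int n = 0;
  for (Region* r = cset.head(); r != nullptr; r = r->next_in_collection_set) n++;
  printf("collection set has %d regions\n", n);
  cset.clear();           // start afresh for the next pause
  return 0;
}

In the real code survivor regions are appended at the tail (the rhs variant) while newly retired eden regions are prepended at the head; the sketch shows only head insertion.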


@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,9 @@
#ifndef SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP
#define SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP
#include "utilities/globalDefinitions.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
// Various state variables that indicate
// the phase of the G1 collection.


@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
@@ -80,7 +81,7 @@ void G1ParScanThreadState::flush(size_t* surviving_young_words) {
  _plab_allocator->flush_and_retire_stats();
  _g1h->g1_policy()->record_age_table(&_age_table);
  uint length = _g1h->g1_policy()->young_cset_region_length();
  uint length = _g1h->collection_set()->young_region_length();
  for (uint region_index = 0; region_index < length; region_index++) {
    surviving_young_words[region_index] += _surviving_young_words[region_index];
  }


@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
@@ -114,7 +115,7 @@ void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
      // retired as the current allocation region).
      if (hr->in_collection_set()) {
        // Update the collection set policy information for this region
        g1p->update_incremental_cset_info(hr, rs_length);
        g1h->collection_set()->update_young_region_prediction(hr, rs_length);
      }
      ++regions_visited;


@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegion.inline.hpp"
@@ -153,7 +154,7 @@ YoungList::reset_auxilary_lists() {
      // The region is a non-empty survivor so let's add it to
      // the incremental collection set for the next evacuation
      // pause.
      _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
      _g1h->collection_set()->add_survivor_regions(curr);
      young_index_in_cset += 1;
    }
  assert((uint) young_index_in_cset == _survivor_length, "post-condition");