Mirror of https://github.com/openjdk/jdk.git, synced 2025-08-28 23:34:52 +02:00
8034246: remove CMS and ParNew adaptive size policy code
Reviewed-by: tschatzl, jwilhelm, mgerdin
commit 900ca33ab0
parent aec070cb69
21 changed files with 29 additions and 3651 deletions
File diff suppressed because it is too large
@@ -1,477 +0,0 @@
/*
 * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP

#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "runtime/timer.hpp"

// This class keeps statistical information and computes the
// size of the heap for the concurrent mark sweep collector.
//
// Cost for garbage collector include cost for
//   minor collection
//   concurrent collection
//      stop-the-world component
//      concurrent component
//   major compacting collection
//      uses decaying cost

// Forward decls
class elapsedTimer;

class CMSAdaptiveSizePolicy : public AdaptiveSizePolicy {
 friend class CMSGCAdaptivePolicyCounters;
 friend class CMSCollector;
 private:

  // Total number of processors available
  int _processor_count;
  // Number of processors used by the concurrent phases of GC
  // This number is assumed to be the same for all concurrent
  // phases.
  int _concurrent_processor_count;

  // Time that the mutators run exclusive of a particular
  // phase.  For example, the time the mutators run excluding
  // the time during which the cms collector runs concurrently
  // with the mutators.
  // Between end of most recent cms reset and start of initial mark
    // This may be redundant
  double _latest_cms_reset_end_to_initial_mark_start_secs;
  // Between end of the most recent initial mark and start of remark
  double _latest_cms_initial_mark_end_to_remark_start_secs;
  // Between end of most recent collection and start of
  // a concurrent collection
  double _latest_cms_collection_end_to_collection_start_secs;
  // Times of the concurrent phases of the most recent
  // concurrent collection
  double _latest_cms_concurrent_marking_time_secs;
  double _latest_cms_concurrent_precleaning_time_secs;
  double _latest_cms_concurrent_sweeping_time_secs;
  // Between end of most recent STW MSC and start of next STW MSC
  double _latest_cms_msc_end_to_msc_start_time_secs;
  // Between end of most recent MS and start of next MS
  // This does not include any time spent during a concurrent
  // collection.
  double _latest_cms_ms_end_to_ms_start;
  // Between start and end of the initial mark of the most recent
  // concurrent collection.
  double _latest_cms_initial_mark_start_to_end_time_secs;
  // Between start and end of the remark phase of the most recent
  // concurrent collection
  double _latest_cms_remark_start_to_end_time_secs;
  // Between start and end of the most recent MS STW marking phase
  double _latest_cms_ms_marking_start_to_end_time_secs;

  // Pause time timers
  static elapsedTimer _STW_timer;
  // Concurrent collection timer.  Used for total of all concurrent phases
  // during 1 collection cycle.
  static elapsedTimer _concurrent_timer;

  // When the size of the generation is changed, the size
  // of the change will rounded up or down (depending on the
  // type of change) by this value.
  size_t _generation_alignment;

  // If this variable is true, the size of the young generation
  // may be changed in order to reduce the pause(s) of the
  // collection of the tenured generation in order to meet the
  // pause time goal.  It is common to change the size of the
  // tenured generation in order to meet the pause time goal
  // for the tenured generation.  With the CMS collector for
  // the tenured generation, the size of the young generation
  // can have an significant affect on the pause times for collecting the
  // tenured generation.
  // This is a duplicate of a variable in PSAdaptiveSizePolicy.  It
  // is duplicated because it is not clear that it is general enough
  // to go into AdaptiveSizePolicy.
  int _change_young_gen_for_maj_pauses;

  // Variable that is set to true after a collection.
  bool _first_after_collection;

  // Fraction of collections that are of each type
  double concurrent_fraction() const;
  double STW_msc_fraction() const;
  double STW_ms_fraction() const;

  // This call cannot be put into the epilogue as long as some
  // of the counters can be set during concurrent phases.
  virtual void clear_generation_free_space_flags();

  void set_first_after_collection() { _first_after_collection = true; }

 protected:
  // Average of the sum of the concurrent times for
  // one collection in seconds.
  AdaptiveWeightedAverage* _avg_concurrent_time;
  // Average time between concurrent collections in seconds.
  AdaptiveWeightedAverage* _avg_concurrent_interval;
  // Average cost of the concurrent part of a collection
  // in seconds.
  AdaptiveWeightedAverage* _avg_concurrent_gc_cost;

  // Average of the initial pause of a concurrent collection in seconds.
  AdaptivePaddedAverage* _avg_initial_pause;
  // Average of the remark pause of a concurrent collection in seconds.
  AdaptivePaddedAverage* _avg_remark_pause;

  // Average of the stop-the-world (STW) (initial mark + remark)
  // times in seconds for concurrent collections.
  AdaptiveWeightedAverage* _avg_cms_STW_time;
  // Average of the STW collection cost for concurrent collections.
  AdaptiveWeightedAverage* _avg_cms_STW_gc_cost;

  // Average of the bytes free at the start of the sweep.
  AdaptiveWeightedAverage* _avg_cms_free_at_sweep;
  // Average of the bytes free at the end of the collection.
  AdaptiveWeightedAverage* _avg_cms_free;
  // Average of the bytes promoted between cms collections.
  AdaptiveWeightedAverage* _avg_cms_promo;

  // stop-the-world (STW) mark-sweep-compact
  // Average of the pause time in seconds for STW mark-sweep-compact
  // collections.
  AdaptiveWeightedAverage* _avg_msc_pause;
  // Average of the interval in seconds between STW mark-sweep-compact
  // collections.
  AdaptiveWeightedAverage* _avg_msc_interval;
  // Average of the collection costs for STW mark-sweep-compact
  // collections.
  AdaptiveWeightedAverage* _avg_msc_gc_cost;

  // Averages for mark-sweep collections.
  // The collection may have started as a background collection
  // that completes in a stop-the-world (STW) collection.
  // Average of the pause time in seconds for mark-sweep
  // collections.
  AdaptiveWeightedAverage* _avg_ms_pause;
  // Average of the interval in seconds between mark-sweep
  // collections.
  AdaptiveWeightedAverage* _avg_ms_interval;
  // Average of the collection costs for mark-sweep
  // collections.
  AdaptiveWeightedAverage* _avg_ms_gc_cost;

  // These variables contain a linear fit of
  // a generation size as the independent variable
  // and a pause time as the dependent variable.
  // For example _remark_pause_old_estimator
  // is a fit of the old generation size as the
  // independent variable and the remark pause
  // as the dependent variable.
  //   remark pause time vs. cms gen size
  LinearLeastSquareFit* _remark_pause_old_estimator;
  //   initial pause time vs. cms gen size
  LinearLeastSquareFit* _initial_pause_old_estimator;
  //   remark pause time vs. young gen size
  LinearLeastSquareFit* _remark_pause_young_estimator;
  //   initial pause time vs. young gen size
  LinearLeastSquareFit* _initial_pause_young_estimator;

  // Accessors
  int processor_count() const { return _processor_count; }
  int concurrent_processor_count() const { return _concurrent_processor_count; }

  AdaptiveWeightedAverage* avg_concurrent_time() const {
    return _avg_concurrent_time;
  }

  AdaptiveWeightedAverage* avg_concurrent_interval() const {
    return _avg_concurrent_interval;
  }

  AdaptiveWeightedAverage* avg_concurrent_gc_cost() const {
    return _avg_concurrent_gc_cost;
  }

  AdaptiveWeightedAverage* avg_cms_STW_time() const {
    return _avg_cms_STW_time;
  }

  AdaptiveWeightedAverage* avg_cms_STW_gc_cost() const {
    return _avg_cms_STW_gc_cost;
  }

  AdaptivePaddedAverage* avg_initial_pause() const {
    return _avg_initial_pause;
  }

  AdaptivePaddedAverage* avg_remark_pause() const {
    return _avg_remark_pause;
  }

  AdaptiveWeightedAverage* avg_cms_free() const {
    return _avg_cms_free;
  }

  AdaptiveWeightedAverage* avg_cms_free_at_sweep() const {
    return _avg_cms_free_at_sweep;
  }

  AdaptiveWeightedAverage* avg_msc_pause() const {
    return _avg_msc_pause;
  }

  AdaptiveWeightedAverage* avg_msc_interval() const {
    return _avg_msc_interval;
  }

  AdaptiveWeightedAverage* avg_msc_gc_cost() const {
    return _avg_msc_gc_cost;
  }

  AdaptiveWeightedAverage* avg_ms_pause() const {
    return _avg_ms_pause;
  }

  AdaptiveWeightedAverage* avg_ms_interval() const {
    return _avg_ms_interval;
  }

  AdaptiveWeightedAverage* avg_ms_gc_cost() const {
    return _avg_ms_gc_cost;
  }

  LinearLeastSquareFit* remark_pause_old_estimator() {
    return _remark_pause_old_estimator;
  }
  LinearLeastSquareFit* initial_pause_old_estimator() {
    return _initial_pause_old_estimator;
  }
  LinearLeastSquareFit* remark_pause_young_estimator() {
    return _remark_pause_young_estimator;
  }
  LinearLeastSquareFit* initial_pause_young_estimator() {
    return _initial_pause_young_estimator;
  }

  // These *slope() methods return the slope
  // m for the linear fit of an independent
  // variable vs. a dependent variable.  For
  // example
  //  remark_pause = m * old_generation_size + c
  // These may be used to determine if an
  // adjustment should be made to achieve a goal.
  // For example, if remark_pause_old_slope() is
  // positive, a reduction of the old generation
  // size has on average resulted in the reduction
  // of the remark pause.
  float remark_pause_old_slope() {
    return _remark_pause_old_estimator->slope();
  }

  float initial_pause_old_slope() {
    return _initial_pause_old_estimator->slope();
  }

  float remark_pause_young_slope() {
    return _remark_pause_young_estimator->slope();
  }

  float initial_pause_young_slope() {
    return _initial_pause_young_estimator->slope();
  }

  // Update estimators
  void update_minor_pause_old_estimator(double minor_pause_in_ms);

  // Fraction of processors used by the concurrent phases.
  double concurrent_processor_fraction();

  // Returns the total times for the concurrent part of the
  // latest collection in seconds.
  double concurrent_collection_time();

  // Return the total times for the concurrent part of the
  // latest collection in seconds where the times of the various
  // concurrent phases are scaled by the processor fraction used
  // during the phase.
  double scaled_concurrent_collection_time();

  // Dimensionless concurrent GC cost for all the concurrent phases.
  double concurrent_collection_cost(double interval_in_seconds);

  // Dimensionless GC cost
  double collection_cost(double pause_in_seconds, double interval_in_seconds);

  virtual GCPolicyKind kind() const { return _gc_cms_adaptive_size_policy; }

  virtual double time_since_major_gc() const;

  // This returns the maximum average for the concurrent, ms, and
  // msc collections.  This is meant to be used for the calculation
  // of the decayed major gc cost and is not in general the
  // average of all the different types of major collections.
  virtual double major_gc_interval_average_for_decay() const;

 public:
  CMSAdaptiveSizePolicy(size_t init_eden_size,
                        size_t init_promo_size,
                        size_t init_survivor_size,
                        double max_gc_minor_pause_sec,
                        double max_gc_pause_sec,
                        uint gc_cost_ratio);

  // The timers for the stop-the-world phases measure a total
  // stop-the-world time.  The timer is started and stopped
  // for each phase but is only reset after the final checkpoint.
  void checkpoint_roots_initial_begin();
  void checkpoint_roots_initial_end(GCCause::Cause gc_cause);
  void checkpoint_roots_final_begin();
  void checkpoint_roots_final_end(GCCause::Cause gc_cause);

  // Methods for gathering information about the
  // concurrent marking phase of the collection.
  // Records the mutator times and
  // resets the concurrent timer.
  void concurrent_marking_begin();
  // Resets concurrent phase timer in the begin methods and
  // saves the time for a phase in the end methods.
  void concurrent_marking_end();
  void concurrent_sweeping_begin();
  void concurrent_sweeping_end();
  // Similar to the above (e.g., concurrent_marking_end()) and
  // is used for both the precleaning an abortable precleaning
  // phases.
  void concurrent_precleaning_begin();
  void concurrent_precleaning_end();
  // Stops the concurrent phases time.  Gathers
  // information and resets the timer.
  void concurrent_phases_end(GCCause::Cause gc_cause,
                             size_t cur_eden,
                             size_t cur_promo);

  // Methods for gather information about STW Mark-Sweep-Compact
  void msc_collection_begin();
  void msc_collection_end(GCCause::Cause gc_cause);

  // Methods for gather information about Mark-Sweep done
  // in the foreground.
  void ms_collection_begin();
  void ms_collection_end(GCCause::Cause gc_cause);

  // Cost for a mark-sweep tenured gen collection done in the foreground
  double ms_gc_cost() const {
    return MAX2(0.0F, _avg_ms_gc_cost->average());
  }

  // Cost of collecting the tenured generation.  Includes
  // concurrent collection and STW collection costs
  double cms_gc_cost() const;

  // Cost of STW mark-sweep-compact tenured gen collection.
  double msc_gc_cost() const {
    return MAX2(0.0F, _avg_msc_gc_cost->average());
  }

  //
  double compacting_gc_cost() const {
    double result = MIN2(1.0, minor_gc_cost() + msc_gc_cost());
    assert(result >= 0.0, "Both minor and major costs are non-negative");
    return result;
  }

  // Restarts the concurrent phases timer.
  void concurrent_phases_resume();

  // Time beginning and end of the marking phase for
  // a synchronous MS collection.  A MS collection
  // that finishes in the foreground can have started
  // in the background.  These methods capture the
  // completion of the marking (after the initial
  // marking) that is done in the foreground.
  void ms_collection_marking_begin();
  void ms_collection_marking_end(GCCause::Cause gc_cause);

  static elapsedTimer* concurrent_timer_ptr() {
    return &_concurrent_timer;
  }

  AdaptiveWeightedAverage* avg_cms_promo() const {
    return _avg_cms_promo;
  }

  int change_young_gen_for_maj_pauses() {
    return _change_young_gen_for_maj_pauses;
  }
  void set_change_young_gen_for_maj_pauses(int v) {
    _change_young_gen_for_maj_pauses = v;
  }

  void clear_internal_time_intervals();


  // Either calculated_promo_size_in_bytes() or promo_size()
  // should be deleted.
  size_t promo_size() { return _promo_size; }
  void set_promo_size(size_t v) { _promo_size = v; }

  // Cost of GC for all types of collections.
  virtual double gc_cost() const;

  size_t generation_alignment() { return _generation_alignment; }

  virtual void compute_eden_space_size(size_t cur_eden,
                                       size_t max_eden_size);
  // Calculates new survivor space size;  returns a new tenuring threshold
  // value. Stores new survivor size in _survivor_size.
  virtual uint compute_survivor_space_size_and_threshold(
                                          bool   is_survivor_overflow,
                                          uint   tenuring_threshold,
                                          size_t survivor_limit);

  virtual void compute_tenured_generation_free_space(size_t cur_tenured_free,
                                         size_t max_tenured_available,
                                         size_t cur_eden);

  size_t eden_decrement_aligned_down(size_t cur_eden);
  size_t eden_increment_aligned_up(size_t cur_eden);

  size_t adjust_eden_for_pause_time(size_t cur_eden);
  size_t adjust_eden_for_throughput(size_t cur_eden);
  size_t adjust_eden_for_footprint(size_t cur_eden);

  size_t promo_decrement_aligned_down(size_t cur_promo);
  size_t promo_increment_aligned_up(size_t cur_promo);

  size_t adjust_promo_for_pause_time(size_t cur_promo);
  size_t adjust_promo_for_throughput(size_t cur_promo);
  size_t adjust_promo_for_footprint(size_t cur_promo, size_t cur_eden);

  // Scale down the input size by the ratio of the cost to collect the
  // generation to the total GC cost.
  size_t scale_by_gen_gc_cost(size_t base_change, double gen_gc_cost);

  // Return the value and clear it.
  bool get_and_clear_first_after_collection();

  // Printing support
  virtual bool print_adaptive_size_policy_on(outputStream* st) const;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
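The comment on the removed *slope() accessors is the heart of the old heuristic: each LinearLeastSquareFit tracked a pause time against a generation size (remark_pause = m * old_generation_size + c), and a positive slope m was read as evidence that shrinking the generation tends to shrink the pause. The standalone sketch below illustrates only that decision rule; the names, the sample data, and the 5% step are hypothetical and are not the HotSpot implementation.

#include <cstddef>
#include <cstdio>
#include <vector>

// Least-squares slope m of y = m*x + c over the recorded samples,
// playing the role of the removed LinearLeastSquareFit::slope().
static double slope(const std::vector<double>& x, const std::vector<double>& y) {
  const double n = static_cast<double>(x.size());
  double sx = 0.0, sy = 0.0, sxx = 0.0, sxy = 0.0;
  for (size_t i = 0; i < x.size(); i++) {
    sx += x[i]; sy += y[i]; sxx += x[i] * x[i]; sxy += x[i] * y[i];
  }
  const double denom = n * sxx - sx * sx;
  return denom == 0.0 ? 0.0 : (n * sxy - sx * sy) / denom;
}

// Illustrative resize rule: if pause time has grown with generation size
// (positive slope) and the last pause missed the goal, propose a smaller
// generation; otherwise keep the current size.
static size_t propose_gen_size(size_t cur_size, double last_pause_sec,
                               double pause_goal_sec,
                               const std::vector<double>& sizes,
                               const std::vector<double>& pauses) {
  const double m = slope(sizes, pauses);
  if (last_pause_sec > pause_goal_sec && m > 0.0) {
    return cur_size - cur_size / 20;  // shrink by 5% (arbitrary step)
  }
  return cur_size;
}

int main() {
  // Hypothetical samples: old-gen size in MB vs. remark pause in seconds.
  std::vector<double> sizes  = {512, 640, 768, 896};
  std::vector<double> pauses = {0.08, 0.10, 0.13, 0.15};
  size_t proposal = propose_gen_size(896, 0.15, 0.10, sizes, pauses);
  std::printf("proposed old gen size: %zu MB\n", proposal);
  return 0;
}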
@@ -23,9 +23,8 @@
  */
 
 #include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
+#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
 #include "gc_implementation/parNew/parNewGeneration.hpp"
 #include "gc_implementation/shared/gcPolicyCounters.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
@@ -57,25 +56,12 @@ void ConcurrentMarkSweepPolicy::initialize_generations() {
   if (_generations == NULL)
     vm_exit_during_initialization("Unable to allocate gen spec");
 
-  if (UseParNewGC) {
-    if (UseAdaptiveSizePolicy) {
-      _generations[0] = new GenerationSpec(Generation::ASParNew,
-                                           _initial_young_size, _max_young_size);
-    } else {
-      _generations[0] = new GenerationSpec(Generation::ParNew,
-                                           _initial_young_size, _max_young_size);
-    }
-  } else {
-    _generations[0] = new GenerationSpec(Generation::DefNew,
-                                         _initial_young_size, _max_young_size);
-  }
-  if (UseAdaptiveSizePolicy) {
-    _generations[1] = new GenerationSpec(Generation::ASConcurrentMarkSweep,
-                                         _initial_old_size, _max_old_size);
-  } else {
-    _generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
-                                         _initial_old_size, _max_old_size);
-  }
+  Generation::Name yg_name =
+    UseParNewGC ? Generation::ParNew : Generation::DefNew;
+  _generations[0] = new GenerationSpec(yg_name, _initial_young_size,
+                                       _max_young_size);
+  _generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
+                                       _initial_old_size, _max_old_size);
 
   if (_generations[0] == NULL || _generations[1] == NULL) {
     vm_exit_during_initialization("Unable to allocate gen spec");
@@ -85,14 +71,12 @@ void ConcurrentMarkSweepPolicy::initialize_generations() {
 void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
                                size_t init_promo_size,
                                size_t init_survivor_size) {
-  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
-  _size_policy = new CMSAdaptiveSizePolicy(init_eden_size,
-                                           init_promo_size,
-                                           init_survivor_size,
-                                           max_gc_minor_pause_sec,
-                                           max_gc_pause_sec,
-                                           GCTimeRatio);
+  _size_policy = new AdaptiveSizePolicy(init_eden_size,
+                                        init_promo_size,
+                                        init_survivor_size,
+                                        max_gc_pause_sec,
+                                        GCTimeRatio);
 }
 
 void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
@@ -110,22 +94,3 @@ bool ConcurrentMarkSweepPolicy::has_soft_ended_eden()
 {
   return CMSIncrementalMode;
 }
-
-
-//
-// ASConcurrentMarkSweepPolicy methods
-//
-
-void ASConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
-
-  assert(size_policy() != NULL, "A size policy is required");
-  // initialize the policy counters - 2 collectors, 3 generations
-  if (UseParNewGC) {
-    _gc_policy_counters = new CMSGCAdaptivePolicyCounters("ParNew:CMS", 2, 3,
-      size_policy());
-  }
-  else {
-    _gc_policy_counters = new CMSGCAdaptivePolicyCounters("Copy:CMS", 2, 3,
-      size_policy());
-  }
-}
@@ -47,19 +47,4 @@ class ConcurrentMarkSweepPolicy : public GenCollectorPolicy {
   virtual bool has_soft_ended_eden();
 };
 
-class ASConcurrentMarkSweepPolicy : public ConcurrentMarkSweepPolicy {
- public:
-
-  // Initialize the jstat counters.  This method requires a
-  // size policy.  The size policy is expected to be created
-  // after the generations are fully initialized so the
-  // initialization of the counters need to be done post
-  // the initialization of the generations.
-  void initialize_gc_policy_counters();
-
-  virtual CollectorPolicy::Name kind() {
-    return CollectorPolicy::ASConcurrentMarkSweepPolicyKind;
-  }
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSCOLLECTORPOLICY_HPP
@@ -1,303 +0,0 @@
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "memory/resourceArea.hpp"

CMSGCAdaptivePolicyCounters::CMSGCAdaptivePolicyCounters(const char* name_arg,
                                        int collectors,
                                        int generations,
                                        AdaptiveSizePolicy* size_policy_arg)
        : GCAdaptivePolicyCounters(name_arg,
                                   collectors,
                                   generations,
                                   size_policy_arg) {
  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
      PerfDataManager::counter_name(name_space(), "cmsCapacity");
    _cms_capacity_counter = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Bytes, (jlong) OldSize, CHECK);
#ifdef NOT_PRODUCT
    cname =
      PerfDataManager::counter_name(name_space(), "initialPause");
    _initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_initial_pause()->last_sample(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "remarkPause");
    _remark_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_remark_pause()->last_sample(),
      CHECK);
#endif
    cname =
      PerfDataManager::counter_name(name_space(), "avgInitialPause");
    _avg_initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_initial_pause()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgRemarkPause");
    _avg_remark_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_remark_pause()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgSTWGcCost");
    _avg_cms_STW_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_cms_STW_gc_cost()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgSTWTime");
    _avg_cms_STW_time_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_cms_STW_time()->average(),
      CHECK);


    cname = PerfDataManager::counter_name(name_space(), "avgConcurrentTime");
    _avg_concurrent_time_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_concurrent_time()->average(),
      CHECK);

    cname =
      PerfDataManager::counter_name(name_space(), "avgConcurrentInterval");
    _avg_concurrent_interval_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_concurrent_interval()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgConcurrentGcCost");
    _avg_concurrent_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_concurrent_gc_cost()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgCMSFreeAtSweep");
    _avg_cms_free_at_sweep_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_cms_free_at_sweep()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgCMSFree");
    _avg_cms_free_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_cms_free()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgCMSPromo");
    _avg_cms_promo_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_cms_promo()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgMscPause");
    _avg_msc_pause_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_msc_pause()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgMscInterval");
    _avg_msc_interval_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_msc_interval()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "mscGcCost");
    _msc_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_msc_gc_cost()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgMsPause");
    _avg_ms_pause_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_ms_pause()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgMsInterval");
    _avg_ms_interval_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_ms_interval()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "msGcCost");
    _ms_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
      cname,
      PerfData::U_Ticks,
      (jlong) cms_size_policy()->avg_ms_gc_cost()->average(),
      CHECK);

    cname = PerfDataManager::counter_name(name_space(), "majorGcCost");
    _major_gc_cost_counter = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Ticks, (jlong) cms_size_policy()->cms_gc_cost(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgPromotedAvg");
    _promoted_avg_counter =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
        cms_size_policy()->calculated_promo_size_in_bytes(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgPromotedDev");
    _promoted_avg_dev_counter =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
        (jlong) 0 , CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgPromotedPaddedAvg");
    _promoted_padded_avg_counter =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
        cms_size_policy()->calculated_promo_size_in_bytes(), CHECK);

    cname = PerfDataManager::counter_name(name_space(),
      "changeYoungGenForMajPauses");
    _change_young_gen_for_maj_pauses_counter =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Events,
        (jlong)0, CHECK);

    cname = PerfDataManager::counter_name(name_space(), "remarkPauseOldSlope");
    _remark_pause_old_slope_counter =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
        (jlong) cms_size_policy()->remark_pause_old_slope(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "initialPauseOldSlope");
    _initial_pause_old_slope_counter =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
        (jlong) cms_size_policy()->initial_pause_old_slope(), CHECK);

    cname =
      PerfDataManager::counter_name(name_space(), "remarkPauseYoungSlope") ;
    _remark_pause_young_slope_counter =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
        (jlong) cms_size_policy()->remark_pause_young_slope(), CHECK);

    cname =
      PerfDataManager::counter_name(name_space(), "initialPauseYoungSlope");
    _initial_pause_young_slope_counter =
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
        (jlong) cms_size_policy()->initial_pause_young_slope(), CHECK);


  }
  assert(size_policy()->is_gc_cms_adaptive_size_policy(),
    "Wrong type of size policy");
}

void CMSGCAdaptivePolicyCounters::update_counters() {
  if (UsePerfData) {
    GCAdaptivePolicyCounters::update_counters_from_policy();
    update_counters_from_policy();
  }
}

void CMSGCAdaptivePolicyCounters::update_counters(CMSGCStats* gc_stats) {
  if (UsePerfData) {
    update_counters();
    update_promoted((size_t) gc_stats->avg_promoted()->last_sample());
    update_avg_promoted_avg(gc_stats);
    update_avg_promoted_dev(gc_stats);
    update_avg_promoted_padded_avg(gc_stats);
  }
}

void CMSGCAdaptivePolicyCounters::update_counters_from_policy() {
  if (UsePerfData && (cms_size_policy() != NULL)) {

    GCAdaptivePolicyCounters::update_counters_from_policy();

    update_major_gc_cost_counter();
    update_mutator_cost_counter();

    update_eden_size();
    update_promo_size();

    // If these updates from the last_sample() work,
    // revise the update methods for these counters
    // (both here and in PS).
    update_survived((size_t) cms_size_policy()->avg_survived()->last_sample());

    update_avg_concurrent_time_counter();
    update_avg_concurrent_interval_counter();
    update_avg_concurrent_gc_cost_counter();
#ifdef NOT_PRODUCT
    update_initial_pause_counter();
    update_remark_pause_counter();
#endif
    update_avg_initial_pause_counter();
    update_avg_remark_pause_counter();

    update_avg_cms_STW_time_counter();
    update_avg_cms_STW_gc_cost_counter();

    update_avg_cms_free_counter();
    update_avg_cms_free_at_sweep_counter();
    update_avg_cms_promo_counter();

    update_avg_msc_pause_counter();
    update_avg_msc_interval_counter();
    update_msc_gc_cost_counter();

    update_avg_ms_pause_counter();
    update_avg_ms_interval_counter();
    update_ms_gc_cost_counter();

    update_avg_old_live_counter();

    update_survivor_size_counters();
    update_avg_survived_avg_counters();
    update_avg_survived_dev_counters();

    update_decrement_tenuring_threshold_for_gc_cost();
    update_increment_tenuring_threshold_for_gc_cost();
    update_decrement_tenuring_threshold_for_survivor_limit();

    update_change_young_gen_for_maj_pauses();

    update_major_collection_slope_counter();
    update_remark_pause_old_slope_counter();
    update_initial_pause_old_slope_counter();
    update_remark_pause_young_slope_counter();
    update_initial_pause_young_slope_counter();

    update_decide_at_full_gc_counter();
  }
}
@@ -1,308 +0,0 @@
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP

#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "runtime/perfData.hpp"

// CMSGCAdaptivePolicyCounters is a holder class for performance counters
// that track the data and decisions for the ergonomics policy for the
// concurrent mark sweep collector

class CMSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
  friend class VMStructs;

 private:

  // Capacity of tenured generation recorded at the end of
  // any collection.
  PerfVariable* _cms_capacity_counter; // Make this common with PS _old_capacity

  // Average stop-the-world pause time for both initial and
  // remark pauses sampled at the end of the checkpointRootsFinalWork.
  PerfVariable* _avg_cms_STW_time_counter;
  // Average stop-the-world (STW) GC cost for the STW pause time
  // _avg_cms_STW_time_counter.
  PerfVariable* _avg_cms_STW_gc_cost_counter;

#ifdef NOT_PRODUCT
  // These are useful to see how the most recent values of these
  // counters compare to their respective averages but
  // do not control behavior.
  PerfVariable* _initial_pause_counter;
  PerfVariable* _remark_pause_counter;
#endif

  // Average of the initial marking pause for a concurrent collection.
  PerfVariable* _avg_initial_pause_counter;
  // Average of the remark pause for a concurrent collection.
  PerfVariable* _avg_remark_pause_counter;

  // Average for the sum of all the concurrent times per collection.
  PerfVariable* _avg_concurrent_time_counter;
  // Average for the time between the most recent end of a
  // concurrent collection and the beginning of the next
  // concurrent collection.
  PerfVariable* _avg_concurrent_interval_counter;
  // Average of the concurrent GC costs based on _avg_concurrent_time_counter
  // and _avg_concurrent_interval_counter.
  PerfVariable* _avg_concurrent_gc_cost_counter;

  // Average of the free space in the tenured generation at the
  // end of the sweep of the tenured generation.
  PerfVariable* _avg_cms_free_counter;
  // Average of the free space in the tenured generation at the
  // start of the sweep of the tenured generation.
  PerfVariable* _avg_cms_free_at_sweep_counter;
  // Average of the free space in the tenured generation at the
  // after any resizing of the tenured generation at the end
  // of a collection of the tenured generation.
  PerfVariable* _avg_cms_promo_counter;

  // Average of the mark-sweep-compact (MSC) pause time for a collection
  // of the tenured generation.
  PerfVariable* _avg_msc_pause_counter;
  // Average for the time between the most recent end of a
  // MSC collection and the beginning of the next MSC collection.
  PerfVariable* _avg_msc_interval_counter;
  // Average for the GC cost of a MSC collection based on
  // _avg_msc_pause_counter and _avg_msc_interval_counter.
  PerfVariable* _msc_gc_cost_counter;

  // Average of the mark-sweep (MS) pause time for a collection
  // of the tenured generation.
  PerfVariable* _avg_ms_pause_counter;
  // Average for the time between the most recent end of a
  // MS collection and the beginning of the next MS collection.
  PerfVariable* _avg_ms_interval_counter;
  // Average for the GC cost of a MS collection based on
  // _avg_ms_pause_counter and _avg_ms_interval_counter.
  PerfVariable* _ms_gc_cost_counter;

  // Average of the bytes promoted per minor collection.
  PerfVariable* _promoted_avg_counter;
  // Average of the deviation of the promoted average.
  PerfVariable* _promoted_avg_dev_counter;
  // Padded average of the bytes promoted per minor collection.
  PerfVariable* _promoted_padded_avg_counter;

  // See description of the _change_young_gen_for_maj_pauses
  // variable recently in cmsAdaptiveSizePolicy.hpp.
  PerfVariable* _change_young_gen_for_maj_pauses_counter;

  // See descriptions of _remark_pause_old_slope, _initial_pause_old_slope,
  // etc. variables recently in cmsAdaptiveSizePolicy.hpp.
  PerfVariable* _remark_pause_old_slope_counter;
  PerfVariable* _initial_pause_old_slope_counter;
  PerfVariable* _remark_pause_young_slope_counter;
  PerfVariable* _initial_pause_young_slope_counter;

  CMSAdaptiveSizePolicy* cms_size_policy() {
    assert(_size_policy->kind() ==
      AdaptiveSizePolicy::_gc_cms_adaptive_size_policy,
      "Wrong size policy");
    return (CMSAdaptiveSizePolicy*)_size_policy;
  }

  inline void update_avg_cms_STW_time_counter() {
    _avg_cms_STW_time_counter->set_value(
      (jlong) (cms_size_policy()->avg_cms_STW_time()->average() *
      (double) MILLIUNITS));
  }

  inline void update_avg_cms_STW_gc_cost_counter() {
    _avg_cms_STW_gc_cost_counter->set_value(
      (jlong) (cms_size_policy()->avg_cms_STW_gc_cost()->average() * 100.0));
  }

  inline void update_avg_initial_pause_counter() {
    _avg_initial_pause_counter->set_value(
      (jlong) (cms_size_policy()->avg_initial_pause()->average() *
      (double) MILLIUNITS));
  }
#ifdef NOT_PRODUCT
  inline void update_avg_remark_pause_counter() {
    _avg_remark_pause_counter->set_value(
      (jlong) (cms_size_policy()-> avg_remark_pause()->average() *
      (double) MILLIUNITS));
  }

  inline void update_initial_pause_counter() {
    _initial_pause_counter->set_value(
      (jlong) (cms_size_policy()->avg_initial_pause()->average() *
      (double) MILLIUNITS));
  }
#endif
  inline void update_remark_pause_counter() {
    _remark_pause_counter->set_value(
      (jlong) (cms_size_policy()-> avg_remark_pause()->last_sample() *
      (double) MILLIUNITS));
  }

  inline void update_avg_concurrent_time_counter() {
    _avg_concurrent_time_counter->set_value(
      (jlong) (cms_size_policy()->avg_concurrent_time()->last_sample() *
      (double) MILLIUNITS));
  }

  inline void update_avg_concurrent_interval_counter() {
    _avg_concurrent_interval_counter->set_value(
      (jlong) (cms_size_policy()->avg_concurrent_interval()->average() *
      (double) MILLIUNITS));
  }

  inline void update_avg_concurrent_gc_cost_counter() {
    _avg_concurrent_gc_cost_counter->set_value(
      (jlong) (cms_size_policy()->avg_concurrent_gc_cost()->average() * 100.0));
  }

  inline void update_avg_cms_free_counter() {
    _avg_cms_free_counter->set_value(
      (jlong) cms_size_policy()->avg_cms_free()->average());
  }

  inline void update_avg_cms_free_at_sweep_counter() {
    _avg_cms_free_at_sweep_counter->set_value(
      (jlong) cms_size_policy()->avg_cms_free_at_sweep()->average());
  }

  inline void update_avg_cms_promo_counter() {
    _avg_cms_promo_counter->set_value(
      (jlong) cms_size_policy()->avg_cms_promo()->average());
  }

  inline void update_avg_old_live_counter() {
    _avg_old_live_counter->set_value(
      (jlong)(cms_size_policy()->avg_old_live()->average())
    );
  }

  inline void update_avg_msc_pause_counter() {
    _avg_msc_pause_counter->set_value(
      (jlong) (cms_size_policy()->avg_msc_pause()->average() *
      (double) MILLIUNITS));
  }

  inline void update_avg_msc_interval_counter() {
    _avg_msc_interval_counter->set_value(
      (jlong) (cms_size_policy()->avg_msc_interval()->average() *
      (double) MILLIUNITS));
  }

  inline void update_msc_gc_cost_counter() {
    _msc_gc_cost_counter->set_value(
      (jlong) (cms_size_policy()->avg_msc_gc_cost()->average() * 100.0));
  }

  inline void update_avg_ms_pause_counter() {
    _avg_ms_pause_counter->set_value(
      (jlong) (cms_size_policy()->avg_ms_pause()->average() *
      (double) MILLIUNITS));
  }

  inline void update_avg_ms_interval_counter() {
    _avg_ms_interval_counter->set_value(
      (jlong) (cms_size_policy()->avg_ms_interval()->average() *
      (double) MILLIUNITS));
  }

  inline void update_ms_gc_cost_counter() {
    _ms_gc_cost_counter->set_value(
      (jlong) (cms_size_policy()->avg_ms_gc_cost()->average() * 100.0));
  }

  inline void update_major_gc_cost_counter() {
    _major_gc_cost_counter->set_value(
      (jlong)(cms_size_policy()->cms_gc_cost() * 100.0)
    );
  }
  inline void update_mutator_cost_counter() {
    _mutator_cost_counter->set_value(
      (jlong)(cms_size_policy()->mutator_cost() * 100.0)
    );
  }

  inline void update_avg_promoted_avg(CMSGCStats* gc_stats) {
    _promoted_avg_counter->set_value(
      (jlong)(gc_stats->avg_promoted()->average())
    );
  }
  inline void update_avg_promoted_dev(CMSGCStats* gc_stats) {
    _promoted_avg_dev_counter->set_value(
      (jlong)(gc_stats->avg_promoted()->deviation())
    );
  }
  inline void update_avg_promoted_padded_avg(CMSGCStats* gc_stats) {
    _promoted_padded_avg_counter->set_value(
      (jlong)(gc_stats->avg_promoted()->padded_average())
    );
  }
  inline void update_remark_pause_old_slope_counter() {
    _remark_pause_old_slope_counter->set_value(
      (jlong)(cms_size_policy()->remark_pause_old_slope() * 1000)
    );
  }
  inline void update_initial_pause_old_slope_counter() {
    _initial_pause_old_slope_counter->set_value(
      (jlong)(cms_size_policy()->initial_pause_old_slope() * 1000)
    );
  }
  inline void update_remark_pause_young_slope_counter() {
    _remark_pause_young_slope_counter->set_value(
      (jlong)(cms_size_policy()->remark_pause_young_slope() * 1000)
    );
  }
  inline void update_initial_pause_young_slope_counter() {
    _initial_pause_young_slope_counter->set_value(
      (jlong)(cms_size_policy()->initial_pause_young_slope() * 1000)
    );
  }
  inline void update_change_young_gen_for_maj_pauses() {
    _change_young_gen_for_maj_pauses_counter->set_value(
      cms_size_policy()->change_young_gen_for_maj_pauses());
  }

 public:
  CMSGCAdaptivePolicyCounters(const char* name, int collectors, int generations,
                              AdaptiveSizePolicy* size_policy);

  // update counters
  void update_counters();
  void update_counters(CMSGCStats* gc_stats);
  void update_counters_from_policy();

  inline void update_cms_capacity_counter(size_t size_in_bytes) {
    _cms_capacity_counter->set_value(size_in_bytes);
  }

  virtual GCPolicyCounters::Name kind() const {
    return GCPolicyCounters::CMSGCAdaptivePolicyCountersKind;
  }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
@@ -70,7 +70,6 @@ class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 class CompactibleFreeListSpace: public CompactibleSpace {
   friend class VMStructs;
   friend class ConcurrentMarkSweepGeneration;
-  friend class ASConcurrentMarkSweepGeneration;
   friend class CMSCollector;
   // Local alloc buffer for promotion into this space.
   friend class CFLS_LAB;
@@ -27,9 +27,8 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
+#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
@@ -319,27 +318,13 @@ void CMSCollector::ref_processor_init() {
   }
 }
 
-CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
+AdaptiveSizePolicy* CMSCollector::size_policy() {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
     "Wrong type of heap");
-  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
-    gch->gen_policy()->size_policy();
-  assert(sp->is_gc_cms_adaptive_size_policy(),
-    "Wrong type of size policy");
-  return sp;
+  return gch->gen_policy()->size_policy();
 }
 
-CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
-  CMSGCAdaptivePolicyCounters* results =
-    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
-  assert(
-    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
-    "Wrong gc policy counter kind");
-  return results;
-}
-
 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 
   const char* gen_name = "old";
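For readability, the accessor as it reads once this hunk is applied (assembled from the new side above; an illustration, not an additional change):

  AdaptiveSizePolicy* CMSCollector::size_policy() {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    assert(gch->kind() == CollectedHeap::GenCollectedHeap, "Wrong type of heap");
    // The CMS-specific subclass and its runtime type check are gone;
    // the generic policy held by the generation policy is returned directly.
    return gch->gen_policy()->size_policy();
  }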
@@ -2031,11 +2016,6 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
       "collections passed to foreground collector", _full_gcs_since_conc_gc);
   }
 
-  // Sample collection interval time and reset for collection pause.
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->msc_collection_begin();
-  }
-
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
@@ -2111,11 +2091,6 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
   _inter_sweep_timer.reset();
   _inter_sweep_timer.start();
 
-  // Sample collection pause time and reset for collection interval.
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->msc_collection_end(gch->gc_cause());
-  }
-
   gc_timer->register_gc_end();
 
   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
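The two hunks above delete the same pattern that recurs throughout this file: a UseAdaptiveSizePolicy guard around a CMSAdaptiveSizePolicy sampling hook bracketing a collection phase. The removed shape, quoted from the old side for reference:

  // Removed: sampling hooks around the mark-sweep-compact collection.
  if (UseAdaptiveSizePolicy) {
    size_policy()->msc_collection_begin();
  }
  // ... collection work ...
  if (UseAdaptiveSizePolicy) {
    size_policy()->msc_collection_end(gch->gc_cause());
  }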
@@ -2373,26 +2348,14 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Caus
         }
         break;
       case Precleaning:
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_precleaning_begin();
-        }
         // marking from roots in markFromRoots has been completed
         preclean();
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_precleaning_end();
-        }
         assert(_collectorState == AbortablePreclean ||
                _collectorState == FinalMarking,
                "Collector state should have changed");
         break;
       case AbortablePreclean:
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_phases_resume();
-        }
         abortable_preclean();
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_precleaning_end();
-        }
         assert(_collectorState == FinalMarking, "Collector state should "
           "have changed");
         break;
@@ -2406,23 +2369,12 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Caus
         assert(_foregroundGCShouldWait, "block post-condition");
         break;
       case Sweeping:
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_sweeping_begin();
-        }
         // final marking in checkpointRootsFinal has been completed
         sweep(true);
         assert(_collectorState == Resizing, "Collector state change "
           "to Resizing must be done under the free_list_lock");
         _full_gcs_since_conc_gc = 0;
 
-        // Stop the timers for adaptive size policy for the concurrent phases
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_sweeping_end();
-          size_policy()->concurrent_phases_end(gch->gc_cause(),
-                                               gch->prev_gen(_cmsGen)->capacity(),
-                                               _cmsGen->free());
-        }
-
       case Resizing: {
         // Sweeping has been completed...
         // At this point the background collection has completed.
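After this hunk the Sweeping case reduces to the sweep itself and then falls through to Resizing, as before (assembled from the surviving context lines above for reference):

  case Sweeping:
    // final marking in checkpointRootsFinal has been completed
    sweep(true);
    assert(_collectorState == Resizing, "Collector state change "
      "to Resizing must be done under the free_list_lock");
    _full_gcs_since_conc_gc = 0;
    // fall through into case Resizing, as in the original code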
@@ -2539,9 +2491,6 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Caus
   const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
     true, NULL, gc_id);)
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->ms_collection_begin();
-  }
   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
 
   HandleMark hm;  // Discard invalid handles created during verification
@@ -2633,11 +2582,6 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Caus
     }
   }
 
-  if (UseAdaptiveSizePolicy) {
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    size_policy()->ms_collection_end(gch->gc_cause());
-  }
-
   if (VerifyAfterGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
     Universe::verify();
@@ -3687,9 +3631,6 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
 
   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
     PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_initial_begin();
-  }
 
   // Reset all the PLAB chunk arrays if necessary.
   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
@@ -3769,9 +3710,6 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
   // Save the end of the used_region of the constituent generations
   // to be used to limit the extent of sweep in each generation.
   save_sweep_limits();
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
-  }
   verify_overflow_empty();
 }
 
@@ -3788,15 +3726,6 @@ bool CMSCollector::markFromRoots(bool asynch) {
 
   bool res;
   if (asynch) {
-
-    // Start the timers for adaptive size policy for the concurrent phases
-    // Do it here so that the foreground MS can use the concurrent
-    // timer since a foreground MS might has the sweep done concurrently
-    // or STW.
-    if (UseAdaptiveSizePolicy) {
-      size_policy()->concurrent_marking_begin();
-    }
-
     // Weak ref discovery note: We may be discovering weak
     // refs in this generation concurrent (but interleaved) with
     // weak ref discovery by a younger generation collector.
@@ -3814,22 +3743,12 @@ bool CMSCollector::markFromRoots(bool asynch) {
         gclog_or_tty->print_cr("bailing out to foreground collection");
       }
     }
-    if (UseAdaptiveSizePolicy) {
-      size_policy()->concurrent_marking_end();
-    }
   } else {
     assert(SafepointSynchronize::is_at_safepoint(),
            "inconsistent with asynch == false");
-    if (UseAdaptiveSizePolicy) {
-      size_policy()->ms_collection_marking_begin();
-    }
     // already have locks
     res = markFromRootsWork(asynch);
     _collectorState = FinalMarking;
-    if (UseAdaptiveSizePolicy) {
-      GenCollectedHeap* gch = GenCollectedHeap::heap();
-      size_policy()->ms_collection_marking_end(gch->gc_cause());
-    }
   }
   verify_overflow_empty();
   return res;
@@ -4705,8 +4624,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
 
   if (clean_survivor) {  // preclean the active survivor space(s)
     assert(_young_gen->kind() == Generation::DefNew ||
-           _young_gen->kind() == Generation::ParNew ||
-           _young_gen->kind() == Generation::ASParNew,
+           _young_gen->kind() == Generation::ParNew,
       "incorrect type for cast");
     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
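With ASParNew gone, the assertion accepts only the two remaining young-generation kinds (as the new side above reads):

  assert(_young_gen->kind() == Generation::DefNew ||
         _young_gen->kind() == Generation::ParNew,
    "incorrect type for cast");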
@@ -5077,10 +4995,6 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
 
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_final_begin();
-  }
-
   ResourceMark rm;
   HandleMark hm;
 
@@ -5214,9 +5128,6 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
          "Should be clear by end of the final marking");
   assert(_ct->klass_rem_set()->mod_union_is_clear(),
          "Should be clear by end of the final marking");
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_final_end(gch->gc_cause());
-  }
 }
 
 void CMSParInitialMarkTask::work(uint worker_id) {
@@ -6329,7 +6240,6 @@ void CMSCollector::sweep(bool asynch) {
 
   _inter_sweep_timer.stop();
   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
-  size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
 
   assert(!_intra_sweep_timer.is_active(), "Should not be active");
   _intra_sweep_timer.reset();
@@ -6454,17 +6364,6 @@ void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
   }
 }
 
-CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-    "Wrong type of heap");
-  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
-    gch->gen_policy()->size_policy();
-  assert(sp->is_gc_cms_adaptive_size_policy(),
-    "Wrong type of size policy");
-  return sp;
-}
-
 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
@@ -6540,9 +6439,6 @@ void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
 // Reset CMS data structures (for now just the marking bit map)
 // preparatory for the next cycle.
 void CMSCollector::reset(bool asynch) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  CMSAdaptiveSizePolicy* sp = size_policy();
-  AdaptiveSizePolicyOutput(sp, gch->total_collections());
   if (asynch) {
     CMSTokenSyncWithLocks ts(true, bitMapLock());
 
@@ -6597,7 +6493,7 @@ void CMSCollector::reset(bool asynch) {
     // Because only the full (i.e., concurrent mode failure) collections
     // are being measured for gc overhead limits, clean the "near" flag
     // and count.
-    sp->reset_gc_overhead_limit_count();
+    size_policy()->reset_gc_overhead_limit_count();
     _collectorState = Idling;
   } else {
     // already have the lock
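For reference, the tail of CMSCollector::reset() after the two hunks above: the cached CMSAdaptiveSizePolicy pointer is gone and the overhead-limit count is cleared through the plain accessor (assembled from the new side):

  // Because only the full (i.e., concurrent mode failure) collections
  // are being measured for gc overhead limits, clean the "near" flag
  // and count.
  size_policy()->reset_gc_overhead_limit_count();
  _collectorState = Idling;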
@@ -7064,7 +6960,6 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -7225,7 +7120,6 @@ void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -7298,7 +7192,6 @@ void SurvivorSpacePrecleanClosure::do_yield_work() {
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -7457,7 +7350,6 @@ void MarkFromRootsClosure::do_yield_work() {
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -8099,7 +7991,6 @@ void CMSPrecleanRefsYieldClosure::do_yield_work() {
   ConcurrentMarkSweepThread::acknowledge_yield_request();
 
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -8780,7 +8671,6 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
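All six do_yield_work() hunks above drop the same single line. The yield path as it reads afterwards (assembled from the surviving context for reference):

  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  // The GCPauseTimer that paused the size policy's concurrent timer is gone.
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }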
@@ -9327,172 +9217,6 @@ bool CMSCollector::no_preserved_marks() const {
 }
 #endif
 
-CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
-{
-  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
-  CMSAdaptiveSizePolicy* size_policy =
-    (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
-  assert(size_policy->is_gc_cms_adaptive_size_policy(),
-    "Wrong type for size policy");
-  return size_policy;
-}
-
-void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
-                                             size_t desired_promo_size) {
-  if (cur_promo_size < desired_promo_size) {
-    size_t expand_bytes = desired_promo_size - cur_promo_size;
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
-        "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
-        expand_bytes);
-    }
-    expand(expand_bytes,
-           MinHeapDeltaBytes,
-           CMSExpansionCause::_adaptive_size_policy);
-  } else if (desired_promo_size < cur_promo_size) {
-    size_t shrink_bytes = cur_promo_size - desired_promo_size;
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
-        "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
-        shrink_bytes);
-    }
-    shrink(shrink_bytes);
-  }
-}
-
-CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  CMSGCAdaptivePolicyCounters* counters =
-    (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
-  assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
-    "Wrong kind of counters");
-  return counters;
-}
-
-
-void ASConcurrentMarkSweepGeneration::update_counters() {
-  if (UsePerfData) {
-    _space_counters->update_all();
-    _gen_counters->update_all();
-    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
-    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
-      "Wrong gc statistics type");
-    counters->update_counters(gc_stats_l);
-  }
-}
-
-void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
-  if (UsePerfData) {
-    _space_counters->update_used(used);
-    _space_counters->update_capacity();
-    _gen_counters->update_all();
-
-    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
-    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
-      "Wrong gc statistics type");
-    counters->update_counters(gc_stats_l);
-  }
-}
-
-void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
-  assert_locked_or_safepoint(Heap_lock);
-  assert_lock_strong(freelistLock());
-  HeapWord* old_end = _cmsSpace->end();
-  HeapWord* unallocated_start = _cmsSpace->unallocated_block();
-  assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
-  FreeChunk* chunk_at_end = find_chunk_at_end();
-  if (chunk_at_end == NULL) {
-    // No room to shrink
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("No room to shrink: old_end "
-        PTR_FORMAT " unallocated_start " PTR_FORMAT
-        " chunk_at_end " PTR_FORMAT,
-        old_end, unallocated_start, chunk_at_end);
-    }
-    return;
-  } else {
-
-    // Find the chunk at the end of the space and determine
-    // how much it can be shrunk.
-    size_t shrinkable_size_in_bytes = chunk_at_end->size();
-    size_t aligned_shrinkable_size_in_bytes =
-      align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
-    assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
-      "Inconsistent chunk at end of space");
-    size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
-    size_t word_size_before = heap_word_size(_virtual_space.committed_size());
-
-    // Shrink the underlying space
-    _virtual_space.shrink_by(bytes);
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
-        " desired_bytes " SIZE_FORMAT
-        " shrinkable_size_in_bytes " SIZE_FORMAT
-        " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
-        " bytes " SIZE_FORMAT,
-        desired_bytes, shrinkable_size_in_bytes,
-        aligned_shrinkable_size_in_bytes, bytes);
-      gclog_or_tty->print_cr(" old_end " SIZE_FORMAT
-        " unallocated_start " SIZE_FORMAT,
-        old_end, unallocated_start);
-    }
-
-    // If the space did shrink (shrinking is not guaranteed),
-    // shrink the chunk at the end by the appropriate amount.
-    if (((HeapWord*)_virtual_space.high()) < old_end) {
-      size_t new_word_size =
-        heap_word_size(_virtual_space.committed_size());
-
-      // Have to remove the chunk from the dictionary because it is changing
-      // size and might be someplace elsewhere in the dictionary.
-
-      // Get the chunk at end, shrink it, and put it
-      // back.
-      _cmsSpace->removeChunkFromDictionary(chunk_at_end);
-      size_t word_size_change = word_size_before - new_word_size;
-      size_t chunk_at_end_old_size = chunk_at_end->size();
-      assert(chunk_at_end_old_size >= word_size_change,
-        "Shrink is too large");
-      chunk_at_end->set_size(chunk_at_end_old_size -
-                             word_size_change);
-      _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
-                       word_size_change);
-
-      _cmsSpace->returnChunkToDictionary(chunk_at_end);
-
-      MemRegion mr(_cmsSpace->bottom(), new_word_size);
-      _bts->resize(new_word_size);  // resize the block offset shared array
-      Universe::heap()->barrier_set()->resize_covered_region(mr);
-      _cmsSpace->assert_locked();
-      _cmsSpace->set_end((HeapWord*)_virtual_space.high());
-
-      NOT_PRODUCT(_cmsSpace->dictionary()->verify());
-
-      // update the space and generation capacity counters
-      if (UsePerfData) {
-        _space_counters->update_capacity();
-        _gen_counters->update_all();
-      }
-
-      if (Verbose && PrintGCDetails) {
-        size_t new_mem_size = _virtual_space.committed_size();
-        size_t old_mem_size = new_mem_size + bytes;
-        gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                               name(), old_mem_size/K, bytes/K, new_mem_size/K);
-      }
-    }
-
-    assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
-      "Inconsistency at end of space");
-    assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
-      "Shrinking is inconsistent");
-    return;
-  }
-}
-
 // Transfer some number of overflown objects to usual marking
 // stack. Return true if some objects were transferred.
 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
@@ -52,7 +52,7 @@
 // Concurrent mode failures are currently handled by
 // means of a sliding mark-compact.
 
-class CMSAdaptiveSizePolicy;
+class AdaptiveSizePolicy;
 class CMSConcMarkingTask;
 class CMSGCAdaptivePolicyCounters;
 class CMSTracer;
@@ -1009,8 +1009,7 @@ class CMSCollector: public CHeapObj<mtGC> {
   void icms_wait();  // Called at yield points.
 
   // Adaptive size policy
-  CMSAdaptiveSizePolicy* size_policy();
-  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
+  AdaptiveSizePolicy* size_policy();
 
   static void print_on_error(outputStream* st);
 
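The declaration in CMSCollector after this hunk, shown assembled for reference:

  // Adaptive size policy
  AdaptiveSizePolicy* size_policy();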
@@ -1150,9 +1149,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
 
   virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
 
-  // Adaptive size policy
-  CMSAdaptiveSizePolicy* size_policy();
-
   void set_did_compact(bool v) { _did_compact = v; }
 
   bool refs_discovery_is_atomic() const { return false; }
@@ -1346,37 +1342,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   void rotate_debug_collection_type();
 };
 
-class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
-
-  // Return the size policy from the heap's collector
-  // policy casted to CMSAdaptiveSizePolicy*.
-  CMSAdaptiveSizePolicy* cms_size_policy() const;
-
-  // Resize the generation based on the adaptive size
-  // policy.
-  void resize(size_t cur_promo, size_t desired_promo);
-
-  // Return the GC counters from the collector policy
-  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
-
-  virtual void shrink_by(size_t bytes);
-
- public:
-  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
-                                  int level, CardTableRS* ct,
-                                  bool use_adaptive_freelists,
-                                  FreeBlockDictionary<FreeChunk>::DictionaryChoice
-                                    dictionaryChoice) :
-    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
-      use_adaptive_freelists, dictionaryChoice) {}
-
-  virtual const char* short_name() const { return "ASCMS"; }
-  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
-
-  virtual void update_counters();
-  virtual void update_counters(size_t used);
-};
-
 //
 // Closures of various sorts used by CMS to accomplish its work
 //
@@ -1,657 +0,0 @@
/*
 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parNew/asParNewGeneration.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"

ASParNewGeneration::ASParNewGeneration(ReservedSpace rs,
                                       size_t initial_byte_size,
                                       size_t min_byte_size,
                                       int level) :
  ParNewGeneration(rs, initial_byte_size, level),
  _min_gen_size(min_byte_size) {}

const char* ASParNewGeneration::name() const {
  return "adaptive size par new generation";
}

void ASParNewGeneration::adjust_desired_tenuring_threshold() {
  assert(UseAdaptiveSizePolicy,
    "Should only be used with UseAdaptiveSizePolicy");
}

void ASParNewGeneration::resize(size_t eden_size, size_t survivor_size) {
  // Resize the generation if needed. If the generation resize
  // reports false, do not attempt to resize the spaces.
  if (resize_generation(eden_size, survivor_size)) {
    // Then we lay out the spaces inside the generation
    resize_spaces(eden_size, survivor_size);

    space_invariants();

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("Young generation size: "
        "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT
        " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT
        " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
        eden_size, survivor_size, used(), capacity(),
        max_gen_size(), min_gen_size());
    }
  }
}

size_t ASParNewGeneration::available_to_min_gen() {
  assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant");
  return virtual_space()->committed_size() - min_gen_size();
}

// This method assumes that from-space has live data and that
// any shrinkage of the young gen is limited by location of
// from-space.
size_t ASParNewGeneration::available_to_live() const {
#undef SHRINKS_AT_END_OF_EDEN
#ifdef SHRINKS_AT_END_OF_EDEN
  size_t delta_in_survivor = 0;
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t space_alignment = heap->intra_heap_alignment();
  const size_t gen_alignment = heap->object_heap_alignment();

  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
    space_shrinking = from_space();
  } else {
    space_shrinking = to_space();
  }

  // Include any space that is committed but not included in
  // the survivor spaces.
  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
    "Survivor space beyond high end");
  size_t unused_committed = pointer_delta(virtual_space()->high(),
    space_shrinking->end(), sizeof(char));

  if (space_shrinking->is_empty()) {
    // Don't let the space shrink to 0
    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
      "Space is too small");
    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
  } else {
    delta_in_survivor = pointer_delta(space_shrinking->end(),
                                      space_shrinking->top(),
                                      sizeof(char));
  }

  size_t delta_in_bytes = unused_committed + delta_in_survivor;
  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
  return delta_in_bytes;
#else
  // The only space available for shrinking is in to-space if it
  // is above from-space.
  if (to()->bottom() > from()->bottom()) {
    const size_t alignment = os::vm_page_size();
    if (to()->capacity() < alignment) {
      return 0;
    } else {
      return to()->capacity() - alignment;
    }
  } else {
    return 0;
  }
#endif
}

// Return the number of bytes available for resizing down the young
// generation.  This is the minimum of
//      input "bytes"
//      bytes to the minimum young gen size
//      bytes to the size currently being used + some small extra
size_t ASParNewGeneration::limit_gen_shrink (size_t bytes) {
  // Allow shrinkage into the current eden but keep eden large enough
  // to maintain the minimum young gen size
  bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
  return align_size_down(bytes, os::vm_page_size());
}

// Note that the the alignment used is the OS page size as
// opposed to an alignment associated with the virtual space
// (as is done in the ASPSYoungGen/ASPSOldGen)
bool ASParNewGeneration::resize_generation(size_t eden_size,
                                           size_t survivor_size) {
  const size_t alignment = os::vm_page_size();
  size_t orig_size = virtual_space()->committed_size();
  bool size_changed = false;

  // There used to be this guarantee there.
  // guarantee ((eden_size + 2*survivor_size) <= _max_gen_size, "incorrect input arguments");
  // Code below forces this requirement.  In addition the desired eden
  // size and desired survivor sizes are desired goals and may
  // exceed the total generation size.

  assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(),
    "just checking");

  // Adjust new generation size
  const size_t eden_plus_survivors =
    align_size_up(eden_size + 2 * survivor_size, alignment);
  size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_gen_size()),
                             min_gen_size());
  assert(desired_size <= max_gen_size(), "just checking");

  if (desired_size > orig_size) {
    // Grow the generation
    size_t change = desired_size - orig_size;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      return false; // Error if we fail to resize!
    }
    size_changed = true;
  } else if (desired_size < orig_size) {
    size_t desired_change = orig_size - desired_size;
    assert(desired_change % alignment == 0, "just checking");

    desired_change = limit_gen_shrink(desired_change);

    if (desired_change > 0) {
      virtual_space()->shrink_by(desired_change);
      reset_survivors_after_shrink();

      size_changed = true;
    }
  } else {
    if (Verbose && PrintGC) {
      if (orig_size == max_gen_size()) {
        gclog_or_tty->print_cr("ASParNew generation size at maximum: "
          SIZE_FORMAT "K", orig_size/K);
      } else if (orig_size == min_gen_size()) {
        gclog_or_tty->print_cr("ASParNew generation size at minium: "
          SIZE_FORMAT "K", orig_size/K);
      }
    }
  }

  if (size_changed) {
    MemRegion cmr((HeapWord*)virtual_space()->low(),
                  (HeapWord*)virtual_space()->high());
    GenCollectedHeap::heap()->barrier_set()->resize_covered_region(cmr);

    if (Verbose && PrintGC) {
      size_t current_size = virtual_space()->committed_size();
      gclog_or_tty->print_cr("ASParNew generation size changed: "
                             SIZE_FORMAT "K->" SIZE_FORMAT "K",
                             orig_size/K, current_size/K);
    }
  }

  guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
            virtual_space()->committed_size() == max_gen_size(), "Sanity");

  return true;
}

void ASParNewGeneration::reset_survivors_after_shrink() {

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  HeapWord* new_end = (HeapWord*)virtual_space()->high();

  if (from()->end() > to()->end()) {
    assert(new_end >= from()->end(), "Shrinking past from-space");
  } else {
    assert(new_end >= to()->bottom(), "Shrink was too large");
    // Was there a shrink of the survivor space?
    if (new_end < to()->end()) {
      MemRegion mr(to()->bottom(), new_end);
      to()->initialize(mr,
                       SpaceDecorator::DontClear,
                       SpaceDecorator::DontMangle);
    }
  }
}
void ASParNewGeneration::resize_spaces(size_t requested_eden_size,
                                       size_t requested_survivor_size) {
  assert(UseAdaptiveSizePolicy, "sanity check");
  assert(requested_eden_size > 0 && requested_survivor_size > 0,
    "just checking");
  CollectedHeap* heap = Universe::heap();
  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Sanity");

  // We require eden and to space to be empty
  if ((!eden()->is_empty()) || (!to()->is_empty())) {
    return;
  }

  size_t cur_eden_size = eden()->capacity();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("ASParNew::resize_spaces(requested_eden_size: "
      SIZE_FORMAT
      ", requested_survivor_size: " SIZE_FORMAT ")",
      requested_eden_size, requested_survivor_size);
    gclog_or_tty->print_cr(" eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
      SIZE_FORMAT,
      p2i(eden()->bottom()),
      p2i(eden()->end()),
      pointer_delta(eden()->end(),
                    eden()->bottom(),
                    sizeof(char)));
    gclog_or_tty->print_cr(" from: [" PTR_FORMAT ".." PTR_FORMAT ") "
      SIZE_FORMAT,
      p2i(from()->bottom()),
      p2i(from()->end()),
      pointer_delta(from()->end(),
                    from()->bottom(),
                    sizeof(char)));
    gclog_or_tty->print_cr(" to: [" PTR_FORMAT ".." PTR_FORMAT ") "
      SIZE_FORMAT,
      p2i(to()->bottom()),
      p2i(to()->end()),
      pointer_delta( to()->end(),
                     to()->bottom(),
                     sizeof(char)));
  }

  // There's nothing to do if the new sizes are the same as the current
  if (requested_survivor_size == to()->capacity() &&
      requested_survivor_size == from()->capacity() &&
      requested_eden_size == eden()->capacity()) {
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" capacities are the right sizes, returning");
    }
    return;
  }

  char* eden_start = (char*)eden()->bottom();
  char* eden_end = (char*)eden()->end();
  char* from_start = (char*)from()->bottom();
  char* from_end = (char*)from()->end();
  char* to_start = (char*)to()->bottom();
  char* to_end = (char*)to()->end();

  const size_t alignment = os::vm_page_size();
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

  // Check whether from space is below to space
  if (from_start < to_start) {
    // Eden, from, to
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" Eden, from, to:");
    }

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable.  "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden.  from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32bit
    // overflow (i.e., eden_start + requested_eden_size
    // may too large for representation in 32bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_size = align_size_down(eden_size, alignment);
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end = (char*)virtual_space()->high();
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));

    // Does the optimal to-space overlap from-space?
    if (to_start < (char*)from()->end()) {
      // Calculate the minimum offset possible for from_end
      size_t from_size = pointer_delta(from()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = alignment;
      } else {
        from_size = align_size_up(from_size, alignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from()->end(), "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    } else {
      // If shrinking, move to-space down to abut the end of from-space
      // so that shrinking will move to-space down.  If not shrinking
      // to-space is moving up to allow for growth on the next expansion.
      if (requested_eden_size <= cur_eden_size) {
        to_start = from_end;
        if (to_start + requested_survivor_size > to_start) {
          to_end = to_start + requested_survivor_size;
        }
      }
      // else leave to_end pointing to the high end of the virtual space.
    }

    guarantee(to_start != to_end, "to space is zero sized");

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" [eden_start .. eden_end): "
        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
        p2i(eden_start),
        p2i(eden_end),
        pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr(" [from_start .. from_end): "
        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
        p2i(from_start),
        p2i(from_end),
        pointer_delta(from_end, from_start, sizeof(char)));
      gclog_or_tty->print_cr(" [ to_start .. to_end): "
        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
        p2i(to_start),
        p2i(to_end),
        pointer_delta( to_end, to_start, sizeof(char)));
    }
  } else {
    // Eden, to, from
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" Eden, to, from:");
    }

    // Calculate the to-space boundaries based on
    // the start of from-space.
    to_end = from_start;
    to_start = (char*)pointer_delta(from_start,
                                    (char*)requested_survivor_size,
                                    sizeof(char));
    // Calculate the ideal eden boundaries.
    // eden_end is already at the bottom of the generation
    assert(eden_start == virtual_space()->low(),
      "Eden is not starting at the low end of the virtual space");
    if (eden_start + requested_eden_size >= eden_start) {
      eden_end = eden_start + requested_eden_size;
    } else {
      eden_end = to_start;
    }

    // Does eden intrude into to-space?  to-space
    // gets priority but eden is not allowed to shrink
    // to 0.
    if (eden_end > to_start) {
      eden_end = to_start;
    }

    // Don't let eden shrink down to 0 or less.
    eden_end = MAX2(eden_end, eden_start + alignment);
    assert(eden_start + alignment >= eden_start, "Overflow");

    size_t eden_size;
    if (maintain_minimum) {
      // Use all the space available.
      eden_end = MAX2(eden_end, to_start);
      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
      eden_size = MIN2(eden_size, cur_eden_size);
    } else {
      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
    }
    eden_size = align_size_down(eden_size, alignment);
    assert(maintain_minimum || eden_size <= requested_eden_size,
      "Eden size is too large");
    assert(eden_size >= alignment, "Eden size is too small");
    eden_end = eden_start + eden_size;

    // Move to-space down to eden.
    if (requested_eden_size < cur_eden_size) {
      to_start = eden_end;
      if (to_start + requested_survivor_size > to_start) {
        to_end = MIN2(from_start, to_start + requested_survivor_size);
      } else {
        to_end = from_start;
      }
    }

    // eden_end may have moved so again make sure
    // the to-space and eden don't overlap.
    to_start = MAX2(eden_end, to_start);

    // from-space
    size_t from_used = from()->used();
    if (requested_survivor_size > from_used) {
      if (from_start + requested_survivor_size >= from_start) {
        from_end = from_start + requested_survivor_size;
      }
      if (from_end > virtual_space()->high()) {
        from_end = virtual_space()->high();
      }
    }

    assert(to_start >= eden_end, "to-space should be above eden");
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr(" [eden_start .. eden_end): "
        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
        p2i(eden_start),
        p2i(eden_end),
        pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr(" [ to_start .. to_end): "
        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
        p2i(to_start),
        p2i(to_end),
        pointer_delta( to_end, to_start, sizeof(char)));
      gclog_or_tty->print_cr(" [from_start .. from_end): "
        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
        p2i(from_start),
        p2i(from_end),
        pointer_delta(from_end, from_start, sizeof(char)));
    }
  }


  guarantee((HeapWord*)from_start <= from()->bottom(),
    "from start moved to the right");
  guarantee((HeapWord*)from_end >= from()->top(),
    "from end moved into live data");
  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
  assert(is_object_aligned((intptr_t)to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  HeapWord* old_from_top = from()->top();

  // For PrintAdaptiveSizePolicy block below
  size_t old_from = from()->capacity();
  size_t old_to = to()->capacity();

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.

  // Must check mangling before the spaces are reshaped.  Otherwise,
  // the bottom or end of one space may have moved into another
  // a failure of the check may not correctly indicate which space
  // is not properly mangled.
  if (ZapUnusedHeapArea) {
    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden()->check_mangled_unused_area(limit);
    from()->check_mangled_unused_area(limit);
    to()->check_mangled_unused_area(limit);
  }

  // The call to initialize NULL's the next compaction space
  eden()->initialize(edenMR,
                     SpaceDecorator::Clear,
                     SpaceDecorator::DontMangle);
  eden()->set_next_compaction_space(from());
  to()->initialize(toMR,
                   SpaceDecorator::Clear,
                   SpaceDecorator::DontMangle);
  from()->initialize(fromMR,
                     SpaceDecorator::DontClear,
                     SpaceDecorator::DontMangle);

  assert(from()->top() == old_from_top, "from top changed!");

  if (PrintAdaptiveSizePolicy) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    assert(gch->kind() == CollectedHeap::GenCollectedHeap, "Sanity");

    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                        "collection: %d "
                        "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
                        "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                        gch->total_collections(),
                        old_from, old_to,
                        from()->capacity(),
                        to()->capacity());
    gclog_or_tty->cr();
  }
}
void ASParNewGeneration::compute_new_size() {
|
|
||||||
GenCollectedHeap* gch = GenCollectedHeap::heap();
|
|
||||||
assert(gch->kind() == CollectedHeap::GenCollectedHeap,
|
|
||||||
"not a CMS generational heap");
|
|
||||||
|
|
||||||
|
|
||||||
CMSAdaptiveSizePolicy* size_policy =
|
|
||||||
(CMSAdaptiveSizePolicy*)gch->gen_policy()->size_policy();
|
|
||||||
assert(size_policy->is_gc_cms_adaptive_size_policy(),
|
|
||||||
"Wrong type of size policy");
|
|
||||||
|
|
||||||
size_t survived = from()->used();
|
|
||||||
if (!survivor_overflow()) {
|
|
||||||
// Keep running averages on how much survived
|
|
||||||
size_policy->avg_survived()->sample(survived);
|
|
||||||
} else {
|
|
||||||
size_t promoted =
|
|
||||||
(size_t) next_gen()->gc_stats()->avg_promoted()->last_sample();
|
|
||||||
assert(promoted < gch->capacity(), "Conversion problem?");
|
|
||||||
size_t survived_guess = survived + promoted;
|
|
||||||
size_policy->avg_survived()->sample(survived_guess);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t survivor_limit = max_survivor_size();
|
|
||||||
_tenuring_threshold =
|
|
||||||
size_policy->compute_survivor_space_size_and_threshold(
|
|
||||||
_survivor_overflow,
|
|
||||||
_tenuring_threshold,
|
|
||||||
survivor_limit);
|
|
||||||
size_policy->avg_young_live()->sample(used());
|
|
||||||
size_policy->avg_eden_live()->sample(eden()->used());
|
|
||||||
|
|
||||||
size_policy->compute_eden_space_size(eden()->capacity(), max_gen_size());
|
|
||||||
|
|
||||||
resize(size_policy->calculated_eden_size_in_bytes(),
|
|
||||||
size_policy->calculated_survivor_size_in_bytes());
|
|
||||||
|
|
||||||
if (UsePerfData) {
|
|
||||||
CMSGCAdaptivePolicyCounters* counters =
|
|
||||||
(CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
|
|
||||||
assert(counters->kind() ==
|
|
||||||
GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
|
|
||||||
"Wrong kind of counters");
|
|
||||||
counters->update_tenuring_threshold(_tenuring_threshold);
|
|
||||||
counters->update_survivor_overflowed(_survivor_overflow);
|
|
||||||
counters->update_young_capacity(capacity());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
#ifndef PRODUCT
|
|
||||||
// Changes from PSYoungGen version
|
|
||||||
// value of "alignment"
|
|
||||||
void ASParNewGeneration::space_invariants() {
|
|
||||||
const size_t alignment = os::vm_page_size();
|
|
||||||
|
|
||||||
// Currently, our eden size cannot shrink to zero
|
|
||||||
guarantee(eden()->capacity() >= alignment, "eden too small");
|
|
||||||
guarantee(from()->capacity() >= alignment, "from too small");
|
|
||||||
guarantee(to()->capacity() >= alignment, "to too small");
|
|
||||||
|
|
||||||
// Relationship of spaces to each other
|
|
||||||
char* eden_start = (char*)eden()->bottom();
|
|
||||||
char* eden_end = (char*)eden()->end();
|
|
||||||
char* from_start = (char*)from()->bottom();
|
|
||||||
char* from_end = (char*)from()->end();
|
|
||||||
char* to_start = (char*)to()->bottom();
|
|
||||||
char* to_end = (char*)to()->end();
|
|
||||||
|
|
||||||
guarantee(eden_start >= virtual_space()->low(), "eden bottom");
|
|
||||||
guarantee(eden_start < eden_end, "eden space consistency");
|
|
||||||
guarantee(from_start < from_end, "from space consistency");
|
|
||||||
guarantee(to_start < to_end, "to space consistency");
|
|
||||||
|
|
||||||
// Check whether from space is below to space
|
|
||||||
if (from_start < to_start) {
|
|
||||||
// Eden, from, to
|
|
||||||
guarantee(eden_end <= from_start, "eden/from boundary");
|
|
||||||
guarantee(from_end <= to_start, "from/to boundary");
|
|
||||||
guarantee(to_end <= virtual_space()->high(), "to end");
|
|
||||||
} else {
|
|
||||||
// Eden, to, from
|
|
||||||
guarantee(eden_end <= to_start, "eden/to boundary");
|
|
||||||
guarantee(to_end <= from_start, "to/from boundary");
|
|
||||||
guarantee(from_end <= virtual_space()->high(), "from end");
|
|
||||||
}
|
|
||||||
|
|
||||||
// More checks that the virtual space is consistent with the spaces
|
|
||||||
assert(virtual_space()->committed_size() >=
|
|
||||||
(eden()->capacity() +
|
|
||||||
to()->capacity() +
|
|
||||||
from()->capacity()), "Committed size is inconsistent");
|
|
||||||
assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
|
|
||||||
"Space invariant");
|
|
||||||
char* eden_top = (char*)eden()->top();
|
|
||||||
char* from_top = (char*)from()->top();
|
|
||||||
char* to_top = (char*)to()->top();
|
|
||||||
assert(eden_top <= virtual_space()->high(), "eden top");
|
|
||||||
assert(from_top <= virtual_space()->high(), "from top");
|
|
||||||
assert(to_top <= virtual_space()->high(), "to top");
|
|
||||||
}
|
|
||||||
#endif
|
|
|
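The compute_new_size() path above feeds surviving-object sizes into decaying averages and turns them into new survivor/eden targets. The following standalone C++ sketch only illustrates that feedback loop in outline; AdaptiveAverage, the 1.5 padding factor, and the byte counts used here are illustrative assumptions, not HotSpot code.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Exponentially decaying average, similar in spirit to HotSpot's
// AdaptiveWeightedAverage: newer samples carry more weight.
class AdaptiveAverage {
  double _avg;
  double _weight;   // fraction of each new sample folded into the average
 public:
  explicit AdaptiveAverage(double weight) : _avg(0.0), _weight(weight) {}
  void sample(double v) { _avg = (1.0 - _weight) * _avg + _weight * v; }
  double average() const { return _avg; }
};

// Pick a survivor-space size: pad the average survived bytes so a typical
// collection fits, align up, and never exceed the configured limit.
static size_t compute_survivor_size(const AdaptiveAverage& survived,
                                    double pad,            // e.g. 1.5
                                    size_t alignment,
                                    size_t survivor_limit) {
  size_t sz = (size_t)(survived.average() * pad);
  sz = (sz + alignment - 1) / alignment * alignment;  // align up
  return std::min(sz, survivor_limit);
}

int main() {
  AdaptiveAverage avg_survived(0.25);
  const size_t survived_per_gc[] = {4u << 20, 6u << 20, 5u << 20, 8u << 20};
  for (size_t s : survived_per_gc) {
    avg_survived.sample((double)s);
    size_t survivor = compute_survivor_size(avg_survived, 1.5,
                                            64 * 1024, 32u << 20);
    std::printf("survived=%zu -> survivor target=%zu\n", s, survivor);
  }
  return 0;
}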
@@ -1,98 +0,0 @@
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_ASPARNEWGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_ASPARNEWGENERATION_HPP

#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"

// A Generation that does parallel young-gen collection extended
// for adaptive size policy.

// Division of generation into spaces
// done by DefNewGeneration::compute_space_boundaries()
//      +---------------+
//      | uncommitted   |
//      |---------------|
//      | ss0           |
//      |---------------|
//      | ss1           |
//      |---------------|
//      |               |
//      | eden          |
//      |               |
//      +---------------+  <-- low end of VirtualSpace
//
class ASParNewGeneration: public ParNewGeneration {

  size_t _min_gen_size;

  // Resize the generation based on the desired sizes of
  // the constituent spaces.
  bool resize_generation(size_t eden_size, size_t survivor_size);
  // Resize the spaces based on their desired sizes but
  // respecting the maximum size of the generation.
  void resize_spaces(size_t eden_size, size_t survivor_size);
  // Return the byte size remaining to the minimum generation size.
  size_t available_to_min_gen();
  // Return the byte size remaining to the live data in the generation.
  size_t available_to_live() const;
  // Return the byte size that the generation is allowed to shrink.
  size_t limit_gen_shrink(size_t bytes);
  // Reset the size of the spaces after a shrink of the generation.
  void reset_survivors_after_shrink();

  // Accessor
  VirtualSpace* virtual_space() { return &_virtual_space; }

  virtual void adjust_desired_tenuring_threshold();

 public:

  ASParNewGeneration(ReservedSpace rs,
                     size_t initial_byte_size,
                     size_t min_byte_size,
                     int level);

  virtual const char* short_name() const { return "ASParNew"; }
  virtual const char* name() const;
  virtual Generation::Name kind() { return ASParNew; }

  // Change the sizes of eden and the survivor spaces in
  // the generation.  The parameters are desired sizes
  // and are not guaranteed to be met.  For example, if
  // the total is larger than the generation.
  void resize(size_t eden_size, size_t survivor_size);

  virtual void compute_new_size();

  size_t max_gen_size()       { return _reserved.byte_size(); }
  size_t min_gen_size() const { return _min_gen_size; }

  // Space boundary invariant checker
  void space_invariants() PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_ASPARNEWGENERATION_HPP
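The removed header declares the shrink/grow helpers (available_to_min_gen, available_to_live, limit_gen_shrink) without their bodies. A rough sketch of how such limits can be combined when clamping a requested shrink; GenBounds and its fields are invented purely for illustration and are not the HotSpot types.

#include <algorithm>
#include <cstddef>

// Illustrative-only view of a young generation's committed bounds.
struct GenBounds {
  size_t committed;   // currently committed bytes
  size_t min_size;    // never shrink below this
  size_t live;        // bytes currently in use
};

// Bytes the generation may still give back before hitting its minimum.
static size_t available_to_min_gen(const GenBounds& g) {
  return g.committed > g.min_size ? g.committed - g.min_size : 0;
}

// Bytes the generation may give back without cutting into live data.
static size_t available_to_live(const GenBounds& g) {
  return g.committed > g.live ? g.committed - g.live : 0;
}

// Clamp a requested shrink the way the removed helpers describe:
// respect both the minimum generation size and the live data.
static size_t limit_gen_shrink(const GenBounds& g, size_t requested) {
  return std::min(requested,
                  std::min(available_to_min_gen(g), available_to_live(g)));
}

// Usage: limit_gen_shrink({64u << 20, 16u << 20, 40u << 20}, 30u << 20)
// yields 24 MB, the slack above the live data.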
@@ -507,7 +507,7 @@ class AdaptiveSizePolicyOutput : StackObj {
     // always fail (never do the print based on the interval test).
     return PrintGCDetails &&
            UseAdaptiveSizePolicy &&
-           (UseParallelGC || UseConcMarkSweepGC) &&
+           UseParallelGC &&
            (AdaptiveSizePolicyOutputInterval > 0) &&
            ((count == 0) ||
              ((count % AdaptiveSizePolicyOutputInterval) == 0));
@@ -216,16 +216,4 @@ class LinearLeastSquareFit : public CHeapObj<mtGC> {
   bool increment_will_decrease();
 };
 
-class GCPauseTimer : StackObj {
-  elapsedTimer* _timer;
- public:
-  GCPauseTimer(elapsedTimer* timer) {
-    _timer = timer;
-    _timer->stop();
-  }
-  ~GCPauseTimer() {
-    _timer->start();
-  }
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCUTIL_HPP
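The GCPauseTimer removed above is a scope guard: it stops a running elapsedTimer in its constructor and restarts it in its destructor, so time spent inside the guarded scope is excluded from the accumulated total. A generic sketch of the same idiom, with Timer as a placeholder type rather than a HotSpot API:

// Pauses a timer for the lifetime of the guard and resumes it on
// destruction, mirroring the stop-in-constructor / start-in-destructor
// pattern of the removed class.
template <typename Timer>
class ScopedTimerPause {
  Timer* _timer;
 public:
  explicit ScopedTimerPause(Timer* timer) : _timer(timer) { _timer->stop(); }
  ~ScopedTimerPause() { _timer->start(); }
  // Non-copyable: a copy would stop/start the timer extra times.
  ScopedTimerPause(const ScopedTimerPause&) = delete;
  ScopedTimerPause& operator=(const ScopedTimerPause&) = delete;
};

// Usage (hypothetical): while `pause` is in scope, `concurrent_timer`
// stops accumulating elapsed time.
//   ScopedTimerPause<elapsedTimer> pause(&concurrent_timer);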
@@ -40,10 +40,6 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
-#endif // INCLUDE_ALL_GCS
 
 // CollectorPolicy methods
 
@@ -115,7 +115,6 @@ class CollectorPolicy : public CHeapObj<mtGC> {
     CollectorPolicyKind,
     GenCollectorPolicyKind,
     ConcurrentMarkSweepPolicyKind,
-    ASConcurrentMarkSweepPolicyKind,
     G1CollectorPolicyKind
   };
 
@@ -202,13 +202,11 @@ void GenCollectedHeap::post_initialize() {
   guarantee(policy->is_generation_policy(), "Illegal policy type");
   DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
   assert(def_new_gen->kind() == Generation::DefNew ||
-         def_new_gen->kind() == Generation::ParNew ||
-         def_new_gen->kind() == Generation::ASParNew,
+         def_new_gen->kind() == Generation::ParNew,
          "Wrong generation kind");
 
   Generation* old_gen = get_gen(1);
   assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
-         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
          old_gen->kind() == Generation::MarkSweepCompact,
          "Wrong generation kind");
 
@@ -573,9 +571,6 @@ void GenCollectedHeap::do_collection(bool full,
     }
   }
 
-  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
-  AdaptiveSizePolicyOutput(sp, total_collections());
-
   print_heap_after_gc();
 
 #ifdef TRACESPINNING
@@ -724,8 +719,7 @@ void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
 #if INCLUDE_ALL_GCS
 bool GenCollectedHeap::create_cms_collector() {
 
-  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
-          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)),
+  assert(_gens[1]->kind() == Generation::ConcurrentMarkSweep,
          "Unexpected generation kinds");
   // Skip two header words in the block content verification
   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
@@ -154,8 +154,7 @@ bool Generation::is_in(const void* p) const {
 
 DefNewGeneration* Generation::as_DefNewGeneration() {
   assert((kind() == Generation::DefNew) ||
-         (kind() == Generation::ParNew) ||
-         (kind() == Generation::ASParNew),
+         (kind() == Generation::ParNew),
          "Wrong youngest generation type");
   return (DefNewGeneration*) this;
 }
@@ -131,8 +131,6 @@ class Generation: public CHeapObj<mtGC> {
  public:
   // The set of possible generation kinds.
   enum Name {
-    ASParNew,
-    ASConcurrentMarkSweep,
     DefNew,
     ParNew,
     MarkSweepCompact,
@@ -32,7 +32,6 @@
 #include "runtime/java.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc_implementation/parNew/asParNewGeneration.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
 #include "gc_implementation/parNew/parNewGeneration.hpp"
 #endif // INCLUDE_ALL_GCS
@@ -50,12 +49,6 @@ Generation* GenerationSpec::init(ReservedSpace rs, int level,
     case Generation::ParNew:
       return new ParNewGeneration(rs, init_size(), level);
 
-    case Generation::ASParNew:
-      return new ASParNewGeneration(rs,
-                                    init_size(),
-                                    init_size() /* min size */,
-                                    level);
-
     case Generation::ConcurrentMarkSweep: {
       assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
       CardTableRS* ctrs = remset->as_CardTableRS();
@@ -75,26 +68,6 @@ Generation* GenerationSpec::init(ReservedSpace rs, int level,
 
       return g;
     }
 
-    case Generation::ASConcurrentMarkSweep: {
-      assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
-      CardTableRS* ctrs = remset->as_CardTableRS();
-      if (ctrs == NULL) {
-        vm_exit_during_initialization("Rem set incompatibility.");
-      }
-      // Otherwise
-      // The constructor creates the CMSCollector if needed,
-      // else registers with an existing CMSCollector
-
-      ASConcurrentMarkSweepGeneration* g = NULL;
-      g = new ASConcurrentMarkSweepGeneration(rs,
-                 init_size(), level, ctrs, UseCMSAdaptiveFreeLists,
-                 (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
-
-      g->initialize_performance_counters();
-
-      return g;
-    }
 #endif // INCLUDE_ALL_GCS
 
     default:
@@ -72,7 +72,7 @@
 #include "utilities/preserveException.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
+#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
|
@ -802,13 +802,9 @@ jint Universe::initialize_heap() {
|
||||||
gc_policy = new MarkSweepPolicy();
|
gc_policy = new MarkSweepPolicy();
|
||||||
} else if (UseConcMarkSweepGC) {
|
} else if (UseConcMarkSweepGC) {
|
||||||
#if INCLUDE_ALL_GCS
|
#if INCLUDE_ALL_GCS
|
||||||
if (UseAdaptiveSizePolicy) {
|
gc_policy = new ConcurrentMarkSweepPolicy();
|
||||||
gc_policy = new ASConcurrentMarkSweepPolicy();
|
|
||||||
} else {
|
|
||||||
gc_policy = new ConcurrentMarkSweepPolicy();
|
|
||||||
}
|
|
||||||
#else // INCLUDE_ALL_GCS
|
#else // INCLUDE_ALL_GCS
|
||||||
fatal("UseConcMarkSweepGC not supported in this VM.");
|
fatal("UseConcMarkSweepGC not supported in this VM.");
|
||||||
#endif // INCLUDE_ALL_GCS
|
#endif // INCLUDE_ALL_GCS
|
||||||
} else { // default old generation
|
} else { // default old generation
|
||||||
gc_policy = new MarkSweepPolicy();
|
gc_policy = new MarkSweepPolicy();
|
||||||
|
|
|
@@ -136,7 +136,6 @@ void MemoryService::add_gen_collected_heap_info(GenCollectedHeap* heap) {
     break;
 #if INCLUDE_ALL_GCS
   case Generation::ParNew:
-  case Generation::ASParNew:
     _minor_gc_manager = MemoryManager::get_parnew_memory_manager();
     break;
 #endif // INCLUDE_ALL_GCS
@@ -268,7 +267,6 @@ void MemoryService::add_generation_memory_pool(Generation* gen,
 
 #if INCLUDE_ALL_GCS
     case Generation::ParNew:
-    case Generation::ASParNew:
     {
       assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
       // Add a memory pool for each space and young gen doesn't
@@ -300,7 +298,6 @@ void MemoryService::add_generation_memory_pool(Generation* gen,
 
 #if INCLUDE_ALL_GCS
     case Generation::ConcurrentMarkSweep:
-    case Generation::ASConcurrentMarkSweep:
     {
       assert(major_mgr != NULL && minor_mgr == NULL, "Should have only one manager");
       ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*) gen;
@@ -548,23 +545,20 @@ Handle MemoryService::create_MemoryUsage_obj(MemoryUsage usage, TRAPS) {
 // GC manager type depends on the type of Generation. Depending on the space
 // availablity and vm options the gc uses major gc manager or minor gc
 // manager or both. The type of gc manager depends on the generation kind.
-// For DefNew, ParNew and ASParNew generation doing scavenge gc uses minor
-// gc manager (so _fullGC is set to false ) and for other generation kinds
-// doing mark-sweep-compact uses major gc manager (so _fullGC is set
-// to true).
+// For DefNew and ParNew generation doing scavenge gc uses minor gc manager (so
+// _fullGC is set to false ) and for other generation kinds doing
+// mark-sweep-compact uses major gc manager (so _fullGC is set to true).
 TraceMemoryManagerStats::TraceMemoryManagerStats(Generation::Name kind, GCCause::Cause cause) {
   switch (kind) {
     case Generation::DefNew:
 #if INCLUDE_ALL_GCS
     case Generation::ParNew:
-    case Generation::ASParNew:
 #endif // INCLUDE_ALL_GCS
       _fullGC=false;
       break;
     case Generation::MarkSweepCompact:
 #if INCLUDE_ALL_GCS
     case Generation::ConcurrentMarkSweep:
-    case Generation::ASConcurrentMarkSweep:
 #endif // INCLUDE_ALL_GCS
       _fullGC=true;
       break;
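The comment block in the hunk above classifies collections by generation kind: scavenging young-generation kinds report through the minor GC manager, while compacting and concurrent-mark-sweep kinds report through the major one. A compact, self-contained sketch of that classification, using an illustrative enum rather than the real Generation::Name:

#include <cstdio>

// Stand-in for the generation kinds that remain after this change.
enum class GenKind { DefNew, ParNew, MarkSweepCompact, ConcurrentMarkSweep };

// Scavenging young-generation collectors map to the minor GC manager
// (full = false); compacting/old-generation collectors map to the major
// GC manager (full = true).
static bool is_full_gc(GenKind kind) {
  switch (kind) {
    case GenKind::DefNew:
    case GenKind::ParNew:
      return false;
    case GenKind::MarkSweepCompact:
    case GenKind::ConcurrentMarkSweep:
      return true;
  }
  return true;  // unreachable with the kinds above
}

int main() {
  std::printf("ParNew full? %d\n", is_full_gc(GenKind::ParNew));                     // 0
  std::printf("MarkSweepCompact full? %d\n", is_full_gc(GenKind::MarkSweepCompact)); // 1
  return 0;
}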