8034246: remove CMS and ParNew adaptive size policy code

Reviewed-by: tschatzl, jwilhelm, mgerdin
This commit is contained in:
John Coomes 2014-06-26 13:30:43 -07:00
parent aec070cb69
commit 900ca33ab0
21 changed files with 29 additions and 3651 deletions

View file

@@ -1,477 +0,0 @@
/*
* Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "runtime/timer.hpp"
// This class keeps statistical information and computes the
// size of the heap for the concurrent mark sweep collector.
//
// Cost for garbage collector include cost for
// minor collection
// concurrent collection
// stop-the-world component
// concurrent component
// major compacting collection
// uses decaying cost
// Forward decls
class elapsedTimer;
// CMSAdaptiveSizePolicy specializes AdaptiveSizePolicy with the
// statistics (decaying averages and linear estimators) used to size
// the heap for the concurrent mark sweep (CMS) collector.
class CMSAdaptiveSizePolicy : public AdaptiveSizePolicy {
friend class CMSGCAdaptivePolicyCounters;
friend class CMSCollector;
private:
// Total number of processors available
int _processor_count;
// Number of processors used by the concurrent phases of GC
// This number is assumed to be the same for all concurrent
// phases.
int _concurrent_processor_count;
// Time that the mutators run exclusive of a particular
// phase. For example, the time the mutators run excluding
// the time during which the cms collector runs concurrently
// with the mutators.
// Between end of most recent cms reset and start of initial mark
// This may be redundant
double _latest_cms_reset_end_to_initial_mark_start_secs;
// Between end of the most recent initial mark and start of remark
double _latest_cms_initial_mark_end_to_remark_start_secs;
// Between end of most recent collection and start of
// a concurrent collection
double _latest_cms_collection_end_to_collection_start_secs;
// Times of the concurrent phases of the most recent
// concurrent collection
double _latest_cms_concurrent_marking_time_secs;
double _latest_cms_concurrent_precleaning_time_secs;
double _latest_cms_concurrent_sweeping_time_secs;
// Between end of most recent STW MSC and start of next STW MSC
double _latest_cms_msc_end_to_msc_start_time_secs;
// Between end of most recent MS and start of next MS
// This does not include any time spent during a concurrent
// collection.
double _latest_cms_ms_end_to_ms_start;
// Between start and end of the initial mark of the most recent
// concurrent collection.
double _latest_cms_initial_mark_start_to_end_time_secs;
// Between start and end of the remark phase of the most recent
// concurrent collection
double _latest_cms_remark_start_to_end_time_secs;
// Between start and end of the most recent MS STW marking phase
double _latest_cms_ms_marking_start_to_end_time_secs;
// Pause time timers
static elapsedTimer _STW_timer;
// Concurrent collection timer. Used for total of all concurrent phases
// during 1 collection cycle.
static elapsedTimer _concurrent_timer;
// When the size of the generation is changed, the size
// of the change will be rounded up or down (depending on the
// type of change) by this value.
size_t _generation_alignment;
// If this variable is true, the size of the young generation
// may be changed in order to reduce the pause(s) of the
// collection of the tenured generation in order to meet the
// pause time goal. It is common to change the size of the
// tenured generation in order to meet the pause time goal
// for the tenured generation. With the CMS collector for
// the tenured generation, the size of the young generation
// can have a significant effect on the pause times for collecting the
// tenured generation.
// This is a duplicate of a variable in PSAdaptiveSizePolicy. It
// is duplicated because it is not clear that it is general enough
// to go into AdaptiveSizePolicy.
int _change_young_gen_for_maj_pauses;
// Variable that is set to true after a collection.
bool _first_after_collection;
// Fraction of collections that are of each type
double concurrent_fraction() const;
double STW_msc_fraction() const;
double STW_ms_fraction() const;
// This call cannot be put into the epilogue as long as some
// of the counters can be set during concurrent phases.
virtual void clear_generation_free_space_flags();
// Record that a collection has just finished; consumed (and reset)
// by get_and_clear_first_after_collection() below.
void set_first_after_collection() { _first_after_collection = true; }
protected:
// Average of the sum of the concurrent times for
// one collection in seconds.
AdaptiveWeightedAverage* _avg_concurrent_time;
// Average time between concurrent collections in seconds.
AdaptiveWeightedAverage* _avg_concurrent_interval;
// Average cost of the concurrent part of a collection
// in seconds.
AdaptiveWeightedAverage* _avg_concurrent_gc_cost;
// Average of the initial pause of a concurrent collection in seconds.
AdaptivePaddedAverage* _avg_initial_pause;
// Average of the remark pause of a concurrent collection in seconds.
AdaptivePaddedAverage* _avg_remark_pause;
// Average of the stop-the-world (STW) (initial mark + remark)
// times in seconds for concurrent collections.
AdaptiveWeightedAverage* _avg_cms_STW_time;
// Average of the STW collection cost for concurrent collections.
AdaptiveWeightedAverage* _avg_cms_STW_gc_cost;
// Average of the bytes free at the start of the sweep.
AdaptiveWeightedAverage* _avg_cms_free_at_sweep;
// Average of the bytes free at the end of the collection.
AdaptiveWeightedAverage* _avg_cms_free;
// Average of the bytes promoted between cms collections.
AdaptiveWeightedAverage* _avg_cms_promo;
// stop-the-world (STW) mark-sweep-compact
// Average of the pause time in seconds for STW mark-sweep-compact
// collections.
AdaptiveWeightedAverage* _avg_msc_pause;
// Average of the interval in seconds between STW mark-sweep-compact
// collections.
AdaptiveWeightedAverage* _avg_msc_interval;
// Average of the collection costs for STW mark-sweep-compact
// collections.
AdaptiveWeightedAverage* _avg_msc_gc_cost;
// Averages for mark-sweep collections.
// The collection may have started as a background collection
// that completes in a stop-the-world (STW) collection.
// Average of the pause time in seconds for mark-sweep
// collections.
AdaptiveWeightedAverage* _avg_ms_pause;
// Average of the interval in seconds between mark-sweep
// collections.
AdaptiveWeightedAverage* _avg_ms_interval;
// Average of the collection costs for mark-sweep
// collections.
AdaptiveWeightedAverage* _avg_ms_gc_cost;
// These variables contain a linear fit of
// a generation size as the independent variable
// and a pause time as the dependent variable.
// For example _remark_pause_old_estimator
// is a fit of the old generation size as the
// independent variable and the remark pause
// as the dependent variable.
// remark pause time vs. cms gen size
LinearLeastSquareFit* _remark_pause_old_estimator;
// initial pause time vs. cms gen size
LinearLeastSquareFit* _initial_pause_old_estimator;
// remark pause time vs. young gen size
LinearLeastSquareFit* _remark_pause_young_estimator;
// initial pause time vs. young gen size
LinearLeastSquareFit* _initial_pause_young_estimator;
// Accessors
int processor_count() const { return _processor_count; }
int concurrent_processor_count() const { return _concurrent_processor_count; }
AdaptiveWeightedAverage* avg_concurrent_time() const {
return _avg_concurrent_time;
}
AdaptiveWeightedAverage* avg_concurrent_interval() const {
return _avg_concurrent_interval;
}
AdaptiveWeightedAverage* avg_concurrent_gc_cost() const {
return _avg_concurrent_gc_cost;
}
AdaptiveWeightedAverage* avg_cms_STW_time() const {
return _avg_cms_STW_time;
}
AdaptiveWeightedAverage* avg_cms_STW_gc_cost() const {
return _avg_cms_STW_gc_cost;
}
AdaptivePaddedAverage* avg_initial_pause() const {
return _avg_initial_pause;
}
AdaptivePaddedAverage* avg_remark_pause() const {
return _avg_remark_pause;
}
AdaptiveWeightedAverage* avg_cms_free() const {
return _avg_cms_free;
}
AdaptiveWeightedAverage* avg_cms_free_at_sweep() const {
return _avg_cms_free_at_sweep;
}
AdaptiveWeightedAverage* avg_msc_pause() const {
return _avg_msc_pause;
}
AdaptiveWeightedAverage* avg_msc_interval() const {
return _avg_msc_interval;
}
AdaptiveWeightedAverage* avg_msc_gc_cost() const {
return _avg_msc_gc_cost;
}
AdaptiveWeightedAverage* avg_ms_pause() const {
return _avg_ms_pause;
}
AdaptiveWeightedAverage* avg_ms_interval() const {
return _avg_ms_interval;
}
AdaptiveWeightedAverage* avg_ms_gc_cost() const {
return _avg_ms_gc_cost;
}
LinearLeastSquareFit* remark_pause_old_estimator() {
return _remark_pause_old_estimator;
}
LinearLeastSquareFit* initial_pause_old_estimator() {
return _initial_pause_old_estimator;
}
LinearLeastSquareFit* remark_pause_young_estimator() {
return _remark_pause_young_estimator;
}
LinearLeastSquareFit* initial_pause_young_estimator() {
return _initial_pause_young_estimator;
}
// These *slope() methods return the slope
// m for the linear fit of an independent
// variable vs. a dependent variable. For
// example
// remark_pause = m * old_generation_size + c
// These may be used to determine if an
// adjustment should be made to achieve a goal.
// For example, if remark_pause_old_slope() is
// positive, a reduction of the old generation
// size has on average resulted in the reduction
// of the remark pause.
float remark_pause_old_slope() {
return _remark_pause_old_estimator->slope();
}
float initial_pause_old_slope() {
return _initial_pause_old_estimator->slope();
}
float remark_pause_young_slope() {
return _remark_pause_young_estimator->slope();
}
float initial_pause_young_slope() {
return _initial_pause_young_estimator->slope();
}
// Update estimators
void update_minor_pause_old_estimator(double minor_pause_in_ms);
// Fraction of processors used by the concurrent phases.
double concurrent_processor_fraction();
// Returns the total times for the concurrent part of the
// latest collection in seconds.
double concurrent_collection_time();
// Return the total times for the concurrent part of the
// latest collection in seconds where the times of the various
// concurrent phases are scaled by the processor fraction used
// during the phase.
double scaled_concurrent_collection_time();
// Dimensionless concurrent GC cost for all the concurrent phases.
double concurrent_collection_cost(double interval_in_seconds);
// Dimensionless GC cost
double collection_cost(double pause_in_seconds, double interval_in_seconds);
// Identifies this policy as the CMS adaptive size policy.
virtual GCPolicyKind kind() const { return _gc_cms_adaptive_size_policy; }
virtual double time_since_major_gc() const;
// This returns the maximum average for the concurrent, ms, and
// msc collections. This is meant to be used for the calculation
// of the decayed major gc cost and is not in general the
// average of all the different types of major collections.
virtual double major_gc_interval_average_for_decay() const;
public:
// Construct the policy from the initial generation/space sizes and
// the minor/major pause time and throughput (gc cost ratio) goals.
CMSAdaptiveSizePolicy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size,
double max_gc_minor_pause_sec,
double max_gc_pause_sec,
uint gc_cost_ratio);
// The timers for the stop-the-world phases measure a total
// stop-the-world time. The timer is started and stopped
// for each phase but is only reset after the final checkpoint.
void checkpoint_roots_initial_begin();
void checkpoint_roots_initial_end(GCCause::Cause gc_cause);
void checkpoint_roots_final_begin();
void checkpoint_roots_final_end(GCCause::Cause gc_cause);
// Methods for gathering information about the
// concurrent marking phase of the collection.
// Records the mutator times and
// resets the concurrent timer.
void concurrent_marking_begin();
// Resets concurrent phase timer in the begin methods and
// saves the time for a phase in the end methods.
void concurrent_marking_end();
void concurrent_sweeping_begin();
void concurrent_sweeping_end();
// Similar to the above (e.g., concurrent_marking_end()) and
// is used for both the precleaning and abortable precleaning
// phases.
void concurrent_precleaning_begin();
void concurrent_precleaning_end();
// Stops the concurrent phases time. Gathers
// information and resets the timer.
void concurrent_phases_end(GCCause::Cause gc_cause,
size_t cur_eden,
size_t cur_promo);
// Methods for gathering information about STW Mark-Sweep-Compact
void msc_collection_begin();
void msc_collection_end(GCCause::Cause gc_cause);
// Methods for gathering information about Mark-Sweep done
// in the foreground.
void ms_collection_begin();
void ms_collection_end(GCCause::Cause gc_cause);
// Cost for a mark-sweep tenured gen collection done in the foreground
double ms_gc_cost() const {
return MAX2(0.0F, _avg_ms_gc_cost->average());
}
// Cost of collecting the tenured generation. Includes
// concurrent collection and STW collection costs
double cms_gc_cost() const;
// Cost of STW mark-sweep-compact tenured gen collection.
double msc_gc_cost() const {
return MAX2(0.0F, _avg_msc_gc_cost->average());
}
// Combined cost of minor collections and STW mark-sweep-compact
// collections, capped at 1.0.
double compacting_gc_cost() const {
double result = MIN2(1.0, minor_gc_cost() + msc_gc_cost());
assert(result >= 0.0, "Both minor and major costs are non-negative");
return result;
}
// Restarts the concurrent phases timer.
void concurrent_phases_resume();
// Time beginning and end of the marking phase for
// a synchronous MS collection. A MS collection
// that finishes in the foreground can have started
// in the background. These methods capture the
// completion of the marking (after the initial
// marking) that is done in the foreground.
void ms_collection_marking_begin();
void ms_collection_marking_end(GCCause::Cause gc_cause);
// Access to the shared timer for the concurrent phases.
static elapsedTimer* concurrent_timer_ptr() {
return &_concurrent_timer;
}
// Average of the bytes promoted between cms collections.
AdaptiveWeightedAverage* avg_cms_promo() const {
return _avg_cms_promo;
}
// Flag (and setter) indicating that the young gen size may be
// changed to meet the major pause time goal; see the private
// field _change_young_gen_for_maj_pauses.
int change_young_gen_for_maj_pauses() {
return _change_young_gen_for_maj_pauses;
}
void set_change_young_gen_for_maj_pauses(int v) {
_change_young_gen_for_maj_pauses = v;
}
// Clear the _latest_* internal time intervals recorded above.
void clear_internal_time_intervals();
// Either calculated_promo_size_in_bytes() or promo_size()
// should be deleted.
size_t promo_size() { return _promo_size; }
void set_promo_size(size_t v) { _promo_size = v; }
// Cost of GC for all types of collections.
virtual double gc_cost() const;
size_t generation_alignment() { return _generation_alignment; }
virtual void compute_eden_space_size(size_t cur_eden,
size_t max_eden_size);
// Calculates new survivor space size; returns a new tenuring threshold
// value. Stores new survivor size in _survivor_size.
virtual uint compute_survivor_space_size_and_threshold(
bool is_survivor_overflow,
uint tenuring_threshold,
size_t survivor_limit);
virtual void compute_tenured_generation_free_space(size_t cur_tenured_free,
size_t max_tenured_available,
size_t cur_eden);
// Alignment-respecting increments/decrements of the eden and promo
// sizes, and adjustments of each toward the pause time, throughput,
// and footprint goals.
size_t eden_decrement_aligned_down(size_t cur_eden);
size_t eden_increment_aligned_up(size_t cur_eden);
size_t adjust_eden_for_pause_time(size_t cur_eden);
size_t adjust_eden_for_throughput(size_t cur_eden);
size_t adjust_eden_for_footprint(size_t cur_eden);
size_t promo_decrement_aligned_down(size_t cur_promo);
size_t promo_increment_aligned_up(size_t cur_promo);
size_t adjust_promo_for_pause_time(size_t cur_promo);
size_t adjust_promo_for_throughput(size_t cur_promo);
size_t adjust_promo_for_footprint(size_t cur_promo, size_t cur_eden);
// Scale down the input size by the ratio of the cost to collect the
// generation to the total GC cost.
size_t scale_by_gen_gc_cost(size_t base_change, double gen_gc_cost);
// Return the value and clear it.
bool get_and_clear_first_after_collection();
// Printing support
virtual bool print_adaptive_size_policy_on(outputStream* st) const;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP

View file

@@ -23,9 +23,8 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parNew/parNewGeneration.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
@@ -57,25 +56,12 @@ void ConcurrentMarkSweepPolicy::initialize_generations() {
if (_generations == NULL)
vm_exit_during_initialization("Unable to allocate gen spec");
if (UseParNewGC) {
if (UseAdaptiveSizePolicy) {
_generations[0] = new GenerationSpec(Generation::ASParNew,
_initial_young_size, _max_young_size);
} else {
_generations[0] = new GenerationSpec(Generation::ParNew,
_initial_young_size, _max_young_size);
}
} else {
_generations[0] = new GenerationSpec(Generation::DefNew,
_initial_young_size, _max_young_size);
}
if (UseAdaptiveSizePolicy) {
_generations[1] = new GenerationSpec(Generation::ASConcurrentMarkSweep,
_initial_old_size, _max_old_size);
} else {
_generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
_initial_old_size, _max_old_size);
}
Generation::Name yg_name =
UseParNewGC ? Generation::ParNew : Generation::DefNew;
_generations[0] = new GenerationSpec(yg_name, _initial_young_size,
_max_young_size);
_generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
_initial_old_size, _max_old_size);
if (_generations[0] == NULL || _generations[1] == NULL) {
vm_exit_during_initialization("Unable to allocate gen spec");
@@ -85,14 +71,12 @@ void ConcurrentMarkSweepPolicy::initialize_generations() {
// Create the size policy used by the CMS collector policy.
// NOTE(review): this span is a unified-diff overlay — it contains BOTH
// the removed body (construction of a CMSAdaptiveSizePolicy, using
// MaxGCMinorPauseMillis) and the added body (construction of a plain
// AdaptiveSizePolicy). As flat code it would assign _size_policy twice;
// only one of the two assignments belongs to any single revision.
void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size) {
// Convert the millisecond pause-time goals to seconds.
double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
// Removed variant: CMS-specific adaptive size policy.
_size_policy = new CMSAdaptiveSizePolicy(init_eden_size,
init_promo_size,
init_survivor_size,
max_gc_minor_pause_sec,
max_gc_pause_sec,
GCTimeRatio);
// Added variant: generic adaptive size policy (no minor-pause goal).
_size_policy = new AdaptiveSizePolicy(init_eden_size,
init_promo_size,
init_survivor_size,
max_gc_pause_sec,
GCTimeRatio);
}
void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
@@ -110,22 +94,3 @@ bool ConcurrentMarkSweepPolicy::has_soft_ended_eden()
{
return CMSIncrementalMode;
}
//
// ASConcurrentMarkSweepPolicy methods
//
// Create the jstat policy counters for the adaptive-size CMS policy.
// The counter-set name reflects which young-generation collector is
// paired with CMS (ParNew or the serial Copy collector).
void ASConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
assert(size_policy() != NULL, "A size policy is required");
// initialize the policy counters - 2 collectors, 3 generations
if (UseParNewGC) {
_gc_policy_counters = new CMSGCAdaptivePolicyCounters("ParNew:CMS", 2, 3,
size_policy());
}
else {
_gc_policy_counters = new CMSGCAdaptivePolicyCounters("Copy:CMS", 2, 3,
size_policy());
}
}

View file

@@ -47,19 +47,4 @@ class ConcurrentMarkSweepPolicy : public GenCollectorPolicy {
virtual bool has_soft_ended_eden();
};
// Collector policy for CMS with adaptive sizing ("AS") enabled.
// Differs from the base policy in its policy-kind tag and in the
// jstat counters it creates.
class ASConcurrentMarkSweepPolicy : public ConcurrentMarkSweepPolicy {
public:
// Initialize the jstat counters. This method requires a
// size policy. The size policy is expected to be created
// after the generations are fully initialized so the
// initialization of the counters need to be done post
// the initialization of the generations.
void initialize_gc_policy_counters();
virtual CollectorPolicy::Name kind() {
return CollectorPolicy::ASConcurrentMarkSweepPolicyKind;
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSCOLLECTORPOLICY_HPP

View file

@@ -1,303 +0,0 @@
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "memory/resourceArea.hpp"
// Construct the CMS-specific adaptive-policy performance counters in
// the SUN_GC name space. Counters are created only when UsePerfData
// is enabled; each is seeded from the current value of the
// corresponding size-policy average or estimator slope.
CMSGCAdaptivePolicyCounters::CMSGCAdaptivePolicyCounters(const char* name_arg,
int collectors,
int generations,
AdaptiveSizePolicy* size_policy_arg)
: GCAdaptivePolicyCounters(name_arg,
collectors,
generations,
size_policy_arg) {
if (UsePerfData) {
EXCEPTION_MARK;
ResourceMark rm;
const char* cname =
PerfDataManager::counter_name(name_space(), "cmsCapacity");
_cms_capacity_counter = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Bytes, (jlong) OldSize, CHECK);
// Non-product-only counters: most recent pause samples (for comparing
// against the averages); they do not control behavior.
#ifdef NOT_PRODUCT
cname =
PerfDataManager::counter_name(name_space(), "initialPause");
_initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_initial_pause()->last_sample(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "remarkPause");
_remark_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_remark_pause()->last_sample(),
CHECK);
#endif
// Averages of the STW pauses and costs of concurrent collections.
cname =
PerfDataManager::counter_name(name_space(), "avgInitialPause");
_avg_initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_initial_pause()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgRemarkPause");
_avg_remark_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_remark_pause()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgSTWGcCost");
_avg_cms_STW_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_cms_STW_gc_cost()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgSTWTime");
_avg_cms_STW_time_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_cms_STW_time()->average(),
CHECK);
// Averages for the concurrent part of a collection.
cname = PerfDataManager::counter_name(name_space(), "avgConcurrentTime");
_avg_concurrent_time_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_concurrent_time()->average(),
CHECK);
cname =
PerfDataManager::counter_name(name_space(), "avgConcurrentInterval");
_avg_concurrent_interval_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_concurrent_interval()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgConcurrentGcCost");
_avg_concurrent_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_concurrent_gc_cost()->average(),
CHECK);
// Free-space and promotion averages for the CMS (tenured) generation.
cname = PerfDataManager::counter_name(name_space(), "avgCMSFreeAtSweep");
_avg_cms_free_at_sweep_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_cms_free_at_sweep()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgCMSFree");
_avg_cms_free_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_cms_free()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgCMSPromo");
_avg_cms_promo_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_cms_promo()->average(),
CHECK);
// Averages for STW mark-sweep-compact (MSC) collections.
cname = PerfDataManager::counter_name(name_space(), "avgMscPause");
_avg_msc_pause_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_msc_pause()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgMscInterval");
_avg_msc_interval_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_msc_interval()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "mscGcCost");
_msc_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_msc_gc_cost()->average(),
CHECK);
// Averages for foreground mark-sweep (MS) collections.
cname = PerfDataManager::counter_name(name_space(), "avgMsPause");
_avg_ms_pause_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_ms_pause()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgMsInterval");
_avg_ms_interval_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_ms_interval()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "msGcCost");
_ms_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
cname,
PerfData::U_Ticks,
(jlong) cms_size_policy()->avg_ms_gc_cost()->average(),
CHECK);
cname = PerfDataManager::counter_name(name_space(), "majorGcCost");
_major_gc_cost_counter = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Ticks, (jlong) cms_size_policy()->cms_gc_cost(), CHECK);
// Promotion statistics and policy-decision counters.
cname = PerfDataManager::counter_name(name_space(), "avgPromotedAvg");
_promoted_avg_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
cms_size_policy()->calculated_promo_size_in_bytes(), CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgPromotedDev");
_promoted_avg_dev_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
(jlong) 0 , CHECK);
cname = PerfDataManager::counter_name(name_space(), "avgPromotedPaddedAvg");
_promoted_padded_avg_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
cms_size_policy()->calculated_promo_size_in_bytes(), CHECK);
cname = PerfDataManager::counter_name(name_space(),
"changeYoungGenForMajPauses");
_change_young_gen_for_maj_pauses_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Events,
(jlong)0, CHECK);
// Slopes of the pause-time vs. generation-size linear fits.
cname = PerfDataManager::counter_name(name_space(), "remarkPauseOldSlope");
_remark_pause_old_slope_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
(jlong) cms_size_policy()->remark_pause_old_slope(), CHECK);
cname = PerfDataManager::counter_name(name_space(), "initialPauseOldSlope");
_initial_pause_old_slope_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
(jlong) cms_size_policy()->initial_pause_old_slope(), CHECK);
cname =
PerfDataManager::counter_name(name_space(), "remarkPauseYoungSlope") ;
_remark_pause_young_slope_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
(jlong) cms_size_policy()->remark_pause_young_slope(), CHECK);
cname =
PerfDataManager::counter_name(name_space(), "initialPauseYoungSlope");
_initial_pause_young_slope_counter =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
(jlong) cms_size_policy()->initial_pause_young_slope(), CHECK);
}
// This counter set only makes sense with the CMS adaptive size policy.
assert(size_policy()->is_gc_cms_adaptive_size_policy(),
"Wrong type of size policy");
}
// Refresh all counters from the size policy (no-op unless UsePerfData).
// NOTE(review): the explicit base-class call is followed by
// update_counters_from_policy(), which itself begins by calling
// GCAdaptivePolicyCounters::update_counters_from_policy() again (see
// below) — so the base counters are refreshed twice per call.
void CMSGCAdaptivePolicyCounters::update_counters() {
if (UsePerfData) {
GCAdaptivePolicyCounters::update_counters_from_policy();
update_counters_from_policy();
}
}
// Refresh all counters and additionally record the promotion
// statistics from the supplied GC stats (last sample, average,
// deviation, and padded average of promoted bytes).
void CMSGCAdaptivePolicyCounters::update_counters(CMSGCStats* gc_stats) {
if (UsePerfData) {
update_counters();
update_promoted((size_t) gc_stats->avg_promoted()->last_sample());
update_avg_promoted_avg(gc_stats);
update_avg_promoted_dev(gc_stats);
update_avg_promoted_padded_avg(gc_stats);
}
}
// Copy the current values out of the CMS size policy into every
// counter created by the constructor: base-class counters first, then
// the CMS-specific cost, pause, interval, free-space, survivor, and
// slope counters. Guarded on UsePerfData and on a non-NULL policy.
void CMSGCAdaptivePolicyCounters::update_counters_from_policy() {
if (UsePerfData && (cms_size_policy() != NULL)) {
GCAdaptivePolicyCounters::update_counters_from_policy();
update_major_gc_cost_counter();
update_mutator_cost_counter();
update_eden_size();
update_promo_size();
// If these updates from the last_sample() work,
// revise the update methods for these counters
// (both here and in PS).
update_survived((size_t) cms_size_policy()->avg_survived()->last_sample());
update_avg_concurrent_time_counter();
update_avg_concurrent_interval_counter();
update_avg_concurrent_gc_cost_counter();
// Non-product last-sample counters (see constructor).
#ifdef NOT_PRODUCT
update_initial_pause_counter();
update_remark_pause_counter();
#endif
update_avg_initial_pause_counter();
update_avg_remark_pause_counter();
update_avg_cms_STW_time_counter();
update_avg_cms_STW_gc_cost_counter();
update_avg_cms_free_counter();
update_avg_cms_free_at_sweep_counter();
update_avg_cms_promo_counter();
update_avg_msc_pause_counter();
update_avg_msc_interval_counter();
update_msc_gc_cost_counter();
update_avg_ms_pause_counter();
update_avg_ms_interval_counter();
update_ms_gc_cost_counter();
update_avg_old_live_counter();
// Survivor-space and tenuring-threshold decision counters.
update_survivor_size_counters();
update_avg_survived_avg_counters();
update_avg_survived_dev_counters();
update_decrement_tenuring_threshold_for_gc_cost();
update_increment_tenuring_threshold_for_gc_cost();
update_decrement_tenuring_threshold_for_survivor_limit();
update_change_young_gen_for_maj_pauses();
// Linear-fit slope counters.
update_major_collection_slope_counter();
update_remark_pause_old_slope_counter();
update_initial_pause_old_slope_counter();
update_remark_pause_young_slope_counter();
update_initial_pause_young_slope_counter();
update_decide_at_full_gc_counter();
}
}

View file

@@ -1,308 +0,0 @@
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "runtime/perfData.hpp"
// CMSGCAdaptivePolicyCounters is a holder class for performance counters
// that track the data and decisions for the ergonomics policy for the
// concurrent mark sweep collector
class CMSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
friend class VMStructs;
private:
// Capacity of tenured generation recorded at the end of
// any collection.
PerfVariable* _cms_capacity_counter; // Make this common with PS _old_capacity
// Average stop-the-world pause time for both initial and
// remark pauses sampled at the end of the checkpointRootsFinalWork.
PerfVariable* _avg_cms_STW_time_counter;
// Average stop-the-world (STW) GC cost for the STW pause time
// _avg_cms_STW_time_counter.
PerfVariable* _avg_cms_STW_gc_cost_counter;
#ifdef NOT_PRODUCT
// These are useful to see how the most recent values of these
// counters compare to their respective averages but
// do not control behavior.
PerfVariable* _initial_pause_counter;
PerfVariable* _remark_pause_counter;
#endif
// Average of the initial marking pause for a concurrent collection.
PerfVariable* _avg_initial_pause_counter;
// Average of the remark pause for a concurrent collection.
PerfVariable* _avg_remark_pause_counter;
// Average for the sum of all the concurrent times per collection.
PerfVariable* _avg_concurrent_time_counter;
// Average for the time between the most recent end of a
// concurrent collection and the beginning of the next
// concurrent collection.
PerfVariable* _avg_concurrent_interval_counter;
// Average of the concurrent GC costs based on _avg_concurrent_time_counter
// and _avg_concurrent_interval_counter.
PerfVariable* _avg_concurrent_gc_cost_counter;
// Average of the free space in the tenured generation at the
// end of the sweep of the tenured generation.
PerfVariable* _avg_cms_free_counter;
// Average of the free space in the tenured generation at the
// start of the sweep of the tenured generation.
PerfVariable* _avg_cms_free_at_sweep_counter;
// Average of the free space in the tenured generation at the
// after any resizing of the tenured generation at the end
// of a collection of the tenured generation.
PerfVariable* _avg_cms_promo_counter;
// Average of the mark-sweep-compact (MSC) pause time for a collection
// of the tenured generation.
PerfVariable* _avg_msc_pause_counter;
// Average for the time between the most recent end of a
// MSC collection and the beginning of the next MSC collection.
PerfVariable* _avg_msc_interval_counter;
// Average for the GC cost of a MSC collection based on
// _avg_msc_pause_counter and _avg_msc_interval_counter.
PerfVariable* _msc_gc_cost_counter;
// Average of the mark-sweep (MS) pause time for a collection
// of the tenured generation.
PerfVariable* _avg_ms_pause_counter;
// Average for the time between the most recent end of a
// MS collection and the beginning of the next MS collection.
PerfVariable* _avg_ms_interval_counter;
// Average for the GC cost of a MS collection based on
// _avg_ms_pause_counter and _avg_ms_interval_counter.
PerfVariable* _ms_gc_cost_counter;
// Average of the bytes promoted per minor collection.
PerfVariable* _promoted_avg_counter;
// Average of the deviation of the promoted average.
PerfVariable* _promoted_avg_dev_counter;
// Padded average of the bytes promoted per minor collection.
PerfVariable* _promoted_padded_avg_counter;
// See description of the _change_young_gen_for_maj_pauses
// variable recently in cmsAdaptiveSizePolicy.hpp.
PerfVariable* _change_young_gen_for_maj_pauses_counter;
// See descriptions of _remark_pause_old_slope, _initial_pause_old_slope,
// etc. variables recently in cmsAdaptiveSizePolicy.hpp.
PerfVariable* _remark_pause_old_slope_counter;
PerfVariable* _initial_pause_old_slope_counter;
PerfVariable* _remark_pause_young_slope_counter;
PerfVariable* _initial_pause_young_slope_counter;
CMSAdaptiveSizePolicy* cms_size_policy() {
assert(_size_policy->kind() ==
AdaptiveSizePolicy::_gc_cms_adaptive_size_policy,
"Wrong size policy");
return (CMSAdaptiveSizePolicy*)_size_policy;
}
inline void update_avg_cms_STW_time_counter() {
_avg_cms_STW_time_counter->set_value(
(jlong) (cms_size_policy()->avg_cms_STW_time()->average() *
(double) MILLIUNITS));
}
inline void update_avg_cms_STW_gc_cost_counter() {
_avg_cms_STW_gc_cost_counter->set_value(
(jlong) (cms_size_policy()->avg_cms_STW_gc_cost()->average() * 100.0));
}
inline void update_avg_initial_pause_counter() {
_avg_initial_pause_counter->set_value(
(jlong) (cms_size_policy()->avg_initial_pause()->average() *
(double) MILLIUNITS));
}
#ifdef NOT_PRODUCT
inline void update_avg_remark_pause_counter() {
_avg_remark_pause_counter->set_value(
(jlong) (cms_size_policy()-> avg_remark_pause()->average() *
(double) MILLIUNITS));
}
inline void update_initial_pause_counter() {
_initial_pause_counter->set_value(
(jlong) (cms_size_policy()->avg_initial_pause()->average() *
(double) MILLIUNITS));
}
#endif
inline void update_remark_pause_counter() {
_remark_pause_counter->set_value(
(jlong) (cms_size_policy()-> avg_remark_pause()->last_sample() *
(double) MILLIUNITS));
}
inline void update_avg_concurrent_time_counter() {
_avg_concurrent_time_counter->set_value(
(jlong) (cms_size_policy()->avg_concurrent_time()->last_sample() *
(double) MILLIUNITS));
}
inline void update_avg_concurrent_interval_counter() {
_avg_concurrent_interval_counter->set_value(
(jlong) (cms_size_policy()->avg_concurrent_interval()->average() *
(double) MILLIUNITS));
}
inline void update_avg_concurrent_gc_cost_counter() {
_avg_concurrent_gc_cost_counter->set_value(
(jlong) (cms_size_policy()->avg_concurrent_gc_cost()->average() * 100.0));
}
inline void update_avg_cms_free_counter() {
_avg_cms_free_counter->set_value(
(jlong) cms_size_policy()->avg_cms_free()->average());
}
inline void update_avg_cms_free_at_sweep_counter() {
_avg_cms_free_at_sweep_counter->set_value(
(jlong) cms_size_policy()->avg_cms_free_at_sweep()->average());
}
inline void update_avg_cms_promo_counter() {
_avg_cms_promo_counter->set_value(
(jlong) cms_size_policy()->avg_cms_promo()->average());
}
inline void update_avg_old_live_counter() {
_avg_old_live_counter->set_value(
(jlong)(cms_size_policy()->avg_old_live()->average())
);
}
inline void update_avg_msc_pause_counter() {
_avg_msc_pause_counter->set_value(
(jlong) (cms_size_policy()->avg_msc_pause()->average() *
(double) MILLIUNITS));
}
inline void update_avg_msc_interval_counter() {
_avg_msc_interval_counter->set_value(
(jlong) (cms_size_policy()->avg_msc_interval()->average() *
(double) MILLIUNITS));
}
inline void update_msc_gc_cost_counter() {
_msc_gc_cost_counter->set_value(
(jlong) (cms_size_policy()->avg_msc_gc_cost()->average() * 100.0));
}
inline void update_avg_ms_pause_counter() {
_avg_ms_pause_counter->set_value(
(jlong) (cms_size_policy()->avg_ms_pause()->average() *
(double) MILLIUNITS));
}
inline void update_avg_ms_interval_counter() {
_avg_ms_interval_counter->set_value(
(jlong) (cms_size_policy()->avg_ms_interval()->average() *
(double) MILLIUNITS));
}
inline void update_ms_gc_cost_counter() {
_ms_gc_cost_counter->set_value(
(jlong) (cms_size_policy()->avg_ms_gc_cost()->average() * 100.0));
}
inline void update_major_gc_cost_counter() {
_major_gc_cost_counter->set_value(
(jlong)(cms_size_policy()->cms_gc_cost() * 100.0)
);
}
inline void update_mutator_cost_counter() {
_mutator_cost_counter->set_value(
(jlong)(cms_size_policy()->mutator_cost() * 100.0)
);
}
inline void update_avg_promoted_avg(CMSGCStats* gc_stats) {
_promoted_avg_counter->set_value(
(jlong)(gc_stats->avg_promoted()->average())
);
}
inline void update_avg_promoted_dev(CMSGCStats* gc_stats) {
_promoted_avg_dev_counter->set_value(
(jlong)(gc_stats->avg_promoted()->deviation())
);
}
inline void update_avg_promoted_padded_avg(CMSGCStats* gc_stats) {
_promoted_padded_avg_counter->set_value(
(jlong)(gc_stats->avg_promoted()->padded_average())
);
}
inline void update_remark_pause_old_slope_counter() {
_remark_pause_old_slope_counter->set_value(
(jlong)(cms_size_policy()->remark_pause_old_slope() * 1000)
);
}
inline void update_initial_pause_old_slope_counter() {
_initial_pause_old_slope_counter->set_value(
(jlong)(cms_size_policy()->initial_pause_old_slope() * 1000)
);
}
inline void update_remark_pause_young_slope_counter() {
_remark_pause_young_slope_counter->set_value(
(jlong)(cms_size_policy()->remark_pause_young_slope() * 1000)
);
}
inline void update_initial_pause_young_slope_counter() {
_initial_pause_young_slope_counter->set_value(
(jlong)(cms_size_policy()->initial_pause_young_slope() * 1000)
);
}
inline void update_change_young_gen_for_maj_pauses() {
_change_young_gen_for_maj_pauses_counter->set_value(
cms_size_policy()->change_young_gen_for_maj_pauses());
}
public:
CMSGCAdaptivePolicyCounters(const char* name, int collectors, int generations,
AdaptiveSizePolicy* size_policy);
// update counters
void update_counters();
void update_counters(CMSGCStats* gc_stats);
void update_counters_from_policy();
inline void update_cms_capacity_counter(size_t size_in_bytes) {
_cms_capacity_counter->set_value(size_in_bytes);
}
virtual GCPolicyCounters::Name kind() const {
return GCPolicyCounters::CMSGCAdaptivePolicyCountersKind;
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP

View file

@ -70,7 +70,6 @@ class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
class CompactibleFreeListSpace: public CompactibleSpace {
friend class VMStructs;
friend class ConcurrentMarkSweepGeneration;
friend class ASConcurrentMarkSweepGeneration;
friend class CMSCollector;
// Local alloc buffer for promotion into this space.
friend class CFLS_LAB;

View file

@ -27,9 +27,8 @@
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
@ -319,27 +318,13 @@ void CMSCollector::ref_processor_init() {
}
}
CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
AdaptiveSizePolicy* CMSCollector::size_policy() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->kind() == CollectedHeap::GenCollectedHeap,
"Wrong type of heap");
CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
gch->gen_policy()->size_policy();
assert(sp->is_gc_cms_adaptive_size_policy(),
"Wrong type of size policy");
return sp;
return gch->gen_policy()->size_policy();
}
CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
CMSGCAdaptivePolicyCounters* results =
(CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
assert(
results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
"Wrong gc policy counter kind");
return results;
}
void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
const char* gen_name = "old";
@ -2031,11 +2016,6 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
"collections passed to foreground collector", _full_gcs_since_conc_gc);
}
// Sample collection interval time and reset for collection pause.
if (UseAdaptiveSizePolicy) {
size_policy()->msc_collection_begin();
}
// Temporarily widen the span of the weak reference processing to
// the entire heap.
MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
@ -2111,11 +2091,6 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
_inter_sweep_timer.reset();
_inter_sweep_timer.start();
// Sample collection pause time and reset for collection interval.
if (UseAdaptiveSizePolicy) {
size_policy()->msc_collection_end(gch->gc_cause());
}
gc_timer->register_gc_end();
gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
@ -2373,26 +2348,14 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Caus
}
break;
case Precleaning:
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_precleaning_begin();
}
// marking from roots in markFromRoots has been completed
preclean();
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_precleaning_end();
}
assert(_collectorState == AbortablePreclean ||
_collectorState == FinalMarking,
"Collector state should have changed");
break;
case AbortablePreclean:
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_phases_resume();
}
abortable_preclean();
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_precleaning_end();
}
assert(_collectorState == FinalMarking, "Collector state should "
"have changed");
break;
@ -2406,23 +2369,12 @@ void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Caus
assert(_foregroundGCShouldWait, "block post-condition");
break;
case Sweeping:
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_sweeping_begin();
}
// final marking in checkpointRootsFinal has been completed
sweep(true);
assert(_collectorState == Resizing, "Collector state change "
"to Resizing must be done under the free_list_lock");
_full_gcs_since_conc_gc = 0;
// Stop the timers for adaptive size policy for the concurrent phases
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_sweeping_end();
size_policy()->concurrent_phases_end(gch->gc_cause(),
gch->prev_gen(_cmsGen)->capacity(),
_cmsGen->free());
}
case Resizing: {
// Sweeping has been completed...
// At this point the background collection has completed.
@ -2539,9 +2491,6 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Caus
const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
true, NULL, gc_id);)
if (UseAdaptiveSizePolicy) {
size_policy()->ms_collection_begin();
}
COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
HandleMark hm; // Discard invalid handles created during verification
@ -2633,11 +2582,6 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Caus
}
}
if (UseAdaptiveSizePolicy) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
size_policy()->ms_collection_end(gch->gc_cause());
}
if (VerifyAfterGC &&
GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
Universe::verify();
@ -3687,9 +3631,6 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
if (UseAdaptiveSizePolicy) {
size_policy()->checkpoint_roots_initial_begin();
}
// Reset all the PLAB chunk arrays if necessary.
if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
@ -3769,9 +3710,6 @@ void CMSCollector::checkpointRootsInitialWork(bool asynch) {
// Save the end of the used_region of the constituent generations
// to be used to limit the extent of sweep in each generation.
save_sweep_limits();
if (UseAdaptiveSizePolicy) {
size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
}
verify_overflow_empty();
}
@ -3788,15 +3726,6 @@ bool CMSCollector::markFromRoots(bool asynch) {
bool res;
if (asynch) {
// Start the timers for adaptive size policy for the concurrent phases
// Do it here so that the foreground MS can use the concurrent
// timer since a foreground MS might has the sweep done concurrently
// or STW.
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_marking_begin();
}
// Weak ref discovery note: We may be discovering weak
// refs in this generation concurrent (but interleaved) with
// weak ref discovery by a younger generation collector.
@ -3814,22 +3743,12 @@ bool CMSCollector::markFromRoots(bool asynch) {
gclog_or_tty->print_cr("bailing out to foreground collection");
}
}
if (UseAdaptiveSizePolicy) {
size_policy()->concurrent_marking_end();
}
} else {
assert(SafepointSynchronize::is_at_safepoint(),
"inconsistent with asynch == false");
if (UseAdaptiveSizePolicy) {
size_policy()->ms_collection_marking_begin();
}
// already have locks
res = markFromRootsWork(asynch);
_collectorState = FinalMarking;
if (UseAdaptiveSizePolicy) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
size_policy()->ms_collection_marking_end(gch->gc_cause());
}
}
verify_overflow_empty();
return res;
@ -4705,8 +4624,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
if (clean_survivor) { // preclean the active survivor space(s)
assert(_young_gen->kind() == Generation::DefNew ||
_young_gen->kind() == Generation::ParNew ||
_young_gen->kind() == Generation::ASParNew,
_young_gen->kind() == Generation::ParNew,
"incorrect type for cast");
DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
PushAndMarkClosure pam_cl(this, _span, ref_processor(),
@ -5077,10 +4995,6 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
assert(haveFreelistLocks(), "must have free list locks");
assert_lock_strong(bitMapLock());
if (UseAdaptiveSizePolicy) {
size_policy()->checkpoint_roots_final_begin();
}
ResourceMark rm;
HandleMark hm;
@ -5214,9 +5128,6 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch,
"Should be clear by end of the final marking");
assert(_ct->klass_rem_set()->mod_union_is_clear(),
"Should be clear by end of the final marking");
if (UseAdaptiveSizePolicy) {
size_policy()->checkpoint_roots_final_end(gch->gc_cause());
}
}
void CMSParInitialMarkTask::work(uint worker_id) {
@ -6329,7 +6240,6 @@ void CMSCollector::sweep(bool asynch) {
_inter_sweep_timer.stop();
_inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
assert(!_intra_sweep_timer.is_active(), "Should not be active");
_intra_sweep_timer.reset();
@ -6454,17 +6364,6 @@ void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
}
}
CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
assert(gch->kind() == CollectedHeap::GenCollectedHeap,
"Wrong type of heap");
CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
gch->gen_policy()->size_policy();
assert(sp->is_gc_cms_adaptive_size_policy(),
"Wrong type of size policy");
return sp;
}
void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
if (PrintGCDetails && Verbose) {
gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
@ -6540,9 +6439,6 @@ void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
// Reset CMS data structures (for now just the marking bit map)
// preparatory for the next cycle.
void CMSCollector::reset(bool asynch) {
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSAdaptiveSizePolicy* sp = size_policy();
AdaptiveSizePolicyOutput(sp, gch->total_collections());
if (asynch) {
CMSTokenSyncWithLocks ts(true, bitMapLock());
@ -6597,7 +6493,7 @@ void CMSCollector::reset(bool asynch) {
// Because only the full (i.e., concurrent mode failure) collections
// are being measured for gc overhead limits, clean the "near" flag
// and count.
sp->reset_gc_overhead_limit_count();
size_policy()->reset_gc_overhead_limit_count();
_collectorState = Idling;
} else {
// already have the lock
@ -7064,7 +6960,6 @@ void MarkRefsIntoAndScanClosure::do_yield_work() {
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
@ -7225,7 +7120,6 @@ void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
@ -7298,7 +7192,6 @@ void SurvivorSpacePrecleanClosure::do_yield_work() {
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
@ -7457,7 +7350,6 @@ void MarkFromRootsClosure::do_yield_work() {
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
@ -8099,7 +7991,6 @@ void CMSPrecleanRefsYieldClosure::do_yield_work() {
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
@ -8780,7 +8671,6 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
ConcurrentMarkSweepThread::desynchronize(true);
ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
@ -9327,172 +9217,6 @@ bool CMSCollector::no_preserved_marks() const {
}
#endif
CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
{
GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
CMSAdaptiveSizePolicy* size_policy =
(CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
assert(size_policy->is_gc_cms_adaptive_size_policy(),
"Wrong type for size policy");
return size_policy;
}
void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
size_t desired_promo_size) {
if (cur_promo_size < desired_promo_size) {
size_t expand_bytes = desired_promo_size - cur_promo_size;
if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
"Expanding tenured generation by " SIZE_FORMAT " (bytes)",
expand_bytes);
}
expand(expand_bytes,
MinHeapDeltaBytes,
CMSExpansionCause::_adaptive_size_policy);
} else if (desired_promo_size < cur_promo_size) {
size_t shrink_bytes = cur_promo_size - desired_promo_size;
if (PrintAdaptiveSizePolicy && Verbose) {
gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
"Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
shrink_bytes);
}
shrink(shrink_bytes);
}
}
CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSGCAdaptivePolicyCounters* counters =
(CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
"Wrong kind of counters");
return counters;
}
void ASConcurrentMarkSweepGeneration::update_counters() {
if (UsePerfData) {
_space_counters->update_all();
_gen_counters->update_all();
CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
"Wrong gc statistics type");
counters->update_counters(gc_stats_l);
}
}
void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
if (UsePerfData) {
_space_counters->update_used(used);
_space_counters->update_capacity();
_gen_counters->update_all();
CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
GenCollectedHeap* gch = GenCollectedHeap::heap();
CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
"Wrong gc statistics type");
counters->update_counters(gc_stats_l);
}
}
void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
assert_locked_or_safepoint(Heap_lock);
assert_lock_strong(freelistLock());
HeapWord* old_end = _cmsSpace->end();
HeapWord* unallocated_start = _cmsSpace->unallocated_block();
assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
FreeChunk* chunk_at_end = find_chunk_at_end();
if (chunk_at_end == NULL) {
// No room to shrink
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("No room to shrink: old_end "
PTR_FORMAT " unallocated_start " PTR_FORMAT
" chunk_at_end " PTR_FORMAT,
old_end, unallocated_start, chunk_at_end);
}
return;
} else {
// Find the chunk at the end of the space and determine
// how much it can be shrunk.
size_t shrinkable_size_in_bytes = chunk_at_end->size();
size_t aligned_shrinkable_size_in_bytes =
align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
"Inconsistent chunk at end of space");
size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
size_t word_size_before = heap_word_size(_virtual_space.committed_size());
// Shrink the underlying space
_virtual_space.shrink_by(bytes);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
" desired_bytes " SIZE_FORMAT
" shrinkable_size_in_bytes " SIZE_FORMAT
" aligned_shrinkable_size_in_bytes " SIZE_FORMAT
" bytes " SIZE_FORMAT,
desired_bytes, shrinkable_size_in_bytes,
aligned_shrinkable_size_in_bytes, bytes);
gclog_or_tty->print_cr(" old_end " SIZE_FORMAT
" unallocated_start " SIZE_FORMAT,
old_end, unallocated_start);
}
// If the space did shrink (shrinking is not guaranteed),
// shrink the chunk at the end by the appropriate amount.
if (((HeapWord*)_virtual_space.high()) < old_end) {
size_t new_word_size =
heap_word_size(_virtual_space.committed_size());
// Have to remove the chunk from the dictionary because it is changing
// size and might be someplace elsewhere in the dictionary.
// Get the chunk at end, shrink it, and put it
// back.
_cmsSpace->removeChunkFromDictionary(chunk_at_end);
size_t word_size_change = word_size_before - new_word_size;
size_t chunk_at_end_old_size = chunk_at_end->size();
assert(chunk_at_end_old_size >= word_size_change,
"Shrink is too large");
chunk_at_end->set_size(chunk_at_end_old_size -
word_size_change);
_cmsSpace->freed((HeapWord*) chunk_at_end->end(),
word_size_change);
_cmsSpace->returnChunkToDictionary(chunk_at_end);
MemRegion mr(_cmsSpace->bottom(), new_word_size);
_bts->resize(new_word_size); // resize the block offset shared array
Universe::heap()->barrier_set()->resize_covered_region(mr);
_cmsSpace->assert_locked();
_cmsSpace->set_end((HeapWord*)_virtual_space.high());
NOT_PRODUCT(_cmsSpace->dictionary()->verify());
// update the space and generation capacity counters
if (UsePerfData) {
_space_counters->update_capacity();
_gen_counters->update_all();
}
if (Verbose && PrintGCDetails) {
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size + bytes;
gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, bytes/K, new_mem_size/K);
}
}
assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
"Inconsistency at end of space");
assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
"Shrinking is inconsistent");
return;
}
}
// Transfer some number of overflown objects to usual marking
// stack. Return true if some objects were transferred.
bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {

View file

@ -52,7 +52,7 @@
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.
class CMSAdaptiveSizePolicy;
class AdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class CMSTracer;
@ -1009,8 +1009,7 @@ class CMSCollector: public CHeapObj<mtGC> {
void icms_wait(); // Called at yield points.
// Adaptive size policy
CMSAdaptiveSizePolicy* size_policy();
CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
AdaptiveSizePolicy* size_policy();
static void print_on_error(outputStream* st);
@ -1150,9 +1149,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
// Adaptive size policy
CMSAdaptiveSizePolicy* size_policy();
void set_did_compact(bool v) { _did_compact = v; }
bool refs_discovery_is_atomic() const { return false; }
@ -1346,37 +1342,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
void rotate_debug_collection_type();
};
class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
// Return the size policy from the heap's collector
// policy casted to CMSAdaptiveSizePolicy*.
CMSAdaptiveSizePolicy* cms_size_policy() const;
// Resize the generation based on the adaptive size
// policy.
void resize(size_t cur_promo, size_t desired_promo);
// Return the GC counters from the collector policy
CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
virtual void shrink_by(size_t bytes);
public:
ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, CardTableRS* ct,
bool use_adaptive_freelists,
FreeBlockDictionary<FreeChunk>::DictionaryChoice
dictionaryChoice) :
ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
use_adaptive_freelists, dictionaryChoice) {}
virtual const char* short_name() const { return "ASCMS"; }
virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
virtual void update_counters();
virtual void update_counters(size_t used);
};
//
// Closures of various sorts used by CMS to accomplish its work
//