mirror of https://github.com/openjdk/jdk.git, synced 2025-08-28 15:24:43 +02:00
Initial load
This commit is contained in:
parent 686d76f772
commit 8153779ad3
2894 changed files with 911801 additions and 0 deletions
141
hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp
Normal file
@@ -0,0 +1,141 @@
/*
 * Copyright 2002-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_gcUtil.cpp.incl"

// Catch-all file for utility classes

float AdaptiveWeightedAverage::compute_adaptive_average(float new_sample,
                                                        float average) {
  // We smooth the samples by not using weight() directly until we've
  // had enough data to make it meaningful. We'd like the first weight
  // used to be 1, the second to be 1/2, etc until we have 100/weight
  // samples.
  unsigned count_weight = 100/count();
  unsigned adaptive_weight = (MAX2(weight(), count_weight));

  float new_avg = exp_avg(average, new_sample, adaptive_weight);

  return new_avg;
}
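
The smoothing above feeds exp_avg(), which is declared alongside the class (presumably in gcUtil.hpp) and is not part of this hunk. As a rough sketch, assuming the weight is a percentage in the range 0 to 100, such an exponential average could look like this:

static inline float exp_avg(float avg, float sample, unsigned int weight) {
  // Sketch only: blend the old average with the new sample, giving the
  // sample `weight` percent and the accumulated history the rest.
  return (100.0F - weight) * avg / 100.0F + weight * sample / 100.0F;
}

Under a form like that, the first sample gets an adaptive weight of 100 and simply replaces the average, the second gets at least 50, and so on, until count() exceeds 100/weight() and the configured weight() takes over.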

void AdaptiveWeightedAverage::sample(float new_sample) {
  increment_count();
  assert(count() != 0,
         "Wraparound -- history would be incorrectly discarded");

  // Compute the new weighted average
  float new_avg = compute_adaptive_average(new_sample, average());
  set_average(new_avg);
  _last_sample = new_sample;
}

void AdaptivePaddedAverage::sample(float new_sample) {
  // Compute our parent class's sample information
  AdaptiveWeightedAverage::sample(new_sample);

  // Now compute the deviation and the new padded sample
  float new_avg = average();
  float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                           deviation());
  set_deviation(new_dev);
  set_padded_average(new_avg + padding() * new_dev);
  _last_sample = new_sample;
}
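
To make the padding step concrete, here is a standalone arithmetic sketch (plain local variables, not the HotSpot API; the numbers are invented):

// Worked example of set_padded_average(new_avg + padding() * new_dev):
float avg     = 10.0f;  // current decaying average, e.g. a pause time in ms
float dev     =  2.0f;  // decaying average of |sample - avg|
float padding =  3.0f;  // the padding() multiplier
float padded  = avg + padding * dev;  // 16.0f, a deliberately high estimate

The padded value sits several deviations above the mean, so a consumer that sizes work against it errs on the conservative side.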

void AdaptivePaddedNoZeroDevAverage::sample(float new_sample) {
  // Compute our parent class's sample information
  AdaptiveWeightedAverage::sample(new_sample);

  float new_avg = average();
  if (new_sample != 0) {
    // We only create a new deviation if the sample is non-zero
    float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                             deviation());

    set_deviation(new_dev);
  }
  set_padded_average(new_avg + padding() * deviation());
  _last_sample = new_sample;
}

LinearLeastSquareFit::LinearLeastSquareFit(unsigned weight) :
  _sum_x(0), _sum_x_squared(0), _sum_y(0), _sum_xy(0),
  _mean_x(weight), _mean_y(weight) {}

void LinearLeastSquareFit::update(double x, double y) {
  _sum_x = _sum_x + x;
  _sum_x_squared = _sum_x_squared + x * x;
  _sum_y = _sum_y + y;
  _sum_xy = _sum_xy + x * y;
  _mean_x.sample(x);
  _mean_y.sample(y);
  assert(_mean_x.count() == _mean_y.count(), "Incorrect count");
  if ( _mean_x.count() > 1 ) {
    double slope_denominator;
    slope_denominator = (_mean_x.count() * _sum_x_squared - _sum_x * _sum_x);
    // Some tolerance should be injected here. A denominator that is
    // nearly 0 should be avoided.

    if (slope_denominator != 0.0) {
      double slope_numerator;
      slope_numerator = (_mean_x.count() * _sum_xy - _sum_x * _sum_y);
      _slope = slope_numerator / slope_denominator;

      // The _mean_y and _mean_x are decaying averages and can
      // be used to discount earlier data. If they are used,
      // first consider whether all the quantities should be
      // kept as decaying averages.
      // _intercept = _mean_y.average() - _slope * _mean_x.average();
      _intercept = (_sum_y - _slope * _sum_x) / ((double) _mean_x.count());
    }
  }
}
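
For reference, the two assignments above are the standard ordinary least squares estimators written in terms of the running sums, with n standing for _mean_x.count():

  slope     = (n * sum_xy - sum_x * sum_y) / (n * sum_x_squared - sum_x * sum_x)
  intercept = (sum_y - slope * sum_x) / n

The commented-out alternative would instead compute the intercept from the decaying means, which discounts older data; as the comment notes, that only makes sense if all the accumulated quantities are kept as decaying averages too.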

double LinearLeastSquareFit::y(double x) {
  double new_y;

  if ( _mean_x.count() > 1 ) {
    new_y = (_intercept + _slope * x);
    return new_y;
  } else {
    return _mean_y.average();
  }
}

// Both decrement_will_decrease() and increment_will_decrease() return
// true for a slope of 0. That is because a change is necessary before
// a slope can be calculated and a 0 slope will, in general, indicate
// that no calculation of the slope has yet been done. Returning true
// for a slope equal to 0 reflects the intuitive expectation of the
// dependence on the slope. Don't use the complement of these functions
// since that intuitive expectation is not built into the complement.
bool LinearLeastSquareFit::decrement_will_decrease() {
  return (_slope >= 0.00);
}

bool LinearLeastSquareFit::increment_will_decrease() {
  return (_slope <= 0.00);
}
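
A hedged sketch of how a caller might consult the fit when tuning a parameter; LinearLeastSquareFit, update(), decrement_will_decrease() and increment_will_decrease() come from this file, while the surrounding policy (the names record, next_size, cost_vs_size and the weight of 50) is made up for illustration and assumes gcUtil.hpp is included:

// Illustrative only: track how some cost responds to a tunable size.
static LinearLeastSquareFit cost_vs_size(50 /* weight */);

static void record(double size, double cost) {
  cost_vs_size.update(size, cost);  // feed (x, y) observations into the fit
}

static double next_size(double current_size, double step) {
  // Shrink if a smaller size is predicted to lower the cost, otherwise grow
  // if growing is predicted to lower it. Both predicates return true for a
  // zero slope, so this naive policy shrinks first; a real one would break
  // that tie deliberately.
  if (cost_vs_size.decrement_will_decrease()) {
    return current_size - step;
  } else if (cost_vs_size.increment_will_decrease()) {
    return current_size + step;
  }
  return current_size;
}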