Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp
/*
 * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/gcUtil.hpp"

// Catch-all file for utility classes

float AdaptiveWeightedAverage::compute_adaptive_average(float new_sample,
                                                        float average) {
  // We smooth the samples by not using weight() directly until we've
  // had enough data to make it meaningful. We'd like the first weight
  // used to be 1, the second to be 1/2, etc until we have
  // OLD_THRESHOLD/weight samples.
  unsigned count_weight = 0;

  // Avoid division by zero if the counter wraps (7158457)
  if (!is_old()) {
    count_weight = OLD_THRESHOLD/count();
  }

  unsigned adaptive_weight = (MAX2(weight(), count_weight));

  float new_avg = exp_avg(average, new_sample, adaptive_weight);

  return new_avg;
}

void AdaptiveWeightedAverage::sample(float new_sample) {
  increment_count();

  // Compute the new weighted average
  float new_avg = compute_adaptive_average(new_sample, average());
  set_average(new_avg);
  _last_sample = new_sample;
}

void AdaptiveWeightedAverage::print() const {
  print_on(tty);
}

void AdaptiveWeightedAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedAverage::print() const {
  print_on(tty);
}

void AdaptivePaddedAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedNoZeroDevAverage::print() const {
  print_on(tty);
}

void AdaptivePaddedNoZeroDevAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedAverage::sample(float new_sample) {
  // Compute new adaptive weighted average based on new sample.
  AdaptiveWeightedAverage::sample(new_sample);

  // Now update the deviation and the padded average.
  float new_avg = average();
  float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                           deviation());
  set_deviation(new_dev);
  set_padded_average(new_avg + padding() * new_dev);
  _last_sample = new_sample;
}

void AdaptivePaddedNoZeroDevAverage::sample(float new_sample) {
  // Compute our parent class's sample information
  AdaptiveWeightedAverage::sample(new_sample);

  float new_avg = average();
  if (new_sample != 0) {
    // We only create a new deviation if the sample is non-zero
    float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                             deviation());

    set_deviation(new_dev);
  }
  set_padded_average(new_avg + padding() * deviation());
  _last_sample = new_sample;
}

LinearLeastSquareFit::LinearLeastSquareFit(unsigned weight) :
  _sum_x(0), _sum_x_squared(0), _sum_y(0), _sum_xy(0),
  _intercept(0), _slope(0), _mean_x(weight), _mean_y(weight) {}

void LinearLeastSquareFit::update(double x, double y) {
  _sum_x = _sum_x + x;
  _sum_x_squared = _sum_x_squared + x * x;
  _sum_y = _sum_y + y;
  _sum_xy = _sum_xy + x * y;
  _mean_x.sample(x);
  _mean_y.sample(y);
  assert(_mean_x.count() == _mean_y.count(), "Incorrect count");
  if ( _mean_x.count() > 1 ) {
    double slope_denominator;
    slope_denominator = (_mean_x.count() * _sum_x_squared - _sum_x * _sum_x);
    // Some tolerance should be injected here. A denominator that is
    // nearly 0 should be avoided.

    if (slope_denominator != 0.0) {
      double slope_numerator;
      slope_numerator = (_mean_x.count() * _sum_xy - _sum_x * _sum_y);
      _slope = slope_numerator / slope_denominator;

      // The _mean_y and _mean_x are decaying averages and can
      // be used to discount earlier data.  If they are used,
      // first consider whether all the quantities should be
      // kept as decaying averages.
      // _intercept = _mean_y.average() - _slope * _mean_x.average();
      _intercept = (_sum_y - _slope * _sum_x) / ((double) _mean_x.count());
    }
  }
}

double LinearLeastSquareFit::y(double x) {
  double new_y;

  if ( _mean_x.count() > 1 ) {
    new_y = (_intercept + _slope * x);
    return new_y;
  } else {
    return _mean_y.average();
  }
}

// Both decrement_will_decrease() and increment_will_decrease() return
// true for a slope of 0.  That is because a change is necessary before
// a slope can be calculated and a 0 slope will, in general, indicate
// that no calculation of the slope has yet been done.  Returning true
// for a slope equal to 0 reflects the intuitive expectation of the
// dependence on the slope.  Don't use the complement of these functions
// since that intuitive expectation is not built into the complement.
bool LinearLeastSquareFit::decrement_will_decrease() {
  return (_slope >= 0.00);
}

bool LinearLeastSquareFit::increment_will_decrease() {
  return (_slope <= 0.00);
}
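
For reference, the two calculations above are: a decaying (exponentially weighted) average, where exp_avg() declared in gc_implementation/shared/gcUtil.hpp folds each new sample into the running average with a percentage weight, and an ordinary least-squares fit computed from the running sums maintained by LinearLeastSquareFit::update(). The standalone sketch below illustrates both, assuming the usual percent-weighted form for the exponential average; demo_exp_avg, demo_least_squares, and the sample values are illustrative names and data, not code from the HotSpot sources.

// Illustrative sketch only; not part of gcUtil.cpp.
#include <cstdio>

// Assumed shape of the exponential average: 'weight' percent of the new
// sample, (100 - weight) percent of the previous average.
static float demo_exp_avg(float avg, float sample, unsigned weight) {
  return (100.0f - weight) * avg / 100.0f + weight * sample / 100.0f;
}

// Ordinary least squares over n points, using the same running sums that
// LinearLeastSquareFit::update() maintains:
//   slope     = (n*sum_xy - sum_x*sum_y) / (n*sum_x_squared - sum_x*sum_x)
//   intercept = (sum_y - slope*sum_x) / n
static void demo_least_squares(const double* xs, const double* ys, int n,
                               double* slope, double* intercept) {
  double sum_x = 0.0, sum_y = 0.0, sum_xy = 0.0, sum_x_squared = 0.0;
  for (int i = 0; i < n; i++) {
    sum_x         += xs[i];
    sum_y         += ys[i];
    sum_xy        += xs[i] * ys[i];
    sum_x_squared += xs[i] * xs[i];
  }
  double denominator = n * sum_x_squared - sum_x * sum_x;
  if (denominator != 0.0) {
    // Same guard as update(): skip the fit when the denominator is zero.
    *slope     = (n * sum_xy - sum_x * sum_y) / denominator;
    *intercept = (sum_y - *slope * sum_x) / n;
  } else {
    *slope     = 0.0;
    *intercept = (n > 0) ? sum_y / n : 0.0;
  }
}

int main() {
  // Decaying average: with a weight of 25, each new sample contributes 25%.
  float avg = 10.0f;
  avg = demo_exp_avg(avg, 20.0f, 25);   // 12.5
  avg = demo_exp_avg(avg, 20.0f, 25);   // 14.375
  printf("decaying average after two samples of 20: %.3f\n", avg);

  // Least squares over points lying exactly on y = 2x + 1.
  double xs[] = {1.0, 2.0, 3.0, 4.0};
  double ys[] = {3.0, 5.0, 7.0, 9.0};
  double slope, intercept;
  demo_least_squares(xs, ys, 4, &slope, &intercept);
  printf("slope = %.2f, intercept = %.2f\n", slope, intercept);  // 2.00, 1.00
  return 0;
}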