/*
 * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/gcUtil.hpp"

// Catch-all file for utility classes

float AdaptiveWeightedAverage::compute_adaptive_average(float new_sample,
                                                        float average) {
  // We smooth the samples by not using weight() directly until we've
  // had enough data to make it meaningful. We'd like the first weight
  // used to be 1, the second to be 1/2, etc until we have
  // OLD_THRESHOLD/weight samples.
  unsigned count_weight = 0;

  // Avoid division by zero if the counter wraps (7158457)
  if (!is_old()) {
    count_weight = OLD_THRESHOLD/count();
  }

  unsigned adaptive_weight = (MAX2(weight(), count_weight));

  float new_avg = exp_avg(average, new_sample, adaptive_weight);

  return new_avg;
}
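
// Worked example (a sketch, assuming OLD_THRESHOLD == 100 and that exp_avg()
// blends the old average and the new sample by a percentage weight, per
// gcUtil.hpp): with weight() == 25, the first sample gets
// count_weight == 100/1 == 100, so the average starts out as the sample
// itself; the second sample gets 100/2 == 50, the third 100/3 == 33, and so
// on.  Once count_weight drops below 25, MAX2() hands control to the
// configured weight() and each later sample contributes 25% to the average.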

void AdaptiveWeightedAverage::sample(float new_sample) {
  increment_count();

  // Compute the new weighted average
  float new_avg = compute_adaptive_average(new_sample, average());
  set_average(new_avg);
  _last_sample = new_sample;
}

void AdaptiveWeightedAverage::print() const {
  print_on(tty);
}

void AdaptiveWeightedAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedAverage::print() const {
  print_on(tty);
}

void AdaptivePaddedAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedNoZeroDevAverage::print() const {
  print_on(tty);
}

void AdaptivePaddedNoZeroDevAverage::print_on(outputStream* st) const {
  guarantee(false, "NYI");
}

void AdaptivePaddedAverage::sample(float new_sample) {
  // Compute new adaptive weighted average based on new sample.
  AdaptiveWeightedAverage::sample(new_sample);

  // Now update the deviation and the padded average.
  float new_avg = average();
  float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                           deviation());
  set_deviation(new_dev);
  set_padded_average(new_avg + padding() * new_dev);
  _last_sample = new_sample;
}
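
// Example of the padding step (a sketch; the concrete values are made up):
// with average() == 10.0, deviation() == 2.0 and padding() == 3, the call
// above sets padded_average() to 10.0 + 3 * 2.0 == 16.0, i.e. the estimate
// is inflated by three deviations so that it is less likely to under-predict
// the quantity being tracked.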

void AdaptivePaddedNoZeroDevAverage::sample(float new_sample) {
  // Compute our parent class's sample information
  AdaptiveWeightedAverage::sample(new_sample);

  float new_avg = average();
  if (new_sample != 0) {
    // We only create a new deviation if the sample is non-zero
    float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
                                             deviation());

    set_deviation(new_dev);
  }
  set_padded_average(new_avg + padding() * deviation());
  _last_sample = new_sample;
}

LinearLeastSquareFit::LinearLeastSquareFit(unsigned weight) :
  _sum_x(0), _sum_x_squared(0), _sum_y(0), _sum_xy(0),
  _intercept(0), _slope(0), _mean_x(weight), _mean_y(weight) {}

void LinearLeastSquareFit::update(double x, double y) {
  _sum_x = _sum_x + x;
  _sum_x_squared = _sum_x_squared + x * x;
  _sum_y = _sum_y + y;
  _sum_xy = _sum_xy + x * y;
  _mean_x.sample(x);
  _mean_y.sample(y);
  assert(_mean_x.count() == _mean_y.count(), "Incorrect count");
  if ( _mean_x.count() > 1 ) {
    double slope_denominator;
    slope_denominator = (_mean_x.count() * _sum_x_squared - _sum_x * _sum_x);
    // Some tolerance should be injected here.  A denominator that is
    // nearly 0 should be avoided.

    if (slope_denominator != 0.0) {
      double slope_numerator;
      slope_numerator = (_mean_x.count() * _sum_xy - _sum_x * _sum_y);
      _slope = slope_numerator / slope_denominator;

      // The _mean_y and _mean_x are decaying averages and can
      // be used to discount earlier data.  If they are used,
      // first consider whether all the quantities should be
      // kept as decaying averages.
      // _intercept = _mean_y.average() - _slope * _mean_x.average();
      _intercept = (_sum_y - _slope * _sum_x) / ((double) _mean_x.count());
    }
  }
}
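
// The update above maintains the standard least-squares estimates over the
// n == _mean_x.count() samples seen so far:
//   slope     = (n * sum(xy) - sum(x) * sum(y)) / (n * sum(x^2) - sum(x)^2)
//   intercept = (sum(y) - slope * sum(x)) / n
// which minimize the sum of squared residuals of the model
// y = intercept + slope * x.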

double LinearLeastSquareFit::y(double x) {
  double new_y;

  if ( _mean_x.count() > 1 ) {
    new_y = (_intercept + _slope * x);
    return new_y;
  } else {
    return _mean_y.average();
  }
}

// Both decrement_will_decrease() and increment_will_decrease() return
// true for a slope of 0.  That is because a change is necessary before
// a slope can be calculated and a 0 slope will, in general, indicate
// that no calculation of the slope has yet been done.  Returning true
// for a slope equal to 0 reflects the intuitive expectation of the
// dependence on the slope.  Don't use the complement of these functions
// since that intuitive expectation is not built into the complement.
bool LinearLeastSquareFit::decrement_will_decrease() {
  return (_slope >= 0.00);
}

bool LinearLeastSquareFit::increment_will_decrease() {
  return (_slope <= 0.00);
}
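
// Usage sketch (hypothetical caller, not part of this file): a size policy
// that feeds the fit with, say, (heap size, gc cost) pairs could consult
//   if (fit.increment_will_decrease()) { /* growing is expected to lower
//   the cost, so consider growing */ }
// and, symmetrically, check decrement_will_decrease() before shrinking.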