allocationStats.hpp
/*
 * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

class AllocationStats VALUE_OBJ_CLASS_SPEC {
  // A duration threshold (in ms) used to filter
  // possibly unreliable samples.
  static float _threshold;

  // We measure the demand between the end of the previous sweep and
  // beginning of this sweep:
  //   Count(end_last_sweep) - Count(start_this_sweep)
  //     + splitBirths(between) - splitDeaths(between)
  // The above number divided by the time since the end of the
  // previous sweep gives us a time rate of demand for blocks
  // of this size. We compute a padded average of this rate as
  // our current estimate for the time rate of demand for blocks
  // of this size. Similarly, we keep a padded average for the time
  // between sweeps. Our current estimate for demand for blocks of
  // this size is then simply computed as the product of these two
  // estimates.
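  //
  // For illustration (hypothetical numbers): if the count at the end of the
  // previous sweep was 100, the count at the start of this sweep is 40, and
  // 10 split births and 5 split deaths occurred in between, then the demand
  // over the interval is 100 - 40 + 10 - 5 = 65 blocks; dividing by an
  // inter-sweep time of, say, 2.0 seconds gives a sampled rate of
  // 32.5 blocks/second for the estimator below.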
  AdaptivePaddedAverage _demand_rate_estimate;

  ssize_t     _desired;         // Demand estimate computed as described above
  ssize_t     _coalDesired;     // desired +/- small-percent for tuning coalescing

  ssize_t     _surplus;         // count - (desired +/- small-percent),
                                // used to tune splitting in best fit
  ssize_t     _bfrSurp;         // surplus at start of current sweep
  ssize_t     _prevSweep;       // count from end of previous sweep
  ssize_t     _beforeSweep;     // count from before current sweep
  ssize_t     _coalBirths;      // additional chunks from coalescing
  ssize_t     _coalDeaths;      // loss from coalescing
  ssize_t     _splitBirths;     // additional chunks from splitting
  ssize_t     _splitDeaths;     // loss from splitting
  size_t      _returnedBytes;   // number of bytes returned to list.
 public:
  void initialize(bool split_birth = false) {
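    // Construct the rate estimator in place (placement new) with the CMS
    // weight and padding; this also lets initialize() reset an
    // already-constructed AllocationStats.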
    AdaptivePaddedAverage* dummy =
      new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
                                                         CMS_FLSPadding);
    _desired = 0;
    _coalDesired = 0;
    _surplus = 0;
    _bfrSurp = 0;
    _prevSweep = 0;
    _beforeSweep = 0;
    _coalBirths = 0;
    _coalDeaths = 0;
    _splitBirths = split_birth? 1 : 0;
    _splitDeaths = 0;
    _returnedBytes = 0;
  }

  AllocationStats() {
    initialize();
  }

  // The rate estimate is in blocks per second.
  void compute_desired(size_t count,
                       float inter_sweep_current,
                       float inter_sweep_estimate,
                       float intra_sweep_estimate) {
    // If the latest inter-sweep time is below our granularity
    // of measurement, we may call in here with
    // inter_sweep_current == 0. However, even for suitably small
    // but non-zero inter-sweep durations, we may not trust the accuracy
    // of accumulated data, since it has not been "integrated"
    // (read "low-pass-filtered") long enough, and would be
    // vulnerable to noisy glitches. In such cases, we
    // ignore the current sample and use currently available
    // historical estimates.
    // XXX NEEDS TO BE FIXED
    // assert(prevSweep() + splitBirths() >= splitDeaths() + (ssize_t)count, "Conservation Principle");
    //     ^^^^^^^^^^^^^^^^^^^^^^^^^^^    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    //     "Total Stock"                  "Not used at this block size"
    if (inter_sweep_current > _threshold) {
      ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() - splitDeaths();
      // XXX NEEDS TO BE FIXED
      // assert(demand >= 0, "Demand should be non-negative");
      // Defensive: adjust for imprecision in event counting
      if (demand < 0) {
        demand = 0;
      }
      float old_rate = _demand_rate_estimate.padded_average();
      float rate = ((float)demand)/inter_sweep_current;
      _demand_rate_estimate.sample(rate);
      float new_rate = _demand_rate_estimate.padded_average();
      ssize_t old_desired = _desired;
      // Optionally extrapolate demand over the current (intra-sweep) interval
      // as well; parenthesize the conditional so it is not swallowed by the
      // addition.
      _desired = (ssize_t)(new_rate * (inter_sweep_estimate
                                       + (CMSExtrapolateSweep
                                            ? intra_sweep_estimate
                                            : 0.0)));
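      // For illustration (hypothetical numbers, continuing the example
      // above): with new_rate == 32.5 blocks/second and
      // inter_sweep_estimate == 2.0 seconds (no extrapolation),
      // _desired becomes 65 blocks.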
      if (PrintFLSStatistics > 1) {
        gclog_or_tty->print_cr("demand: %d, old_rate: %f, current_rate: %f, new_rate: %f, old_desired: %d, new_desired: %d",
                                demand,     old_rate,     rate,             new_rate,     old_desired,     _desired);
      }
    }
  }

  ssize_t desired() const { return _desired; }
  void set_desired(ssize_t v) { _desired = v; }

  ssize_t coalDesired() const { return _coalDesired; }
  void set_coalDesired(ssize_t v) { _coalDesired = v; }

  ssize_t surplus() const { return _surplus; }
  void set_surplus(ssize_t v) { _surplus = v; }
  void increment_surplus() { _surplus++; }
  void decrement_surplus() { _surplus--; }

  ssize_t bfrSurp() const { return _bfrSurp; }
  void set_bfrSurp(ssize_t v) { _bfrSurp = v; }
  ssize_t prevSweep() const { return _prevSweep; }
  void set_prevSweep(ssize_t v) { _prevSweep = v; }
  ssize_t beforeSweep() const { return _beforeSweep; }
  void set_beforeSweep(ssize_t v) { _beforeSweep = v; }

  ssize_t coalBirths() const { return _coalBirths; }
  void set_coalBirths(ssize_t v) { _coalBirths = v; }
  void increment_coalBirths() { _coalBirths++; }

  ssize_t coalDeaths() const { return _coalDeaths; }
  void set_coalDeaths(ssize_t v) { _coalDeaths = v; }
  void increment_coalDeaths() { _coalDeaths++; }

  ssize_t splitBirths() const { return _splitBirths; }
  void set_splitBirths(ssize_t v) { _splitBirths = v; }
  void increment_splitBirths() { _splitBirths++; }

  ssize_t splitDeaths() const { return _splitDeaths; }
  void set_splitDeaths(ssize_t v) { _splitDeaths = v; }
  void increment_splitDeaths() { _splitDeaths++; }

  NOT_PRODUCT(
    size_t returnedBytes() const { return _returnedBytes; }
    void set_returnedBytes(size_t v) { _returnedBytes = v; }
  )
};
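
// A minimal usage sketch (hypothetical caller, not part of this file): a
// free list holding blocks of one size would record split/coalesce events as
// they occur and, at the start of each sweep, refresh its demand estimate
// roughly as follows:
//
//   AllocationStats stats;
//   stats.initialize();
//   ...
//   stats.increment_splitBirths();              // a block of this size was split off
//   stats.increment_coalDeaths();               // a block of this size was coalesced away
//   ...
//   // At the start of a sweep:
//   stats.set_beforeSweep(current_count);
//   stats.compute_desired(current_count,        // blocks currently on the list
//                         inter_sweep_current,  // seconds since the previous sweep ended
//                         inter_sweep_estimate, // padded average of inter-sweep times
//                         intra_sweep_estimate);
//   ...
//   // At the end of the sweep, remember the count for the next demand sample:
//   stats.set_prevSweep(current_count);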