/*
 * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "runtime/mutex.hpp"
#include "services/memBaseline.hpp"
#include "services/memReporter.hpp"
#include "services/mallocTracker.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/defaultStream.hpp"

#ifdef SOLARIS
  volatile bool NMT_stack_walkable = false;
#else
  volatile bool NMT_stack_walkable = true;
#endif

volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown;
NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown;

MemBaseline MemTracker::_baseline;
Mutex*      MemTracker::_query_lock = NULL;
bool MemTracker::_is_nmt_env_valid = true;


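// Determine the initial tracking level from the NMT_LEVEL_<pid> environment
// variable handed down by the launcher (typically set when the VM is started
// with -XX:NativeMemoryTracking=summary/detail), then initialize the malloc
// and virtual memory trackers for that level.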
NMT_TrackingLevel MemTracker::init_tracking_level() {
  NMT_TrackingLevel level = NMT_off;
  char buf[64];
  char nmt_option[64];
  jio_snprintf(buf, sizeof(buf), "NMT_LEVEL_%d", os::current_process_id());
  if (os::getenv(buf, nmt_option, sizeof(nmt_option))) {
    if (strcmp(nmt_option, "summary") == 0) {
      level = NMT_summary;
    } else if (strcmp(nmt_option, "detail") == 0) {
#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
      level = NMT_detail;
#else
      level = NMT_summary;
#endif // PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
    } else if (strcmp(nmt_option, "off") != 0) {
      // The option value is invalid
      _is_nmt_env_valid = false;
    }

    // Remove the environment variable to avoid leaking to child processes
    os::unsetenv(buf);
  }

  // Construct NativeCallStack::EMPTY_STACK. It may get constructed twice,
  // but that is benign; the result is the same.
  ::new ((void*)&NativeCallStack::EMPTY_STACK) NativeCallStack(0, false);

  if (!MallocTracker::initialize(level) ||
      !VirtualMemoryTracker::initialize(level)) {
    level = NMT_off;
  }
  return level;
}

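// Late initialization, once the VM is far enough along to allocate a Mutex:
// finish VirtualMemoryTracker setup and create the query lock, shutting NMT
// down if either step fails.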
void MemTracker::init() {
  NMT_TrackingLevel level = tracking_level();
  if (level >= NMT_summary) {
    if (!VirtualMemoryTracker::late_initialize(level)) {
      shutdown();
      return;
    }
    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
    // Already OOM. It is unlikely, but we still have to handle it.
    if (_query_lock == NULL) {
      shutdown();
    }
  }
}

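// Verify that the NMT level the launcher requested (e.g. "=summary",
// "=detail" or "=off") matches the tracking level the VM actually enabled;
// an unrecognized value marks the NMT environment as invalid.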
bool MemTracker::check_launcher_nmt_support(const char* value) {
  if (strcmp(value, "=detail") == 0) {
#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
    jio_fprintf(defaultStream::error_stream(),
      "NMT detail is not supported on this platform.  Using NMT summary instead.\n");
    if (MemTracker::tracking_level() != NMT_summary) {
      return false;
    }
#else
    if (MemTracker::tracking_level() != NMT_detail) {
      return false;
    }
#endif
  } else if (strcmp(value, "=summary") == 0) {
    if (MemTracker::tracking_level() != NMT_summary) {
      return false;
    }
  } else if (strcmp(value, "=off") == 0) {
    if (MemTracker::tracking_level() != NMT_off) {
      return false;
    }
  } else {
    _is_nmt_env_valid = false;
  }

  return true;
}

bool MemTracker::verify_nmt_option() {
  return _is_nmt_env_valid;
}

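// Translate a client pointer back to the base address of the underlying
// malloc'd block, as recorded by MallocTracker.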
void* MemTracker::malloc_base(void* memblock) {
  return MallocTracker::get_base(memblock);
}

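// Record an uncommit or release of a virtual memory region; no other request
// type is expected to reach this path.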
void Tracker::record(address addr, size_t size) {
  if (MemTracker::tracking_level() < NMT_summary) return;
  switch(_type) {
    case uncommit:
      VirtualMemoryTracker::remove_uncommitted_region(addr, size);
      break;
    case release:
      VirtualMemoryTracker::remove_released_region(addr, size);
      break;
    default:
      ShouldNotReachHere();
  }
}


// Shutdown can only be issued via JCmd, and the NMT JCmd is serialized by a lock
void MemTracker::shutdown() {
  // We can only shut NMT down to the minimal tracking level if it was ever on.
  if (tracking_level() > NMT_minimal) {
    transition_to(NMT_minimal);
  }
}

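// Transition NMT to the given tracking level. Only downgrades (for example
// to NMT_minimal during shutdown) are supported; requests to raise the level
// are ignored.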
bool MemTracker::transition_to(NMT_TrackingLevel level) {
  NMT_TrackingLevel current_level = tracking_level();

  assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off");

  if (current_level == level) {
    return true;
  } else if (current_level > level) {
    // Downgrade the tracking level; we want to lower the tracking level first.
    _tracking_level = level;
    // Make _tracking_level visible immediately.
    OrderAccess::fence();
    VirtualMemoryTracker::transition(current_level, level);
    MallocTracker::transition(current_level, level);
  } else {
    // Upgrading the tracking level is not supported and has never been supported.
    // Allocating and deallocating malloc tracking structures is not thread safe and
    // leads to inconsistencies unless much coarser locks are added.
  }
  return true;
}

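// Capture a fresh baseline and print it to the given stream, either as a
// summary or as a detail report.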
void MemTracker::report(bool summary_only, outputStream* output) {
  assert(output != NULL, "No output stream");
  MemBaseline baseline;
  if (baseline.baseline(summary_only)) {
    if (summary_only) {
      MemSummaryReporter rpt(baseline, output);
      rpt.report();
    } else {
      MemDetailReporter rpt(baseline, output);
      rpt.report();
    }
  }
}

// This is a walker that gathers malloc site hashtable statistics;
// the result is used for tuning.
class StatisticsWalker : public MallocSiteWalker {
 private:
  enum Threshold {
    // aggregates statistics over this threshold into one
    // line item.
    report_threshold = 20
  };

 private:
  // Number of allocation sites that have all memory freed
  int   _empty_entries;
  // Total number of allocation sites, including empty sites
  int   _total_entries;
  // Distribution of the captured call stack depths
  int   _stack_depth_distribution[NMT_TrackingStackDepth];
  // Hash distribution
  int   _hash_distribution[report_threshold];
  // Number of hash buckets that have entries over the threshold
  int   _bucket_over_threshold;

  // The hash bucket that walker is currently walking
  int   _current_hash_bucket;
  // The length of current hash bucket
  int   _current_bucket_length;
  // Number of hash buckets that are not empty
  int   _used_buckets;
  // Longest hash bucket length
  int   _longest_bucket_length;

 public:
  StatisticsWalker() : _empty_entries(0), _total_entries(0) {
    int index = 0;
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      _stack_depth_distribution[index] = 0;
    }
    for (index = 0; index < report_threshold; index ++) {
      _hash_distribution[index] = 0;
    }
    _bucket_over_threshold = 0;
    _longest_bucket_length = 0;
    _current_hash_bucket = -1;
    _current_bucket_length = 0;
    _used_buckets = 0;
  }

  virtual bool at(const MallocSite* e) {
    if (e->size() == 0) _empty_entries ++;
    _total_entries ++;

    // stack depth distribution
    int frames = e->call_stack()->frames();
    _stack_depth_distribution[frames - 1] ++;

    // hash distribution
    int hash_bucket = e->hash() % MallocSiteTable::hash_buckets();
    if (_current_hash_bucket == -1) {
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    } else if (_current_hash_bucket == hash_bucket) {
      _current_bucket_length ++;
    } else {
      record_bucket_length(_current_bucket_length);
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    }
    return true;
  }

  // walk completed
  void completed() {
    record_bucket_length(_current_bucket_length);
  }

  void report_statistics(outputStream* out) {
    int index;
    out->print_cr("Malloc allocation site table:");
    out->print_cr("\tTotal entries: %d", _total_entries);
    out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries);
    out->print_cr(" ");
    out->print_cr("Hash distribution:");
    if (_used_buckets < MallocSiteTable::hash_buckets()) {
      out->print_cr("empty bucket: %d", (MallocSiteTable::hash_buckets() - _used_buckets));
    }
    for (index = 0; index < report_threshold; index ++) {
      if (_hash_distribution[index] != 0) {
        if (index == 0) {
          out->print_cr("  %d    entry: %d", 1, _hash_distribution[0]);
        } else if (index < 9) { // single digit
          out->print_cr("  %d  entries: %d", (index + 1), _hash_distribution[index]);
        } else {
          out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]);
        }
      }
    }
    if (_bucket_over_threshold > 0) {
      out->print_cr(" >%d entries: %d", report_threshold,  _bucket_over_threshold);
    }
    out->print_cr("most entries: %d", _longest_bucket_length);
    out->print_cr(" ");
    out->print_cr("Call stack depth distribution:");
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      if (_stack_depth_distribution[index] > 0) {
        out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]);
      }
    }
  }

 private:
  void record_bucket_length(int length) {
    _used_buckets ++;
    if (length <= report_threshold) {
      _hash_distribution[length - 1] ++;
    } else {
      _bucket_over_threshold ++;
    }
    _longest_bucket_length = MAX2(_longest_bucket_length, length);
  }
};


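// Walk the malloc site table and print statistics (hash bucket and call stack
// depth distributions) that can be used to tune the table size and the
// tracking stack depth.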
void MemTracker::tuning_statistics(outputStream* out) {
  // NMT statistics
  StatisticsWalker walker;
  MallocSiteTable::walk_malloc_site(&walker);
  walker.completed();

  out->print_cr("Native Memory Tracking Statistics:");
  out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets());
  out->print_cr("             Tracking stack depth: %d", NMT_TrackingStackDepth);
  NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());)
  out->print_cr(" ");
  walker.report_statistics(out);
}