/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"

MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  {mtJavaHeap,   "Java Heap"},
  {mtClass,      "Class"},
  {mtThreadStack,"Thread Stack"},
  {mtThread,     "Thread"},
  {mtCode,       "Code"},
  {mtGC,         "GC"},
  {mtCompiler,   "Compiler"},
  {mtInternal,   "Internal"},
  {mtOther,      "Other"},
  {mtSymbol,     "Symbol"},
  {mtNMT,        "Memory Tracking"},
  {mtChunk,      "Pooled Free Chunks"},
  {mtClassShared,"Shared spaces for classes"},
  {mtTest,       "Test"},
  {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
                             // behind
};
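// Note: flag2index() and type2name() below do a linear scan of this table, so
// every MEMFLAGS value used by the tracker needs an entry here; e.g.
// type2name(mtGC) returns "GC".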

MemBaseline::MemBaseline() {
  _baselined = false;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    _arena_data[index].set_type(MemType2NameMap[index]._flag);
  }

  _malloc_cs = NULL;
  _vm_cs = NULL;
  _vm_map = NULL;

  _number_of_classes = 0;
  _number_of_threads = 0;
}


void MemBaseline::clear() {
  if (_malloc_cs != NULL) {
    delete _malloc_cs;
    _malloc_cs = NULL;
  }

  if (_vm_cs != NULL) {
    delete _vm_cs;
    _vm_cs = NULL;
  }

  if (_vm_map != NULL) {
    delete _vm_map;
    _vm_map = NULL;
  }

  reset();
}


void MemBaseline::reset() {
  _baselined = false;
  _total_vm_reserved = 0;
  _total_vm_committed = 0;
  _total_malloced = 0;
  _number_of_classes = 0;

  if (_malloc_cs != NULL) _malloc_cs->clear();
  if (_vm_cs != NULL) _vm_cs->clear();
  if (_vm_map != NULL) _vm_map->clear();

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].clear();
    _vm_data[index].clear();
    _arena_data[index].clear();
  }
}

MemBaseline::~MemBaseline() {
  clear();
}

// baseline malloc'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
  size_t used_arena_size = 0;
  int index;
  while (malloc_ptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
    size_t size = malloc_ptr->size();
    if (malloc_ptr->is_arena_memory_record()) {
      // We do have anonymous arenas; they are either used as value objects,
      // which are embedded inside other objects, or used as stack objects.
      _arena_data[index].inc(size);
      used_arena_size += size;
    } else {
      _total_malloced += size;
      _malloc_data[index].inc(size);
      if (malloc_ptr->is_arena_record()) {
        // see if arena memory record present
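        // (the snapshot is kept in address order, and an arena's memory record
        //  is assumed to immediately follow its arena record, so a single
        //  peek_next() is sufficient here)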
        MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
             "Arena records do not match");
          size = next_malloc_ptr->size();
          _arena_data[index].inc(size);
          used_arena_size += size;
          malloc_itr.next();
        }
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // subtract used arena size to get the size of arena chunks in the free list
  index = flag2index(mtChunk);
  _malloc_data[index].reduce(used_arena_size);
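  // (e.g. if chunk records total 10MB and arenas currently hold 9MB of that,
  //  the mtChunk bucket is left reporting 1MB of pooled free chunks;
  //  illustrative values)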
  // we really don't know how many chunks are in the free list, so just set
  // the count to 0
  _malloc_data[index].overwrite_counter(0);

  return true;
}

// baseline mmap'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
  MemPointerArrayIteratorImpl vm_itr(const_cast<MemPointerArray*>(vm_records));
  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
  int index;
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
      // we use the number of thread stacks to count threads
      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
        _number_of_threads ++;
      }
      _total_vm_reserved += vm_ptr->size();
      _vm_data[index].inc(vm_ptr->size(), 0);
    } else {
      _total_vm_committed += vm_ptr->size();
      _vm_data[index].inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegion*)vm_itr.next();
  }
  return true;
}

// baseline malloc'd memory by callsites; only callsites with total memory
// allocation over 1KB are stored.
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
  MallocCallsitePointer malloc_callsite;

  // initialize malloc callsite array
  if (_malloc_cs == NULL) {
    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
    // out of native memory
    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
      return false;
    }
  } else {
    _malloc_cs->clear();
  }

  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);

  // sort into callsite pc order. Details are aggregated by callsites
  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
  bool ret = true;

  // only baseline callsites whose total allocated memory exceeds 1 KB
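  // records are now in callsite pc order, so all allocations from the same
  // callsite are adjacent and can be accumulated into one MallocCallsitePointer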
  while (malloc_ptr != NULL) {
    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
      // skip thread stacks
      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
        if (malloc_callsite.addr() != malloc_ptr->pc()) {
          if ((malloc_callsite.amount()/K) > 0) {
            if (!_malloc_cs->append(&malloc_callsite)) {
              ret = false;
              break;
            }
          }
          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
        }
        malloc_callsite.inc(malloc_ptr->size());
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // restore to address order. Snapshot malloc data is maintained in memory
  // address order.
  malloc_data->sort((FN_SORT)malloc_sort_by_addr);

  if (!ret) {
    return false;
  }
  // deal with last record
  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
    if (!_malloc_cs->append(&malloc_callsite)) {
      return false;
    }
  }
  return true;
}

// baseline mmap'd memory by callsites
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  VMCallsitePointer  vm_callsite;
  VMCallsitePointer* cur_callsite = NULL;
  MemPointerArrayIteratorImpl vm_itr(const_cast<MemPointerArray*>(vm_records));
  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();

  // initialize virtual memory map array
  if (_vm_map == NULL) {
    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
    if (_vm_map == NULL || _vm_map->out_of_memory()) {
      return false;
    }
  } else {
    _vm_map->clear();
  }

  // initialize virtual memory callsite array
  if (_vm_cs == NULL) {
    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }

  // consolidate virtual memory data
  VMMemRegionEx*     reserved_rec = NULL;
  VMMemRegionEx*     committed_rec = NULL;

  // vm_ptr is coming in increasing base address order
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      // consolidate reserved memory regions for virtual memory map.
      // The criteria for consolidation are:
      // 1. two adjacent reserved memory regions
      // 2. belong to the same memory type
      // 3. reserved from the same callsite
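      // For example (illustrative values, not real data): two reserved regions
      //   [0x1000, +0x1000, mtThread, pc=A] and [0x2000, +0x1000, mtThread, pc=A]
      // are merged into a single [0x1000, +0x2000, mtThread, pc=A] map entry.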
      if (reserved_rec == NULL ||
        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
        reserved_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        // a reserved region was just inserted; we need the pointer to the
        // element in the virtual memory map array
        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }

      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
        return false;
      }
      vm_callsite = VMCallsitePointer(vm_ptr->pc());
      cur_callsite = &vm_callsite;
      vm_callsite.inc(vm_ptr->size(), 0);
    } else {
      // consolidate committed memory regions for virtual memory map
      // The criteria are:
      // 1. two adjacent committed memory regions
      // 2. committed from the same callsite
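      // (e.g. two back-to-back 4KB commits from the same callsite fold into a
      //  single 8KB committed record; illustrative values)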
      if (committed_rec == NULL ||
        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
        committed_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }
      vm_callsite.inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegionEx*)vm_itr.next();
  }
  // deal with last record
  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
    return false;
  }

  // sort it into callsite pc order. Details are aggregated by callsites
  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);

  // walk the array to consolidate record by pc
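  // e.g. two entries for pc 0xABCD with (reserved, committed) amounts of
  // (4K, 4K) and (8K, 0) collapse into a single (12K, 4K) entry
  // (illustrative values)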
  MemPointerArrayIteratorImpl itr(_vm_cs);
  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
  while (next_rec != NULL) {
    assert(callsite_rec != NULL, "Sanity check");
    if (next_rec->addr() == callsite_rec->addr()) {
      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
      itr.remove();
      next_rec = (VMCallsitePointer*)itr.current();
    } else {
      callsite_rec = next_rec;
      next_rec = (VMCallsitePointer*)itr.next();
    }
  }

  return true;
}

// baseline a snapshot. If summary_only = false, memory usages aggregated by
// callsites are also baselined.
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
  MutexLockerEx snapshot_locker(snapshot._lock, true);
  reset();
  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
               baseline_vm_summary(snapshot._vm_ptrs);
  _number_of_classes = snapshot.number_of_classes();

  if (!summary_only && MemTracker::track_callsite() && _baselined) {
    _baselined = baseline_malloc_details(snapshot._alloc_ptrs) &&
                 baseline_vm_details(snapshot._vm_ptrs);
  }
  return _baselined;
}
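
// Illustrative usage of baseline() (a minimal sketch; the get_snapshot()
// accessor named below is hypothetical -- substitute however MemTracker
// exposes its current snapshot):
//
//   MemSnapshot* snapshot = MemTracker::get_snapshot();  // hypothetical
//   MemBaseline baseline;
//   if (snapshot != NULL && baseline.baseline(*snapshot, false)) {
//     // summary totals and per-callsite details are now available
//   }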


int MemBaseline::flag2index(MEMFLAGS flag) const {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == flag) {
      return index;
    }
  }
  assert(false, "no type");
  return -1;
}

const char* MemBaseline::type2name(MEMFLAGS type) {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == type) {
      return MemType2NameMap[index]._name;
    }
  }
  assert(false, err_msg("bad type %x", type));
  return NULL;
}


MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
  _total_malloced = other._total_malloced;
  _total_vm_reserved = other._total_vm_reserved;
  _total_vm_committed = other._total_vm_committed;

  _baselined = other._baselined;
  _number_of_classes = other._number_of_classes;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index] = other._malloc_data[index];
    _vm_data[index] = other._vm_data[index];
    _arena_data[index] = other._arena_data[index];
  }

  if (MemTracker::track_callsite()) {
    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
           "not properly baselined");
    _malloc_cs->clear();
    _vm_cs->clear();
    int index;
    for (index = 0; index < other._malloc_cs->length(); index ++) {
      _malloc_cs->append(other._malloc_cs->at(index));
    }

    for (index = 0; index < other._vm_cs->length(); index ++) {
      _vm_cs->append(other._vm_cs->at(index));
    }
  }
  return *this;
}

/* compare functions for sorting */
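// These are qsort-style comparators used with MemPointerArray::sort(), as in
// the calls above, e.g.:
//   malloc_data->sort((FN_SORT)malloc_sort_by_pc);
// The bl_*_by_size variants are not called in this file; presumably the
// reporting code sorts the baselined callsite arrays with them.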

// sort snapshot malloc'd records in callsite pc order
int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::track_callsite(),"Just check");
  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
}

// sort baselined malloc'd records in descending size order
int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
}

// sort baselined malloc'd records in callsite pc order
int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort baselined mmap'd records in descending size (reserved size) order
int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
}

// sort baselined mmap'd records in callsite pc order
int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort snapshot malloc'd records in memory block address order
int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  assert(delta != 0, "dup pointer");
  return delta;
}