/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"

MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  {mtJavaHeap,   "Java Heap"},
  {mtClass,      "Class"},
  {mtThreadStack,"Thread Stack"},
  {mtThread,     "Thread"},
  {mtCode,       "Code"},
  {mtGC,         "GC"},
  {mtCompiler,   "Compiler"},
  {mtInternal,   "Internal"},
  {mtOther,      "Other"},
  {mtSymbol,     "Symbol"},
  {mtNMT,        "Memory Tracking"},
  {mtChunk,      "Pooled Free Chunks"},
  {mtClassShared,"Shared spaces for classes"},
  {mtTest,       "Test"},
  {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
                             // behind
};
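
// Note: the table above is scanned linearly by flag2index() and type2name();
// its entry order defines the index used for the per-type _malloc_data,
// _vm_data and _arena_data arrays.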

MemBaseline::MemBaseline() {
  _baselined = false;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    _arena_data[index].set_type(MemType2NameMap[index]._flag);
  }

  _malloc_cs = NULL;
  _vm_cs = NULL;
  _vm_map = NULL;

  _number_of_classes = 0;
  _number_of_threads = 0;
}


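// release the lazily allocated callsite arrays and virtual memory map, then
// reset all counters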
void MemBaseline::clear() {
  if (_malloc_cs != NULL) {
    delete _malloc_cs;
    _malloc_cs = NULL;
  }

  if (_vm_cs != NULL) {
    delete _vm_cs;
    _vm_cs = NULL;
  }

  if (_vm_map != NULL) {
    delete _vm_map;
    _vm_map = NULL;
  }

  reset();
}


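// zero all counters; any already-allocated arrays are emptied and kept for
// reuse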
void MemBaseline::reset() {
  _baselined = false;
  _total_vm_reserved = 0;
  _total_vm_committed = 0;
  _total_malloced = 0;
  _number_of_classes = 0;

  if (_malloc_cs != NULL) _malloc_cs->clear();
  if (_vm_cs != NULL) _vm_cs->clear();
  if (_vm_map != NULL) _vm_map->clear();

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].clear();
    _vm_data[index].clear();
    _arena_data[index].clear();
  }
}

MemBaseline::~MemBaseline() {
  clear();
}

// baseline malloc'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
  size_t used_arena_size = 0;
  int index;
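  // malloc records arrive in memory address order; an arena's memory usage
  // record, when present, immediately follows its arena record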
  while (malloc_ptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
    size_t size = malloc_ptr->size();
    if (malloc_ptr->is_arena_memory_record()) {
      // We do have anonymous arenas; they are either used as value objects,
      // which are embedded inside other objects, or used as stack objects.
      _arena_data[index].inc(size);
      used_arena_size += size;
    } else {
      _total_malloced += size;
      _malloc_data[index].inc(size);
      if (malloc_ptr->is_arena_record()) {
        // see if the arena's memory usage record follows; it may be absent,
        // so guard against a NULL peek at the end of the array
        MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
             "Arena records do not match");
          size = next_malloc_ptr->size();
          _arena_data[index].inc(size);
          used_arena_size += size;
          malloc_itr.next();
        }
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // subtract used arena size to get the size of arena chunks in the free list
  index = flag2index(mtChunk);
  _malloc_data[index].reduce(used_arena_size);
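  // e.g. if 10MB of chunks were malloc'd under mtChunk in total and arenas
  // currently use 8MB of them, the remaining 2MB worth of chunks sit in the
  // free list (figures are illustrative)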
  // we really don't know how many chunks are in the free list, so just set
  // the count to 0
  _malloc_data[index].overwrite_counter(0);

  return true;
}

// check if there is a safepoint in progress; if so, block the thread
// for the safepoint
void MemBaseline::check_safepoint(JavaThread* thr) {
  if (SafepointSynchronize::is_synchronizing()) {
    // grab and drop the SR_lock to honor the safepoint protocol
    MutexLocker ml(thr->SR_lock());
  }
}

// baseline mmap'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
  int index;
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
      // we use the number of thread stacks to count threads
      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
        _number_of_threads ++;
      }
      _total_vm_reserved += vm_ptr->size();
      _vm_data[index].inc(vm_ptr->size(), 0);
    } else {
      _total_vm_committed += vm_ptr->size();
      _vm_data[index].inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegion*)vm_itr.next();
  }
  return true;
}

// baseline malloc'd memory by callsite; only callsites with more than 1KB of
// allocated memory are stored.
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
  MallocCallsitePointer malloc_callsite;

  // initialize malloc callsite array
  if (_malloc_cs == NULL) {
    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
    // out of native memory
    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
      return false;
    }
  } else {
    _malloc_cs->clear();
  }

  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);

  // sort into callsite pc order. Details are aggregated by callsites
  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
  bool ret = true;

  // baseline callsites whose allocations total over 1 KB
  while (malloc_ptr != NULL) {
    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
      // skip thread stacks
      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
        if (malloc_callsite.addr() != malloc_ptr->pc()) {
          if ((malloc_callsite.amount()/K) > 0) {
            if (!_malloc_cs->append(&malloc_callsite)) {
              ret = false;
              break;
            }
          }
          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
        }
        malloc_callsite.inc(malloc_ptr->size());
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // restore to address order. Snapshot malloc data is maintained in memory
  // address order.
  malloc_data->sort((FN_SORT)malloc_sort_by_addr);

  if (!ret) {
    return false;
  }
  // deal with last record
  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
    if (!_malloc_cs->append(&malloc_callsite)) {
      return false;
    }
  }
  return true;
}

// baseline mmap'd memory by callsites
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  VMCallsitePointer  vm_callsite;
  VMCallsitePointer* cur_callsite = NULL;
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();

  // initialize virtual memory map array
  if (_vm_map == NULL) {
    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
    if (_vm_map == NULL || _vm_map->out_of_memory()) {
      return false;
    }
  } else {
    _vm_map->clear();
  }

  // initialize virtual memory callsite array
  if (_vm_cs == NULL) {
    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }

  // consolidate virtual memory data
  VMMemRegionEx*     reserved_rec = NULL;
  VMMemRegionEx*     committed_rec = NULL;

  // vm_ptr is coming in increasing base address order
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      // consolidate reserved memory regions for virtual memory map.
      // The criteria for consolidation are:
      // 1. two adjacent reserved memory regions
      // 2. belong to the same memory type
      // 3. reserved from the same callsite
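      // e.g. regions [0x1000, 0x2000) and [0x2000, 0x3000) of the same type,
      // reserved from the same callsite, merge into [0x1000, 0x3000)
      // (addresses are illustrative)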
      if (reserved_rec == NULL ||
        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
        reserved_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        // inserted reserved region, we need the pointer to the element in virtual
        // memory map array.
        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }

      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
Z
zgu 已提交
306 307
      return false;
    }
      vm_callsite = VMCallsitePointer(vm_ptr->pc());
      cur_callsite = &vm_callsite;
      vm_callsite.inc(vm_ptr->size(), 0);
    } else {
      // consolidate committed memory regions for virtual memory map
      // The criteria are:
      // 1. two adjacent committed memory regions
      // 2. committed from the same callsite
      if (committed_rec == NULL ||
        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
        committed_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }
      vm_callsite.inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegionEx*)vm_itr.next();
  }
  // deal with last record
  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
    return false;
  }

  // sort it into callsite pc order. Details are aggregated by callsites
  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);

  // walk the array to consolidate records by pc
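  // e.g. two entries with the same pc, one with 4MB reserved and one with
  // 2MB reserved, collapse into a single entry with 6MB reserved (figures
  // are illustrative)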
  MemPointerArrayIteratorImpl itr(_vm_cs);
  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
  while (next_rec != NULL) {
    assert(callsite_rec != NULL, "Sanity check");
    if (next_rec->addr() == callsite_rec->addr()) {
      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
      itr.remove();
      next_rec = (VMCallsitePointer*)itr.current();
    } else {
      callsite_rec = next_rec;
      next_rec = (VMCallsitePointer*)itr.next();
    }
  }

  return true;
}

// baseline a snapshot. If summary_only = false, memory usages aggregated by
// callsites are also baselined.
// The method call can be lengthy, especially when detail tracking info is
// requested, so the method checks for safepoints explicitly.
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
  Thread* THREAD = Thread::current();
  assert(THREAD->is_Java_thread(), "must be a JavaThread");
  MutexLocker snapshot_locker(snapshot._lock);
  reset();
  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
  if (_baselined) {
    check_safepoint((JavaThread*)THREAD);
    _baselined = baseline_vm_summary(snapshot._vm_ptrs);
  }
  _number_of_classes = snapshot.number_of_classes();

  if (!summary_only && MemTracker::track_callsite() && _baselined) {
    check_safepoint((JavaThread*)THREAD);
    _baselined = baseline_malloc_details(snapshot._alloc_ptrs);
    if (_baselined) {
      check_safepoint((JavaThread*)THREAD);
      _baselined = baseline_vm_details(snapshot._vm_ptrs);
    }
  }
  return _baselined;
}


int MemBaseline::flag2index(MEMFLAGS flag) const {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == flag) {
      return index;
    }
  }
  assert(false, "no type");
  return -1;
}

const char* MemBaseline::type2name(MEMFLAGS type) {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == type) {
      return MemType2NameMap[index]._name;
    }
  }
  assert(false, err_msg("bad type %x", type));
  return NULL;
}


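// deep-copy another baseline: summary counters are copied directly; when
// callsite tracking is on, the callsite arrays are copied element by element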
MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
  _total_malloced = other._total_malloced;
  _total_vm_reserved = other._total_vm_reserved;
  _total_vm_committed = other._total_vm_committed;

  _baselined = other._baselined;
  _number_of_classes = other._number_of_classes;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index] = other._malloc_data[index];
    _vm_data[index] = other._vm_data[index];
    _arena_data[index] = other._arena_data[index];
  }

  if (MemTracker::track_callsite()) {
    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
           "not properly baselined");
    _malloc_cs->clear();
    _vm_cs->clear();
    int index;
    for (index = 0; index < other._malloc_cs->length(); index ++) {
      _malloc_cs->append(other._malloc_cs->at(index));
    }

    for (index = 0; index < other._vm_cs->length(); index ++) {
      _vm_cs->append(other._vm_cs->at(index));
    }
  }
  return *this;
}

/* compare functions for sorting */

// sort snapshot malloc'd records in callsite pc order
int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::track_callsite(),"Just check");
  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
}

// sort baselined malloc'd records in size order
int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
}

// sort baselined malloc'd records in callsite pc order
int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort baselined mmap'd records in size (reserved size) order
int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
}

// sort baselined mmap'd records in callsite pc order
int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort snapshot malloc'd records in memory block address order
int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  assert(delta != 0, "dup pointer");
  return delta;
}