/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"

static int sort_in_seq_order(const void* p1, const void* p2) {
  assert(p1 != NULL && p2 != NULL, "Sanity check");
  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
  return (mp1->seq() - mp2->seq());
}

bool StagingArea::init() {
  if (MemTracker::track_callsite()) {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
  }

  if (_malloc_data != NULL && _vm_data != NULL &&
      !_malloc_data->out_of_memory() &&
      !_vm_data->out_of_memory()) {
    return true;
  } else {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
    _malloc_data = NULL;
    _vm_data = NULL;
    return false;
  }
}


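// Return an iterator over the staged virtual memory records in the order the
// operations originally occurred. Unlike malloc records, VM records must be
// replayed chronologically, so sort them by sequence number first.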
MemPointerArrayIteratorImpl StagingArea::virtual_memory_record_walker() {
  MemPointerArray* arr = vm_data();
  // sort into seq number order
  arr->sort((FN_SORT)sort_in_seq_order);
  return MemPointerArrayIteratorImpl(arr);
}

MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(64, true);
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegion>(64, true);
  }

  _staging_area.init();
  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

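// Copy one malloc record over another record at the same address. When call
// sites are tracked the records are really MemPointerRecordEx instances, so
// copy through the derived type to avoid slicing off the call-site information.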
void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *dest = *src;
  }
}


// merge a per-thread memory recorder to the staging area
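// Staging-area invariant (verified by check_staging_data() in debug builds):
// malloc records stay sorted by address, and records at the same address are
// ordered by their tag bits, so promote() can replay them with a linear walk.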
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
  MemPointerRecord *p1, *p2;
  p1 = (MemPointerRecord*) itr.current();
  while (p1 != NULL) {
    if (p1->is_vm_pointer()) {
      // we don't do anything with virtual memory records during merge
      if (!_staging_area.vm_data()->append(p1)) {
        return false;
      }
    } else {
      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
      // we have not seen this memory block before, so just add it to the staging area
      if (p2 == NULL) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else if (p1->addr() == p2->addr()) {
        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
        // a memory block can have multiple tagging records; find the right one
        // to replace, or the right position to insert
        while (staging_next != NULL && staging_next->addr() == p1->addr()) {
          if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
            (p1->flags() & MemPointerRecord::tag_masks)) {
            p2 = (MemPointerRecord*)malloc_staging_itr.next();
            staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
          } else {
            break;
          }
        }
        int df = (p1->flags() & MemPointerRecord::tag_masks) -
          (p2->flags() & MemPointerRecord::tag_masks);
        if (df == 0) {
          assert(p1->seq() > 0, "not sequenced");
          assert(p2->seq() > 0, "not sequenced");
          if (p1->seq() > p2->seq()) {
            copy_pointer(p2, p1);
          }
        } else if (df < 0) {
          if (!malloc_staging_itr.insert(p1)) {
            return false;
          }
        } else {
          if (!malloc_staging_itr.insert_after(p1)) {
            return false;
          }
        }
      } else if (p1->addr() < p2->addr()) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else {
        if (!malloc_staging_itr.insert_after(p1)) {
          return false;
        }
      }
    }
    p1 = (MemPointerRecord*)itr.next();
  }
  DEBUG_ONLY(check_staging_data();)
  return true;
}



// promote data to next generation
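// Malloc records are promoted first, then virtual memory records. The staging
// area is cleared whether or not promotion succeeds; a false return means one
// of the snapshot arrays could not be expanded (out of memory).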
bool MemSnapshot::promote() {
  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
         "Just check");
  MutexLockerEx lock(_lock, true);

  MallocRecordIterator  malloc_itr = _staging_area.malloc_record_walker();
  bool promoted = false;
  if (promote_malloc_records(&malloc_itr)) {
    MemPointerArrayIteratorImpl vm_itr = _staging_area.virtual_memory_record_walker();
    if (promote_virtual_memory_records(&vm_itr)) {
      promoted = true;
    }
  }

  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area.clear();
  return promoted;
}

bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  MemPointerRecord* matched_rec;
  while (new_rec != NULL) {
    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
    // found matched memory block
    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
      // snapshot already contains 'live' records
      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
             "Sanity check");
      // update block states
      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
        copy_pointer(matched_rec, new_rec);
      } else {
        // a deallocation record
        assert(new_rec->is_deallocation_record(), "Sanity check");
        // an arena record can be followed by its size record; we need to remove both
        if (matched_rec->is_arena_record()) {
          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
          if (next != NULL && next->is_arena_size_record()) {
            // it has to match the arena record
            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
            malloc_snapshot_itr.remove();
          }
        }
        // the memory is deallocated, remove related record(s)
        malloc_snapshot_itr.remove();
      }
    } else {
      // it is a new record, insert into snapshot
      if (new_rec->is_arena_size_record()) {
        MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
        if (prev == NULL || !prev->is_arena_record() || !new_rec->is_size_record_of_arena(prev)) {
          // no matched arena record, ignore the size record
          new_rec = NULL;
        }
      }
      // only 'live' records can go into the snapshot
      if (new_rec != NULL) {
        if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
            }
          } else {
            if (!malloc_snapshot_itr.insert(new_rec)) {
              return false;
            }
          }
        }
#ifndef PRODUCT
        else if (!has_allocation_record(new_rec->addr())) {
          // NMT cannot track some startup memory, which is allocated before NMT is on
          _untracked_count ++;
        }
#endif
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

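// Promote staged virtual memory records into the snapshot. Records are matched
// against existing regions by base address: 'reserve' records resize or insert
// regions, 'commit'/'uncommit' records adjust committed sizes, tagging records
// assign a memory type, and 'release' records shrink or remove regions.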
bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  VMMemRegionEx new_vm_rec;
  VMMemRegion*  matched_rec;
  while (new_rec != NULL) {
    assert(new_rec->is_vm_pointer(), "Sanity check");
    if (MemTracker::track_callsite()) {
      new_vm_rec.init((MemPointerRecordEx*)new_rec);
    } else {
      new_vm_rec.init(new_rec);
    }
    matched_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
    if (matched_rec != NULL &&
        (matched_rec->contains(&new_vm_rec) || matched_rec->base() == new_vm_rec.base())) {
      // snapshot can only have 'live' records
      assert(matched_rec->is_reserve_record(), "Sanity check");
      if (new_vm_rec.is_reserve_record() && matched_rec->base() == new_vm_rec.base()) {
        // resize reserved virtual memory range
        // resize has to cover committed area
        assert(new_vm_rec.size() >= matched_rec->committed_size(), "Sanity check");
        matched_rec->set_reserved_size(new_vm_rec.size());
      } else if (new_vm_rec.is_commit_record()) {
        // commit memory inside reserved memory range
        assert(new_vm_rec.committed_size() <= matched_rec->reserved_size(), "Sanity check");
        // thread stacks are marked fully committed, so we ignore 'commit'
        // records that come from creating stack guard pages
        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) != mtThreadStack) {
          matched_rec->commit(new_vm_rec.committed_size());
        }
      } else if (new_vm_rec.is_uncommit_record()) {
        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtThreadStack) {
          // ignore 'uncommit' records that come from removing stack guard
          // pages; a thread stack is only uncommitted as a whole
          if (matched_rec->committed_size() == new_vm_rec.committed_size()) {
            matched_rec->uncommit(new_vm_rec.committed_size());
          }
        } else {
          // uncommit memory inside reserved memory range
          assert(new_vm_rec.committed_size() <= matched_rec->committed_size(),
                "Sanity check");
          matched_rec->uncommit(new_vm_rec.committed_size());
        }
      } else if (new_vm_rec.is_type_tagging_record()) {
        // tag this virtual memory range with a memory type;
        // a range cannot be re-tagged to a different type
        assert(FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtNone ||
               FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_vm_rec.flags()),
               "Sanity check");
        matched_rec->tag(new_vm_rec.flags());
      } else if (new_vm_rec.is_release_record()) {
        // release part or whole memory range
        if (new_vm_rec.base() == matched_rec->base() &&
            new_vm_rec.size() == matched_rec->size()) {
          // release whole virtual memory range
          assert(matched_rec->committed_size() == 0, "Sanity check");
          vm_snapshot_itr.remove();
        } else {
          // partial release
          matched_rec->partial_release(new_vm_rec.base(), new_vm_rec.size());
        }
      } else {
        // multiple reserve/commit on the same virtual memory range
        assert((new_vm_rec.is_reserve_record() || new_vm_rec.is_commit_record()) &&
          (new_vm_rec.base() == matched_rec->base() && new_vm_rec.size() == matched_rec->size()),
          "Sanity check");
        matched_rec->tag(new_vm_rec.flags());
      }
    } else {
      // no matched record
      if (new_vm_rec.is_reserve_record()) {
        if (matched_rec == NULL || matched_rec->base() > new_vm_rec.base()) {
          if (!vm_snapshot_itr.insert(&new_vm_rec)) {
            return false;
          }
        } else {
          if (!vm_snapshot_itr.insert_after(&new_vm_rec)) {
            return false;
          }
        }
      } else {
        // throw out obsolete records, which are the commit/uncommit/release/tag records
        // on memory regions that are already released.
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

#ifndef PRODUCT
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%]  %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tMalloc staging Area:     %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
    _staging_area.malloc_data()->capacity(),
    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
    _staging_area.malloc_data()->instance_size()/K);

  st->print_cr("\tVirtual memory staging Area:     %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
    _staging_area.vm_data()->capacity(),
    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
    _staging_area.vm_data()->instance_size()/K);

  st->print_cr("\tUntracked allocation: %d", _untracked_count);
}

void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}
#endif // PRODUCT

#ifdef ASSERT
void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
      ((next->flags() & MemPointerRecord::tag_masks) >
       (cur->flags() & MemPointerRecord::tag_masks)),
       "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }

  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
  cur = (MemPointerRecord*)vm_itr.current();
  while (cur != NULL) {
    assert(cur->is_vm_pointer(), "virtual memory pointer only");
    cur = (MemPointerRecord*)vm_itr.next();
  }
}
#endif // ASSERT