/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionBounds.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

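// HeapRegionDCTOC is G1's DirtyCardToOopClosure: walk_mem_region() below
// visits the objects spanning a dirty card and applies the remembered set
// scan closure to the live ones, using the region's marking information to
// skip objects that are known to be dead.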
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                 HeapRegion* hr,
                                 G1ParPushHeapRSClosure* cl,
                                 CardTableModRefBS::PrecisionStyle precision) :
  DirtyCardToOopClosure(hr, cl, precision, NULL),
  _hr(hr), _rs_scan(cl), _g1(g1) { }

FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                   OopClosure* oc) :
  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }

void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  G1CollectedHeap* g1h = _g1;
  size_t oop_size;
  HeapWord* cur = bottom;

  // Start filtering what we add to the remembered set. If the object is
  // not considered dead, either because it is marked (in the mark bitmap)
  // or it was allocated after marking finished, then we add it. Otherwise
  // we can safely ignore the object.
  if (!g1h->is_obj_dead(oop(cur), _hr)) {
    oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
  } else {
    oop_size = _hr->block_size(cur);
  }

  cur += oop_size;

  if (cur < top) {
    oop cur_oop = oop(cur);
    oop_size = _hr->block_size(cur);
    HeapWord* next_obj = cur + oop_size;
    while (next_obj < top) {
      // Keep filtering the remembered set.
      if (!g1h->is_obj_dead(cur_oop, _hr)) {
        // The object lies entirely below top, so we can call the
        // non-memRegion version of oop_iterate below.
        cur_oop->oop_iterate(_rs_scan);
      }
      cur = next_obj;
      cur_oop = oop(cur);
      oop_size = _hr->block_size(cur);
      next_obj = cur + oop_size;
    }

    // Last object. Need to do dead-obj filtering here too.
    if (!g1h->is_obj_dead(oop(cur), _hr)) {
      oop(cur)->oop_iterate(_rs_scan, mr);
    }
  }
}

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

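// Compute the region size for this JVM instance. If G1HeapRegionSize is not
// set on the command line, the size is derived from the average of the
// initial and maximum heap sizes divided by the target region count, rounded
// down to a power of two and clamped to
// [HeapRegionBounds::min_size(), HeapRegionBounds::max_size()].
// Worked example (assuming the usual target of 2048 regions): with -Xms2g
// -Xmx8g the average heap size is 5g, 5g / 2048 is about 2.5m, and the
// resulting region size is 2m.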
void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  uintx region_size = G1HeapRegionSize;
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       (uintx) HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((uintx)1 << region_size_log);

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;

  guarantee(GrainBytes == 0, "we should only set it once");
  // The cast to size_t is safe, given that we've bounded region_size by
  // HeapRegionBounds::min_size() and HeapRegionBounds::max_size().
  GrainBytes = (size_t)region_size;

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
}

void HeapRegion::reset_after_compaction() {
  G1OffsetTableContigSpace::reset_after_compaction();
  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();
}

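// Reset the per-region metadata (collection set membership, allocation
// context, young index, survivor rate group, remembered set, claim value and
// marking information) so that the region can be reused as a free region.
// Humongous regions are expected to have been filtered out by the caller.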
void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(_end == _orig_end,
         "we should have already filtered out humongous regions");

  _in_collection_set = false;

  set_allocation_context(AllocationContext::system());
  set_young_index_in_cset(-1);
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!par) {
    // If this is parallel, this will be done later.
    HeapRegionRemSet* hrrs = rem_set();
    if (locked) {
      hrrs->clear_locked();
    } else {
      hrrs->clear();
    }
    _claimed = InitialClaimValue;
  }
  zero_marked_bytes();

  _offsets.resize(HeapRegion::GrainWords);
  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

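// Clear the remembered set and the card table entries covering this region.
// The region itself must already be empty; only the external structures are
// reset here.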
void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  CardTableModRefBS* ct_bs =
                   (CardTableModRefBS*)G1CollectedHeap::heap()->barrier_set();
  ct_bs->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

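// A humongous object is laid out across a series of contiguous regions: the
// first region is tagged "starts humongous" and has its end and block offset
// table adjusted to cover the object, while every following region is tagged
// "continues humongous" and points back at the first region.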
void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(bottom() <= new_top && new_top <= new_end, "pre-condition");

  _type.set_starts_humongous();
  _humongous_start_region = this;

  set_end(new_end);
  _offsets.set_for_starts_humongous(new_top);
}

void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
  assert(!isHumongous(), "sanity / pre-condition");
  assert(end() == _orig_end,
         "Should be normal before the humongous object allocation");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->startsHumongous(), "pre-condition");

  _type.set_continues_humongous();
  _humongous_start_region = first_hr;
}

void HeapRegion::clear_humongous() {
  assert(isHumongous(), "pre-condition");

  if (startsHumongous()) {
    assert(top() <= end(), "pre-condition");
    set_end(_orig_end);
    if (top() > end()) {
      // at least one "continues humongous" region after it
      set_top(end());
    }
  } else {
    // continues humongous
    assert(end() == _orig_end, "sanity");
  }

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;
}

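// Claim this region with a single compare-and-swap on the claim value.
// Parallel workers use this so that each region is processed by exactly one
// thread; the call returns true only for the thread whose CAS succeeds.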
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) {
      return true;
    }
  }
  return false;
}

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetSharedArray* sharedOffsetArray,
                       MemRegion mr) :
    G1OffsetTableContigSpace(sharedOffsetArray, mr),
    _hrm_index(hrm_index),
    _allocation_context(AllocationContext::system()),
    _humongous_start_region(NULL),
    _in_collection_set(false),
    _next_in_special_set(NULL), _orig_end(NULL),
    _claimed(InitialClaimValue), _evacuation_failed(false),
    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
    _next_young_region(NULL),
    _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
#ifdef ASSERT
    _containing_set(NULL),
#endif // ASSERT
    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
    _predicted_bytes_to_copy(0)
{
  _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
  assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);

  _orig_end = mr.end();
  hr_clear(false /*par*/, false /*clear_space*/);
  set_top(bottom());
  record_timestamp();
}

CompactibleSpace* HeapRegion::next_compaction_space() const {
  return G1CollectedHeap::heap()->next_compaction_region(this);
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
                                                  bool during_conc_mark,
                                                  size_t marked_bytes) {
  assert(0 <= marked_bytes && marked_bytes <= used(),
         err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
                 marked_bytes, used()));
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

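// Apply cl to every live object that intersects mr. Returns NULL if the whole
// range was walked, or the address at which iteration stopped because an
// unparseable object was reached or the closure requested an abort.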
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
                                       ObjectClosure* cl) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(mr.start());
  mr = mr.intersection(used_region());
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  assert(cur <= mr.start()
         && (oop(cur)->klass_or_null() == NULL ||
             cur + oop(cur)->size() > mr.start()),
         "postcondition of block_start");
  oop obj;
  while (cur < mr.end()) {
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    } else if (!g1h->is_obj_dead(obj)) {
      cl->do_object(obj);
    }
    if (cl->abort()) return cur;
    // The check above must occur before the operation below, since an
    // abort might invalidate the "size" operation.
    cur += block_size(cur);
  }
  return NULL;
}

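// Walk the objects that intersect the card-sized MemRegion mr and apply the
// filtering closure to the live ones. Returns NULL when the whole range has
// been processed, or the address of an unparseable object if one is
// encountered.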
HeapWord*
HeapRegion::
oops_on_card_seq_iterate_careful(MemRegion mr,
                                 FilterOutOfRegionClosure* cl,
                                 bool filter_young,
                                 jbyte* card_ptr) {
  // Currently, we should only have to clean the card if filter_young
  // is true and vice versa.
  if (filter_young) {
    assert(card_ptr != NULL, "pre-condition");
  } else {
    assert(card_ptr == NULL, "pre-condition");
  }
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // If we're within a stop-world GC, then we might look at a card in a
  // GC alloc region that extends onto a GC LAB, which may not be
  // parseable.  Stop such at the "scan_top" of the region.
  if (g1h->is_gc_active()) {
    mr = mr.intersection(MemRegion(bottom(), scan_top()));
  } else {
    mr = mr.intersection(used_region());
  }
  if (mr.is_empty()) return NULL;
  // Otherwise, find the obj that extends onto mr.start().

  // The intersection of the incoming mr (for the card) and the
  // allocated part of the region is non-empty. This implies that
  // we have actually allocated into this region. The code in
  // G1CollectedHeap.cpp that allocates a new region sets the
  // is_young tag on the region before allocating. Thus we
  // safely know if this region is young.
  if (is_young() && filter_young) {
    return NULL;
  }

  assert(!is_young(), "check value of filter_young");

  // We can only clean the card here, after we make the decision that
  // the card is not young. And we only clean the card if we have been
  // asked to (i.e., card_ptr != NULL).
  if (card_ptr != NULL) {
    *card_ptr = CardTableModRefBS::clean_card_val();
    // We must complete this write before we do any of the reads below.
    OrderAccess::storeload();
  }

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // We used to use "block_start_careful" here.  But we're actually happy
  // to update the BOT while we do this...
  HeapWord* cur = block_start(start);
  assert(cur <= start, "Postcondition");

  oop obj;

  HeapWord* next = cur;
  do {
    cur = next;
    obj = oop(cur);
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }
    // Otherwise...
    next = cur + block_size(cur);
  } while (next <= start);

  // If we finish the above loop, we have a parseable object that
  // begins on or before the start of the memory region, and ends
  // inside or spans the entire region.
  assert(cur <= start, "Loop postcondition");
  assert(obj->klass_or_null() != NULL, "Loop postcondition");

  do {
    obj = oop(cur);
    assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
    if (obj->klass_or_null() == NULL) {
      // Ran into an unparseable point.
      return cur;
    }

    // Advance the current pointer. "obj" still points to the object to iterate.
    cur = cur + block_size(cur);

    if (!g1h->is_obj_dead(obj)) {
      // Non-objArrays are sometimes marked imprecise at the object start. We
      // always need to iterate over them in full.
      // We only iterate over object arrays in full if they are completely contained
      // in the memory region.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return NULL;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  nmethod* _nm;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it is below top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT") is above "
                                 "top "PTR_FORMAT,
                                 (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
    _hr(hr), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
                               PTR_FORMAT" in its strong code roots",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
                                 PTR_FORMAT" in its strong code roots "
                                 "with no pointers into region",
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        } else if (oop_cl.failures()) {
          gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
                                 "failures for nmethod "PTR_FORMAT,
                                 _hr->bottom(), _hr->end(), nm);
          _failures = true;
        }
      }
    }
  }

  bool failures()       { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseMarkWord) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots in this particular
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // if this region is empty then there should be no entries
  // on its strong code root list
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is empty "
                             "but has "SIZE_FORMAT" code root entries",
                             bottom(), end(), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (continuesHumongous()) {
    if (strong_code_roots_length > 0) {
      gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous "
                             "region but has "SIZE_FORMAT" code root entries",
                             HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("AC%4u", allocation_context());
  st->print(" %2s", get_short_type_str());
  if (in_collection_set())
    st->print(" CS");
  else
    st->print("   ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}

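// VerifyLiveClosure checks every reference field of a live object: the
// referenced object must be inside the heap and live under the chosen
// VerifyOption, and a reference from a non-young region into a different,
// non-humongous region must be covered by a remembered set entry (or by a
// dirty card when the update log buffers have not been flushed).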
class VerifyLiveClosure: public OopClosure {
private:
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _bs;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseMarkWord    -> use mark word from object header.
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _bs(NULL), _containing_obj(NULL),
    _failures(false), _n_failures(0), _vo(vo)
  {
    BarrierSet* bs = _g1h->barrier_set();
    if (bs->is_a(BarrierSet::CardTableModRef))
      _bs = (CardTableModRefBS*)bs;
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = InstanceKlass::cast(k)->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLockerEx x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("----------");
        }
        if (!_g1h->is_in_closed_subset(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                                 (void*) obj);
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of live obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 p, (void*) _containing_obj,
                                 from->bottom(), from->end());
          print_object(gclog_or_tty, _containing_obj);
          gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                                 "["PTR_FORMAT", "PTR_FORMAT")",
                                 (void*) obj, to->bottom(), to->end());
          print_object(gclog_or_tty, obj);
        }
        gclog_or_tty->print_cr("----------");
        gclog_or_tty->flush();
        _failures = true;
        failed = true;
        _n_failures++;
      }

      if (!_g1h->full_collection() || G1VerifyRSetsDuringFullGC) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing(obj);
        if (from != NULL && to != NULL &&
            from != to &&
            !to->isHumongous()) {
          jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
          jbyte cv_field = *_bs->byte_for_const(p);
          const jbyte dirty = CardTableModRefBS::dirty_card_val();

          bool is_bad = !(from->is_young()
                          || to->rem_set()->contains_reference(p)
                          || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                              (_containing_obj->is_objArray() ?
                                  cv_field == dirty
                               : cv_obj == dirty || cv_field == dirty));
          if (is_bad) {
            MutexLockerEx x(ParGCRareEvent_lock,
                            Mutex::_no_safepoint_check_flag);

            if (!_failures) {
              gclog_or_tty->cr();
              gclog_or_tty->print_cr("----------");
            }
            gclog_or_tty->print_cr("Missing rem set entry:");
            gclog_or_tty->print_cr("Field "PTR_FORMAT" "
                                   "of obj "PTR_FORMAT", "
                                   "in region "HR_FORMAT,
                                   p, (void*) _containing_obj,
                                   HR_FORMAT_PARAMS(from));
            _containing_obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
                                   "in region "HR_FORMAT,
                                   (void*) obj,
                                   HR_FORMAT_PARAMS(to));
            obj->print_on(gclog_or_tty);
            gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                          cv_obj, cv_field);
            gclog_or_tty->print_cr("----------");
            gclog_or_tty->flush();
            _failures = true;
            if (!failed) _n_failures++;
          }
        }
      }
    }
  }
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

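// Walk the region from bottom() to top(), checking that object sizes are
// consistent with the region's humongous-ness, that the block offset table
// agrees with the object layout, that each live object has a valid klass and
// that its references satisfy VerifyLiveClosure. Finally check the BOT
// entries for the unallocated tail [top(), end()) and the strong code roots.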
void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1, vo);
  bool is_humongous = isHumongous();
  bool do_bot_verify = !is_young();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (is_humongous != g1->isHumongous(obj_size) &&
        !g1->is_obj_dead(obj, this)) { // Dead objects may have bigger block_size since they span several objects.
      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
                             SIZE_FORMAT" words) in a %shumongous region",
                             p, g1->isHumongous(obj_size) ? "" : "non-",
                             obj_size, is_humongous ? "" : "non-");
      *failures = true;
      return;
    }

    // If it returns false, verify_for_object() will output the
    // appropriate message.
    if (do_bot_verify &&
        !g1->is_obj_dead(obj, this) &&
        !_offsets.verify_for_object(p, obj_size)) {
      *failures = true;
      return;
    }

    if (!g1->is_obj_dead_cond(obj, this, vo)) {
      if (obj->is_oop()) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass) ||
                                   (vo == VerifyOption_G1UsePrevMarking &&
                                   ClassLoaderDataGraph::unload_list_contains(klass));
        if (!is_metaspace_object) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not metadata", klass, (void *)obj);
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
                                 "not a klass", klass, (void *)obj);
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          obj->oop_iterate_no_header(&vl_cl);
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", (void *)obj);
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (p != top()) {
    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                           "does not match top "PTR_FORMAT, p, top());
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  assert(p == top(), "it should still hold");
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
    if (b_start_1 != p) {
      gclog_or_tty->print_cr("BOT look up for top: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_1, b_start_1, p);
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
      if (b_start_2 != p) {
        gclog_or_tty->print_cr("BOT look up for top + 1: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_2, b_start_2, p);
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
      if (b_start_3 != p) {
        gclog_or_tty->print_cr("BOT look up for top + diff: "PTR_FORMAT" "
                               " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                               addr_3, b_start_3, p);
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
    if (b_start_4 != p) {
      gclog_or_tty->print_cr("BOT look up for end - 1: "PTR_FORMAT" "
                             " yielded "PTR_FORMAT", expecting "PTR_FORMAT,
                             addr_4, b_start_4, p);
      *failures = true;
      return;
    }
  }

  if (is_humongous && object_num > 1) {
    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
                           "but has "SIZE_FORMAT" objects",
                           bottom(), end(), object_num);
    *failures = true;
    return;
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

// G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
// away eventually.

void G1OffsetTableContigSpace::clear(bool mangle_space) {
  set_top(bottom());
  _scan_top = bottom();
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}

void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
  Space::set_end(new_end);
  _offsets.resize(new_end - bottom());
}

void G1OffsetTableContigSpace::print() const {
  print_short();
  gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), _offsets.threshold(), end());
}

HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
                                                    HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

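// Return the maximum address up to which it is safe to scan objects in this
// region during a GC. If the region's time stamp is older than the heap's
// current GC time stamp, the region has not been allocated into during this
// GC and top() is safe; otherwise the recorded _scan_top is used (bottom(),
// or top() for a retained allocation region).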
HeapWord* G1OffsetTableContigSpace::scan_top() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* local_top = top();
  OrderAccess::loadload();
  const unsigned local_time_stamp = _gc_time_stamp;
  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
  if (local_time_stamp < g1h->get_gc_time_stamp()) {
    return local_top;
  } else {
    return _scan_top;
  }
}

void G1OffsetTableContigSpace::record_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // Setting the time stamp here tells concurrent readers to look at
    // scan_top to know the maximum allowed address to look at.

    // scan_top should be bottom for all regions except for the
    // retained old alloc region which should have scan_top == top
    HeapWord* st = _scan_top;
    guarantee(st == _bottom || st == _top, "invariant");

    _gc_time_stamp = curr_gc_time_stamp;
  }
}

void G1OffsetTableContigSpace::record_retained_region() {
  // scan_top is the maximum address where it's safe for the next gc to
  // scan this region.
  _scan_top = top();
}

void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

#define block_is_always_obj(q) true
void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
}
#undef block_is_always_obj

G1OffsetTableContigSpace::
G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _gc_time_stamp(0)
{
  _offsets.set_space(this);
}

void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  _scan_top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}