/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

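// A PerRegionTable records, as a bit map over the cards of a single "from"
// region, which cards in that region may contain pointers into the owning
// region. Instances are recycled through a global lock-free free list
// (_free_list) and linked into the owning table's 'all' and collision lists.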
class PerRegionTable: public CHeapObj<mtGC> {
  friend class OtherRegionsTable;
  friend class HeapRegionRemSetIterator;

  HeapRegion*     _hr;
  BitMap          _bm;
  jint            _occupied;

  // next pointer for free/allocated 'all' list
  PerRegionTable* _next;

  // prev pointer for the allocated 'all' list
  PerRegionTable* _prev;

  // next pointer in collision list
  PerRegionTable * _collision_list_next;

  // Global free list of PRTs
  static PerRegionTable* _free_list;

protected:
  // We need access in order to union things into the base table.
  BitMap* bm() { return &_bm; }

  void recount_occupied() {
    _occupied = (jint) bm()->count_one_bits();
  }

  PerRegionTable(HeapRegion* hr) :
    _hr(hr),
    _occupied(0),
    _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */),
    _collision_list_next(NULL), _next(NULL), _prev(NULL)
  {}

  void add_card_work(CardIdx_t from_card, bool par) {
    if (!_bm.at(from_card)) {
      if (par) {
        if (_bm.par_at_put(from_card, 1)) {
          Atomic::inc(&_occupied);
        }
      } else {
        _bm.at_put(from_card, 1);
        _occupied++;
      }
    }
  }

  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
                             from,
                             UseCompressedOops
                             ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
                             : (void *)oopDesc::load_decode_heap_oop((oop*)from));
    }

    HeapRegion* loc_hr = hr();
    // If the test below fails, then this table was reused concurrently
    // with this operation.  This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
    // If the table used to belong to a continues humongous region and is
    // now reused for the corresponding start humongous region, we need to
    // make sure that we detect this. Thus, we call is_in_reserved_raw()
    // instead of just is_in_reserved() here.
    if (loc_hr->is_in_reserved_raw(from)) {
      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
      CardIdx_t from_card = (CardIdx_t)
          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);

      assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
             "Must be in range.");
      add_card_work(from_card, par);
    }
  }

public:

  HeapRegion* hr() const { return _hr; }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }
    _hr = hr;
    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
  }

  void add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ true);
  }

  void seq_add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ false);
  }

  void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
    HeapWord* hr_bot = hr()->bottom();
    size_t hr_first_card_index = ctbs->index_for(hr_bot);
    bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
    recount_occupied();
  }

  void add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ true);
  }

  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // (Destructively) union the bitmap of the current table into the given
  // bitmap (which is assumed to be of the same size.)
  void union_bitmap_into(BitMap* bm) {
    bm->set_union(_bm);
  }

  // Mem size in bytes.
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    CardTableModRefBS::card_size);
    return _bm.at(card_ind);
  }

  // Bulk-free the PRTs from prt to last, assumes that they are
  // linked together using their _next field.
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* fl = _free_list;
      last->set_next(fl);
      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
      if (res == fl) {
        return;
      }
    }
    ShouldNotReachHere();
  }

  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }

  // Returns an initialized PerRegionTable instance.
  static PerRegionTable* alloc(HeapRegion* hr) {
    PerRegionTable* fl = _free_list;
    while (fl != NULL) {
      PerRegionTable* nxt = fl->next();
      PerRegionTable* res =
        (PerRegionTable*)
        Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
      if (res == fl) {
        fl->init(hr, true);
        return fl;
      } else {
        fl = _free_list;
      }
    }
    assert(fl == NULL, "Loop condition.");
    return new PerRegionTable(hr);
  }

  PerRegionTable* next() const { return _next; }
  void set_next(PerRegionTable* next) { _next = next; }
  PerRegionTable* prev() const { return _prev; }
  void set_prev(PerRegionTable* prev) { _prev = prev; }

  // Accessor and Modification routines for the pointer for the
  // singly linked collision list that links the PRTs within the
  // OtherRegionsTable::_fine_grain_regions hash table.
  //
  // It might be useful to also make the collision list doubly linked
  // to avoid iteration over the collisions list during scrubbing/deletion.
  // OTOH there might not be many collisions.

  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }

  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }

  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }

  static size_t fl_mem_size() {
    PerRegionTable* cur = _free_list;
    size_t res = 0;
    while (cur != NULL) {
      res += cur->mem_size();
      cur = cur->next();
    }
    return res;
  }

  static void test_fl_mem_size();
};

PerRegionTable* PerRegionTable::_free_list = NULL;

size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;

OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
  _g1h(G1CollectedHeap::heap()),
  _hr(hr), _m(m),
  _coarse_map(G1CollectedHeap::heap()->max_regions(),
              false /* in-resource-area */),
  _fine_grain_regions(NULL),
  _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
  _n_fine_entries(0), _n_coarse_entries(0),
  _fine_eviction_start(0),
  _sparse_table(hr)
{
  typedef PerRegionTable* PerRegionTablePtr;

  if (_max_fine_entries == 0) {
    assert(_mod_max_fine_entries_mask == 0, "Both or none.");
    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
    _max_fine_entries = (size_t)1 << max_entries_log;
    _mod_max_fine_entries_mask = _max_fine_entries - 1;

    assert(_fine_eviction_sample_size == 0
           && _fine_eviction_stride == 0, "All init at same time.");
    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
  }

  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
                        mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

  if (_fine_grain_regions == NULL) {
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
                          "Failed to allocate _fine_grain_entries.");
  }

  for (size_t i = 0; i < _max_fine_entries; i++) {
    _fine_grain_regions[i] = NULL;
  }
}

void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
  // We always append to the beginning of the list for convenience;
  // the order of entries in this list does not matter.
  if (_first_all_fine_prts != NULL) {
    assert(_first_all_fine_prts->prev() == NULL, "invariant");
    _first_all_fine_prts->set_prev(prt);
    prt->set_next(_first_all_fine_prts);
  } else {
    // this is the first element we insert. Adjust the "last" pointer
    _last_all_fine_prts = prt;
    assert(prt->next() == NULL, "just checking");
  }
  // the new element is always the first element without a predecessor
  prt->set_prev(NULL);
  _first_all_fine_prts = prt;

  assert(prt->prev() == NULL, "just checking");
  assert(_first_all_fine_prts == prt, "just checking");
  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
  if (prt->prev() != NULL) {
    assert(_first_all_fine_prts != prt, "just checking");
    prt->prev()->set_next(prt->next());
    // removing the last element in the list?
    if (_last_all_fine_prts == prt) {
      _last_all_fine_prts = prt->prev();
    }
  } else {
    assert(_first_all_fine_prts == prt, "just checking");
    _first_all_fine_prts = prt->next();
    // list is empty now?
    if (_first_all_fine_prts == NULL) {
      _last_all_fine_prts = NULL;
    }
  }

  if (prt->next() != NULL) {
    prt->next()->set_prev(prt->prev());
  }

  prt->set_next(NULL);
  prt->set_prev(NULL);

  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

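// The FromCardCache remembers, for each worker id and each target region,
// the last card added to that region's remembered set, so that repeated
// additions of the same card can be filtered out cheaply in add_reference().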
int**  FromCardCache::_cache = NULL;
uint   FromCardCache::_max_regions = 0;
size_t FromCardCache::_static_mem_size = 0;

void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
  guarantee(_cache == NULL, "Should not call this multiple times");

  _max_regions = max_num_regions;
  _cache = Padded2DArray<int, mtGC>::create_unfreeable(n_par_rs,
                                                       _max_regions,
                                                       &_static_mem_size);

  invalidate(0, _max_regions);
}

void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
  guarantee((size_t)start_idx + new_num_regions <= max_uintx,
            err_msg("Trying to invalidate beyond maximum region, from %u size "SIZE_FORMAT,
                    start_idx, new_num_regions));
  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
    uint end_idx = (start_idx + (uint)new_num_regions);
    assert(end_idx <= _max_regions, "Must be within max.");
    for (uint j = start_idx; j < end_idx; j++) {
      set(i, j, InvalidCard);
    }
  }
}

#ifndef PRODUCT
void FromCardCache::print(outputStream* out) {
  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
    for (uint j = 0; j < _max_regions; j++) {
      out->print_cr("_from_card_cache["UINT32_FORMAT"]["UINT32_FORMAT"] = "INT32_FORMAT".",
                    i, j, at(i, j));
    }
  }
}
#endif

void FromCardCache::clear(uint region_idx) {
  uint num_par_remsets = HeapRegionRemSet::num_par_rem_sets();
  for (uint i = 0; i < num_par_remsets; i++) {
    set(i, region_idx, InvalidCard);
  }
}

void OtherRegionsTable::initialize(uint max_regions) {
  FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
}

void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
  FromCardCache::invalidate(start_idx, num_regions);
}

void OtherRegionsTable::print_from_card_cache() {
  FromCardCache::print();
}

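// Adds a reference into this remembered set, escalating through the
// representations: the from-card cache filters duplicate cards, already
// coarsened regions are skipped, otherwise the card goes into the sparse
// table and, once that overflows, into a fine-grain PerRegionTable
// (possibly evicting and coarsening another table if the fine table is full).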
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
  uint cur_hrm_ind = hr()->hrm_index();

  if (G1TraceHeapRegionRememberedSet) {
    gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
                                                    from,
                                                    UseCompressedOops
                                                    ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
                                                    : (void *)oopDesc::load_decode_heap_oop((oop*)from));
  }

  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

  if (G1TraceHeapRegionRememberedSet) {
    gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = "INT32_FORMAT")",
                  hr()->bottom(), from_card,
                  FromCardCache::at((uint)tid, cur_hrm_ind));
  }

  if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) {
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("  from-card cache hit.");
    }
    assert(contains_reference(from), "We just added it!");
    return;
  }

  // Note that this may be a continued H region.
  HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
  RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();

  // If the region is already coarsened, return.
  if (_coarse_map.at(from_hrm_ind)) {
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("  coarse map hit.");
    }
    assert(contains_reference(from), "We just added it!");
    return;
  }

  // Otherwise find a per-region table to add it to.
  size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
  PerRegionTable* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      uintptr_t from_hr_bot_card_index =
        uintptr_t(from_hr->bottom())
          >> CardTableModRefBS::card_shift;
      CardIdx_t card_index = from_card - from_hr_bot_card_index;
      assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
             "Must be in range.");
      if (G1HRRSUseSparseTable &&
          _sparse_table.add_card(from_hrm_ind, card_index)) {
        if (G1RecordHRRSOops) {
          HeapRegionRemSet::record(hr(), from);
          if (G1TraceHeapRegionRememberedSet) {
            gclog_or_tty->print("   Added card " PTR_FORMAT " to region "
                                "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                                align_size_down(uintptr_t(from),
                                                CardTableModRefBS::card_size),
                                hr()->bottom(), from);
          }
        }
        if (G1TraceHeapRegionRememberedSet) {
          gclog_or_tty->print_cr("   added card to sparse table.");
        }
        assert(contains_reference_locked(from), "We just added it!");
        return;
      } else {
        if (G1TraceHeapRegionRememberedSet) {
          gclog_or_tty->print_cr("   [tid %d] sparse table entry "
                        "overflow(f: %d, t: %u)",
                        tid, from_hrm_ind, cur_hrm_ind);
        }
      }

      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      PerRegionTable* first_prt = _fine_grain_regions[ind];
      prt->set_collision_list_next(first_prt);
      _fine_grain_regions[ind] = prt;
      _n_fine_entries++;

      if (G1HRRSUseSparseTable) {
        // Transfer from sparse to fine-grain.
        SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
        assert(sprt_entry != NULL, "There should have been an entry");
        for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
          CardIdx_t c = sprt_entry->card(i);
          if (c != SparsePRTEntry::NullEntry) {
            prt->add_card(c);
          }
        }
        // Now we can delete the sparse entry.
        bool res = _sparse_table.delete_entry(from_hrm_ind);
        assert(res, "It should have been there.");
      }
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse.  But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

  prt->add_reference(from);

  if (G1RecordHRRSOops) {
    HeapRegionRemSet::record(hr(), from);
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print("Added card " PTR_FORMAT " to region "
                          "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                          align_size_down(uintptr_t(from),
                                          CardTableModRefBS::card_size),
                          hr()->bottom(), from);
    }
  }
  assert(contains_reference(from), "We just added it!");
}

PerRegionTable*
OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
  assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
  PerRegionTable* prt = _fine_grain_regions[ind];
  while (prt != NULL && prt->hr() != hr) {
    prt = prt->collision_list_next();
  }
  // Loop postcondition is the method postcondition.
  return prt;
}

jint OtherRegionsTable::_n_coarsenings = 0;

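// Evicts a fine-grain table to make room for a new one: samples the hash
// table, picks the most occupied PerRegionTable, marks its region in the
// coarse map (coarsening) and returns the table so it can be reused.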
PerRegionTable* OtherRegionsTable::delete_region_table() {
  assert(_m->owned_by_self(), "Precondition");
  assert(_n_fine_entries == _max_fine_entries, "Precondition");
  PerRegionTable* max = NULL;
  jint max_occ = 0;
  PerRegionTable** max_prev;
  size_t max_ind;

  size_t i = _fine_eviction_start;
  for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
    size_t ii = i;
    // Make sure we get a non-NULL sample.
    while (_fine_grain_regions[ii] == NULL) {
      ii++;
      if (ii == _max_fine_entries) ii = 0;
      guarantee(ii != i, "We must find one.");
    }
    PerRegionTable** prev = &_fine_grain_regions[ii];
    PerRegionTable* cur = *prev;
    while (cur != NULL) {
      jint cur_occ = cur->occupied();
      if (max == NULL || cur_occ > max_occ) {
        max = cur;
        max_prev = prev;
        max_ind = i;
        max_occ = cur_occ;
      }
      prev = cur->collision_list_next_addr();
      cur = cur->collision_list_next();
    }
    i = i + _fine_eviction_stride;
    if (i >= _n_fine_entries) i = i - _n_fine_entries;
  }

  _fine_eviction_start++;

  if (_fine_eviction_start >= _n_fine_entries) {
    _fine_eviction_start -= _n_fine_entries;
  }

  guarantee(max != NULL, "Since _n_fine_entries > 0");

  // Set the corresponding coarse bit.
  size_t max_hrm_index = (size_t) max->hr()->hrm_index();
  if (!_coarse_map.at(max_hrm_index)) {
    _coarse_map.at_put(max_hrm_index, true);
    _n_coarse_entries++;
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
                 "for region [" PTR_FORMAT "...] (%d coarse entries).\n",
                 hr()->bottom(),
                 max->hr()->bottom(),
                 _n_coarse_entries);
    }
  }

  // Unsplice.
  *max_prev = max->collision_list_next();
  Atomic::inc(&_n_coarsenings);
  _n_fine_entries--;
  return max;
}


// At present, this must be called stop-world single-threaded.
void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                              BitMap* region_bm, BitMap* card_bm) {
  // First eliminate garbage regions from the coarse map.
  if (G1RSScrubVerbose) {
    gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrm_index());
  }

  assert(_coarse_map.size() == region_bm->size(), "Precondition");
  if (G1RSScrubVerbose) {
    gclog_or_tty->print("   Coarse map: before = "SIZE_FORMAT"...",
                        _n_coarse_entries);
  }
  _coarse_map.set_intersection(*region_bm);
  _n_coarse_entries = _coarse_map.count_one_bits();
  if (G1RSScrubVerbose) {
    gclog_or_tty->print_cr("   after = "SIZE_FORMAT".", _n_coarse_entries);
  }

  // Now do the fine-grained maps.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    PerRegionTable* cur = _fine_grain_regions[i];
    PerRegionTable** prev = &_fine_grain_regions[i];
    while (cur != NULL) {
      PerRegionTable* nxt = cur->collision_list_next();
      // If the entire region is dead, eliminate.
      if (G1RSScrubVerbose) {
        gclog_or_tty->print_cr("     For other region %u:",
                               cur->hr()->hrm_index());
      }
      if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
        *prev = nxt;
        cur->set_collision_list_next(NULL);
        _n_fine_entries--;
        if (G1RSScrubVerbose) {
          gclog_or_tty->print_cr("          deleted via region map.");
        }
        unlink_from_all(cur);
        PerRegionTable::free(cur);
      } else {
        // Do fine-grain elimination.
        if (G1RSScrubVerbose) {
          gclog_or_tty->print("          occ: before = %4d.", cur->occupied());
        }
        cur->scrub(ctbs, card_bm);
        if (G1RSScrubVerbose) {
          gclog_or_tty->print_cr("          after = %4d.", cur->occupied());
        }
        // Did that empty the table completely?
        if (cur->occupied() == 0) {
          *prev = nxt;
          cur->set_collision_list_next(NULL);
          _n_fine_entries--;
          unlink_from_all(cur);
          PerRegionTable::free(cur);
        } else {
          prev = cur->collision_list_next_addr();
        }
      }
      cur = nxt;
    }
  }
  // Since we may have deleted a from_card_cache entry from the RS, clear
  // the FCC.
  clear_fcc();
}

bool OtherRegionsTable::is_empty() const {
  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
}

size_t OtherRegionsTable::occupied() const {
  size_t sum = occ_fine();
  sum += occ_sparse();
  sum += occ_coarse();
  return sum;
}

size_t OtherRegionsTable::occ_fine() const {
  size_t sum = 0;

  size_t num = 0;
  PerRegionTable * cur = _first_all_fine_prts;
  while (cur != NULL) {
    sum += cur->occupied();
    cur = cur->next();
    num++;
  }
  guarantee(num == _n_fine_entries, "just checking");
  return sum;
}

size_t OtherRegionsTable::occ_coarse() const {
  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
}

size_t OtherRegionsTable::occ_sparse() const {
  return _sparse_table.occupied();
}

size_t OtherRegionsTable::mem_size() const {
  size_t sum = 0;
  // all PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
      _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
  sum += (_sparse_table.mem_size());
  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
  return sum;
}

size_t OtherRegionsTable::static_mem_size() {
  return FromCardCache::static_mem_size();
}

size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size();
}

void OtherRegionsTable::clear_fcc() {
  FromCardCache::clear(hr()->hrm_index());
}

void OtherRegionsTable::clear() {
  // if there are no entries, skip this step
  if (_first_all_fine_prts != NULL) {
    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  } else {
    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
  }

  _first_all_fine_prts = _last_all_fine_prts = NULL;
  _sparse_table.clear();
  _coarse_map.clear();
  _n_fine_entries = 0;
  _n_coarse_entries = 0;

  clear_fcc();
}

bool OtherRegionsTable::del_single_region_table(size_t ind,
                                                HeapRegion* hr) {
  assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
  PerRegionTable** prev_addr = &_fine_grain_regions[ind];
  PerRegionTable* prt = *prev_addr;
  while (prt != NULL && prt->hr() != hr) {
    prev_addr = prt->collision_list_next_addr();
    prt = prt->collision_list_next();
  }
  if (prt != NULL) {
    assert(prt->hr() == hr, "Loop postcondition.");
    *prev_addr = prt->collision_list_next();
    unlink_from_all(prt);
    PerRegionTable::free(prt);
    _n_fine_entries--;
    return true;
  } else {
    return false;
  }
}

bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}

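// Lookup mirrors add_reference(): check the coarse map first, then the
// fine-grain table for the region, and finally the sparse table.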
bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing_raw(from);
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                     hr);
  if (prt != NULL) {
    return prt->contains_reference(from);

  } else {
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = from_card - hr_bot_card_index;
    assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
           "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}

void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}

// Determines how many threads can add records to an rset in parallel.
// This can be done by either mutator threads together with the
// concurrent refinement threads or GC threads.
uint HeapRegionRemSet::num_par_rem_sets() {
  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
}

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                   HeapRegion* hr)
  : _bosa(bosa),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true),
    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
  reset_for_par_iteration();
}

void HeapRegionRemSet::setup_remset_size() {
  // Setup sparse and fine-grain tables sizes.
  // table_size = base * (log(region_size / 1M) + 1)
  const int LOG_M = 20;
  int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
}

bool HeapRegionRemSet::claim_iter() {
  if (_iter_state != Unclaimed) return false;
  jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
  return (res == Unclaimed);
}

void HeapRegionRemSet::set_iter_complete() {
  _iter_state = Complete;
}

bool HeapRegionRemSet::iter_is_complete() {
  return _iter_state == Complete;
}

#ifndef PRODUCT
void HeapRegionRemSet::print() {
  HeapRegionRemSetIterator iter(this);
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
  }
  if (iter.n_yielded() != occupied()) {
    gclog_or_tty->print_cr("Yielded disagrees with occupied:");
    gclog_or_tty->print_cr("  %6d yielded (%6d coarse, %6d fine).",
                  iter.n_yielded(),
                  iter.n_yielded_coarse(), iter.n_yielded_fine());
    gclog_or_tty->print_cr("  %6d occ     (%6d coarse, %6d fine).",
                  occupied(), occ_coarse(), occ_fine());
  }
  guarantee(iter.n_yielded() == occupied(),
            "We should have yielded all the represented cards.");
}
#endif

void HeapRegionRemSet::cleanup() {
  SparsePRT::cleanup_all();
}

void HeapRegionRemSet::clear() {
  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  clear_locked();
}

void HeapRegionRemSet::clear_locked() {
  _code_roots.clear();
  _other_regions.clear();
  assert(occupied_locked() == 0, "Should be clear.");
  reset_for_par_iteration();
}

void HeapRegionRemSet::reset_for_par_iteration() {
  _iter_state = Unclaimed;
  _iter_claimed = 0;
  // It's good to check this to make sure that the two methods are in sync.
  assert(verify_ready_for_par_iteration(), "post-condition");
}

void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
                             BitMap* region_bm, BitMap* card_bm) {
  _other_regions.scrub(ctbs, region_bm, card_bm);
}

// Code roots support

void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  _code_roots.add(nm);
}

void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert_locked_or_safepoint(CodeCache_lock);

  _code_roots.remove_lock_free(nm);

  // Check that there were no duplicates
  guarantee(!_code_roots.contains(nm), "duplicate entry found");
}

class NMethodMigrationOopClosure : public OopClosure {
  G1CollectedHeap* _g1h;
  HeapRegion* _from;
  nmethod* _nm;

  uint _num_self_forwarded;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      if (_from->is_in(obj)) {
        // Reference still points into the source region.
        // Since roots are immediately evacuated this means that
        // we must have self forwarded the object
        assert(obj->is_forwarded(),
               err_msg("code roots should be immediately evacuated. "
                       "Ref: "PTR_FORMAT", "
                       "Obj: "PTR_FORMAT", "
                       "Region: "HR_FORMAT,
                       p, (void*) obj, HR_FORMAT_PARAMS(_from)));
        assert(obj->forwardee() == obj,
               err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));

        // The object has been self forwarded.
        // Note, if we're during an initial mark pause, there is
        // no need to explicitly mark object. It will be marked
        // during the regular evacuation failure handling code.
        _num_self_forwarded++;
      } else {
        // The reference points into a promotion or to-space region
        HeapRegion* to = _g1h->heap_region_containing(obj);
        to->rem_set()->add_strong_code_root(_nm);
      }
    }
  }

public:
  NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
    _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  uint retain() { return _num_self_forwarded > 0; }
};

void HeapRegionRemSet::migrate_strong_code_roots() {
  assert(hr()->in_collection_set(), "only collection set regions");
  assert(!hr()->isHumongous(),
         err_msg("humongous region "HR_FORMAT" should not have been added to the collection set",
                 HR_FORMAT_PARAMS(hr())));

  ResourceMark rm;

  // List of code blobs to retain for this region
  GrowableArray<nmethod*> to_be_retained(10);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  while (!_code_roots.is_empty()) {
    nmethod *nm = _code_roots.pop();
    if (nm != NULL) {
      NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
      nm->oops_do(&oop_cl);
      if (oop_cl.retain()) {
        to_be_retained.push(nm);
      }
    }
  }

  // Now push any code roots we need to retain
  assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
         "Retained nmethod list must be empty or "
         "evacuation of this region failed");

  while (to_be_retained.is_nonempty()) {
    nmethod* nm = to_be_retained.pop();
    assert(nm != NULL, "sanity");
    add_strong_code_root(nm);
  }
}

void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
  _code_roots.nmethods_do(blk);
}

size_t HeapRegionRemSet::strong_code_roots_mem_size() {
  return _code_roots.mem_size();
}

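// The iterator yields cards in three phases, matching the remembered set
// representations: sparse entries first, then fine-grain PerRegionTables,
// and finally coarsened regions (see has_next()).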
HeapRegionRemSetIterator:: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
  _hrrs(hrrs),
  _g1h(G1CollectedHeap::heap()),
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _bosa(hrrs->bosa()),
  _is(Sparse),
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
  _cur_card_in_prt(HeapRegion::CardsPerRegion),
  _fine_cur_prt(NULL),
  _n_yielded_coarse(0),
  _n_yielded_fine(0),
  _n_yielded_sparse(0),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}

bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was that the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region.  This may leave _coarse_cur_region_index
    // set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
      _cur_region_card_offset = _bosa->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    _cur_card_in_prt =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }
  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
    // _fine_cur_prt may still be NULL in case there are no PRTs at all for
    // the remembered set.
    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
      return false;
    }
    PerRegionTable* next_prt = _fine_cur_prt->next();
    switch_to_prt(next_prt);
    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }

  card_index = _cur_region_card_offset + _cur_card_in_prt;
  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
            err_msg("Card index "SIZE_FORMAT" must be within the region", _cur_card_in_prt));
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next() {
  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
}

void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
  assert(prt != NULL, "Cannot switch to NULL prt");
  _fine_cur_prt = prt;

  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
  _cur_region_card_offset = _bosa->index_for(r_bot);

  // The bitmap scan for the PRT always scans from _cur_region_cur_card + 1.
  // To avoid special-casing this start case, and not miss the first bitmap
  // entry, initialize _cur_region_cur_card with -1 instead of 0.
  _cur_card_in_prt = (size_t)-1;
}

bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse: {
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
    PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
    if (initial_fine_prt != NULL) {
      switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
    }
  }
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  assert(ParallelGCThreads > 1 ||
         n_yielded() == _hrrs->occupied(),
         "Should have yielded all the cards in the rem set "
         "(in the non-par case).");
  return false;
}



OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
HeapWord**          HeapRegionRemSet::_recorded_cards = NULL;
HeapRegion**        HeapRegionRemSet::_recorded_regions = NULL;
int                 HeapRegionRemSet::_n_recorded = 0;

HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
int*         HeapRegionRemSet::_recorded_event_index = NULL;
int          HeapRegionRemSet::_n_recorded_events = 0;

void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
  if (_recorded_oops == NULL) {
    assert(_n_recorded == 0
           && _recorded_cards == NULL
           && _recorded_regions == NULL,
           "Inv");
    _recorded_oops    = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded, mtGC);
    _recorded_cards   = NEW_C_HEAP_ARRAY(HeapWord*,          MaxRecorded, mtGC);
    _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*,        MaxRecorded, mtGC);
  }
  if (_n_recorded == MaxRecorded) {
    gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
  } else {
    _recorded_cards[_n_recorded] =
      (HeapWord*)align_size_down(uintptr_t(f),
                                 CardTableModRefBS::card_size);
    _recorded_oops[_n_recorded] = f;
    _recorded_regions[_n_recorded] = hr;
    _n_recorded++;
  }
}

void HeapRegionRemSet::record_event(Event evnt) {
  if (!G1RecordHRRSEvents) return;

  if (_recorded_events == NULL) {
    assert(_n_recorded_events == 0
           && _recorded_event_index == NULL,
           "Inv");
    _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents, mtGC);
    _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents, mtGC);
  }
  if (_n_recorded_events == MaxRecordedEvents) {
    gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
  } else {
    _recorded_events[_n_recorded_events] = evnt;
    _recorded_event_index[_n_recorded_events] = _n_recorded;
    _n_recorded_events++;
  }
}

void HeapRegionRemSet::print_event(outputStream* str, Event evnt) {
  switch (evnt) {
  case Event_EvacStart:
    str->print("Evac Start");
    break;
  case Event_EvacEnd:
    str->print("Evac End");
    break;
  case Event_RSUpdateEnd:
    str->print("RS Update End");
    break;
  }
}

void HeapRegionRemSet::print_recorded() {
  int cur_evnt = 0;
  Event cur_evnt_kind;
  int cur_evnt_ind = 0;
  if (_n_recorded_events > 0) {
    cur_evnt_kind = _recorded_events[cur_evnt];
    cur_evnt_ind = _recorded_event_index[cur_evnt];
  }

  for (int i = 0; i < _n_recorded; i++) {
    while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
      gclog_or_tty->print("Event: ");
      print_event(gclog_or_tty, cur_evnt_kind);
      gclog_or_tty->cr();
      cur_evnt++;
      if (cur_evnt < MaxRecordedEvents) {
        cur_evnt_kind = _recorded_events[cur_evnt];
        cur_evnt_ind = _recorded_event_index[cur_evnt];
      }
    }
    gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
                        " for ref " PTR_FORMAT ".\n",
                        _recorded_cards[i], _recorded_regions[i]->bottom(),
                        _recorded_oops[i]);
  }
}

void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}

void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}

void
HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}

#ifndef PRODUCT
void PerRegionTable::test_fl_mem_size() {
  PerRegionTable* dummy = alloc(NULL);

  size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize;
  assert(dummy->mem_size() > min_prt_size,
         err_msg("PerRegionTable memory usage is suspiciously small, only has "SIZE_FORMAT" bytes. "
                 "Should be at least "SIZE_FORMAT" bytes.", dummy->mem_size(), min_prt_size));
  free(dummy);
  guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
  // try to reset the state
  _free_list = NULL;
  delete dummy;
}

void HeapRegionRemSet::test_prt() {
  PerRegionTable::test_fl_mem_size();
}

void HeapRegionRemSet::test() {
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
  // hash bucket.
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Make three references from region 0x101...
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

  // Now cause a coarsening.
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter(hrrs);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
    sum++;
  }
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
#endif