/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

Z
zgu 已提交
41
// A PerRegionTable (PRT) records, for a single "from" region, the set of
// cards in that region that contain references into the owning region.
// Instances live on the C heap and are recycled through a global,
// lock-free free list.
class PerRegionTable: public CHeapObj<mtGC> {
  friend class OtherRegionsTable;
  friend class HeapRegionRemSetIterator;

  HeapRegion*     _hr;        // The "from" region this table covers.
  BitMap          _bm;        // One bit per card of _hr.
  jint            _occupied;  // Cached count of set bits in _bm.

  // Next pointer for the free list / allocated 'all' list.
  PerRegionTable* _next;

  // Prev pointer for the allocated 'all' list.
  PerRegionTable* _prev;

  // Next pointer in the hash-bucket collision list of
  // OtherRegionsTable::_fine_grain_regions.
  PerRegionTable * _collision_list_next;

  // Global free list of PRTs, linked through _next.
  static PerRegionTable* _free_list;

protected:
  // We need access in order to union things into the base table.
  BitMap* bm() { return &_bm; }

  // Re-derive _occupied from the bitmap (used after bulk bitmap updates).
  void recount_occupied() {
    _occupied = (jint) bm()->count_one_bits();
  }

  PerRegionTable(HeapRegion* hr) :
    _hr(hr),
    _occupied(0),
    _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */),
    _collision_list_next(NULL), _next(NULL), _prev(NULL)
  {}

  // Set the bit for the given card; "par" selects the atomic variant.
  void add_card_work(CardIdx_t from_card, bool par) {
    if (!_bm.at(from_card)) {
      if (par) {
        // par_at_put returns true only for the thread that flipped the bit,
        // so _occupied is incremented exactly once per card.
        if (_bm.par_at_put(from_card, 1)) {
          Atomic::inc(&_occupied);
        }
      } else {
        _bm.at_put(from_card, 1);
        _occupied++;
      }
    }
  }

  // Record the card containing "from"; tolerant of "from" not lying in the
  // current region because of concurrent reuse of this table.
  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
                             from,
                             UseCompressedOops
                             ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
                             : (void *)oopDesc::load_decode_heap_oop((oop*)from));
    }

    HeapRegion* loc_hr = hr();
    // If the test below fails, this table was reused concurrently with this
    // operation. That is OK: the old table was coarsened, and setting a bit
    // in the new table is never incorrect.
    // The table may also have belonged to a continued humongous region and
    // now be reused for the corresponding start humongous region; calling
    // is_in_reserved_raw() (rather than is_in_reserved()) detects that case.
    if (loc_hr->is_in_reserved_raw(from)) {
      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
      CardIdx_t from_card = (CardIdx_t)
          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);

      assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
             "Must be in range.");
      add_card_work(from_card, par);
    }
  }

public:

  // Acquire-load of the covered region; pairs with the release-store in
  // init() so readers see a fully cleared bitmap.
  HeapRegion* hr() const {
    return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
  }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  // Re-initialize this table for region "hr". When the table stays on the
  // 'all' list (eviction reuse), the all-list links are preserved.
  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }
    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
    // Make sure the bitmap clearing above is finished before publishing
    // this PRT to concurrent threads.
    OrderAccess::release_store_ptr(&_hr, hr);
  }

  void add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ true);
  }

  void seq_add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ false);
  }

  // Drop all cards whose bit is not also set in "card_bm".
  void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
    HeapWord* hr_bot = hr()->bottom();
    size_t hr_first_card_index = ctbs->index_for(hr_bot);
    bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
    recount_occupied();
  }

  void add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ true);
  }

  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // (Destructively) union the bitmap of the current table into the given
  // bitmap (which is assumed to be of the same size.)
  void union_bitmap_into(BitMap* bm) {
    bm->set_union(_bm);
  }

  // Mem size in bytes.
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    CardTableModRefBS::card_size);
    return _bm.at(card_ind);
  }

  // Bulk-free the PRTs from "prt" to "last" (linked via _next) by pushing
  // the whole chain onto the global free list with a CAS loop.
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* head = _free_list;
      last->set_next(head);
      PerRegionTable* observed = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, head);
      if (observed == head) {
        return;
      }
    }
    ShouldNotReachHere();
  }

  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }

  // Returns an initialized PerRegionTable instance, popped from the free
  // list if possible, freshly allocated otherwise.
  static PerRegionTable* alloc(HeapRegion* hr) {
    PerRegionTable* head = _free_list;
    while (head != NULL) {
      PerRegionTable* next_prt = head->next();
      PerRegionTable* observed =
        (PerRegionTable*)
        Atomic::cmpxchg_ptr(next_prt, &_free_list, head);
      if (observed == head) {
        // We successfully popped "head"; re-initialize it for "hr".
        head->init(hr, true);
        return head;
      } else {
        head = _free_list;
      }
    }
    assert(head == NULL, "Loop condition.");
    return new PerRegionTable(hr);
  }

  PerRegionTable* next() const { return _next; }
  void set_next(PerRegionTable* next) { _next = next; }
  PerRegionTable* prev() const { return _prev; }
  void set_prev(PerRegionTable* prev) { _prev = prev; }

  // Accessor and modification routines for the pointer of the singly
  // linked collision list that chains the PRTs within one bucket of the
  // OtherRegionsTable::_fine_grain_regions hash table.
  //
  // It might be useful to also make the collision list doubly linked
  // to avoid iteration over the collisions list during scrubbing/deletion.
  // OTOH there might not be many collisions.

  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }

  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }

  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }

  // Total memory retained by the global free list, in bytes.
  static size_t fl_mem_size() {
    size_t total = 0;
    for (PerRegionTable* cur = _free_list; cur != NULL; cur = cur->next()) {
      total += cur->mem_size();
    }
    return total;
  }

  static void test_fl_mem_size();
};

262
PerRegionTable* PerRegionTable::_free_list = NULL;
263 264 265 266 267 268

size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;

269
OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
270
  _g1h(G1CollectedHeap::heap()),
271
  _hr(hr), _m(m),
272 273 274
  _coarse_map(G1CollectedHeap::heap()->max_regions(),
              false /* in-resource-area */),
  _fine_grain_regions(NULL),
275
  _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
276 277 278 279
  _n_fine_entries(0), _n_coarse_entries(0),
  _fine_eviction_start(0),
  _sparse_table(hr)
{
280 281
  typedef PerRegionTable* PerRegionTablePtr;

282 283
  if (_max_fine_entries == 0) {
    assert(_mod_max_fine_entries_mask == 0, "Both or none.");
284
    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
285
    _max_fine_entries = (size_t)1 << max_entries_log;
286
    _mod_max_fine_entries_mask = _max_fine_entries - 1;
287

288 289
    assert(_fine_eviction_sample_size == 0
           && _fine_eviction_stride == 0, "All init at same time.");
290
    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
291 292
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
  }
293

294
  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
295
                        mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
296 297

  if (_fine_grain_regions == NULL) {
298
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
299
                          "Failed to allocate _fine_grain_entries.");
300 301
  }

302 303 304 305 306
  for (size_t i = 0; i < _max_fine_entries; i++) {
    _fine_grain_regions[i] = NULL;
  }
}

307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366
void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
  // We always append to the beginning of the list for convenience;
  // the order of entries in this list does not matter.
  if (_first_all_fine_prts != NULL) {
    assert(_first_all_fine_prts->prev() == NULL, "invariant");
    _first_all_fine_prts->set_prev(prt);
    prt->set_next(_first_all_fine_prts);
  } else {
    // this is the first element we insert. Adjust the "last" pointer
    _last_all_fine_prts = prt;
    assert(prt->next() == NULL, "just checking");
  }
  // the new element is always the first element without a predecessor
  prt->set_prev(NULL);
  _first_all_fine_prts = prt;

  assert(prt->prev() == NULL, "just checking");
  assert(_first_all_fine_prts == prt, "just checking");
  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

// Remove "prt" from the doubly linked 'all' list of fine-grain tables,
// updating _first_all_fine_prts/_last_all_fine_prts as needed and clearing
// prt's list links. prt must currently be on the list.
void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
  if (prt->prev() != NULL) {
    assert(_first_all_fine_prts != prt, "just checking");
    prt->prev()->set_next(prt->next());
    // removing the last element in the list?
    if (_last_all_fine_prts == prt) {
      _last_all_fine_prts = prt->prev();
    }
  } else {
    // prt is the head of the list.
    assert(_first_all_fine_prts == prt, "just checking");
    _first_all_fine_prts = prt->next();
    // list is empty now?
    if (_first_all_fine_prts == NULL) {
      _last_all_fine_prts = NULL;
    }
  }

  if (prt->next() != NULL) {
    prt->next()->set_prev(prt->prev());
  }

  // Detach prt's own links so it can be reused or freed safely.
  prt->set_next(NULL);
  prt->set_prev(NULL);

  // Post-conditions: list is either empty or has consistent head/tail.
  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

367 368 369
int**  FromCardCache::_cache = NULL;
uint   FromCardCache::_max_regions = 0;
size_t FromCardCache::_static_mem_size = 0;
370

371 372
void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
  guarantee(_cache == NULL, "Should not call this multiple times");
373

374 375 376 377
  _max_regions = max_num_regions;
  _cache = Padded2DArray<int, mtGC>::create_unfreeable(n_par_rs,
                                                       _max_regions,
                                                       &_static_mem_size);
378

379
  invalidate(0, _max_regions);
380 381
}

382 383
void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
  guarantee((size_t)start_idx + new_num_regions <= max_uintx,
384
            err_msg("Trying to invalidate beyond maximum region, from %u size " SIZE_FORMAT,
385
                    start_idx, new_num_regions));
386
  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
387 388 389
    uint end_idx = (start_idx + (uint)new_num_regions);
    assert(end_idx <= _max_regions, "Must be within max.");
    for (uint j = start_idx; j < end_idx; j++) {
390
      set(i, j, InvalidCard);
391 392 393 394 395
    }
  }
}

#ifndef PRODUCT
396
void FromCardCache::print(outputStream* out) {
397
  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
398
    for (uint j = 0; j < _max_regions; j++) {
399
      out->print_cr("_from_card_cache[" UINT32_FORMAT "][" UINT32_FORMAT "] = " INT32_FORMAT ".",
400
                    i, j, at(i, j));
401 402 403 404 405
    }
  }
}
#endif

406 407 408 409 410 411 412
void FromCardCache::clear(uint region_idx) {
  uint num_par_remsets = HeapRegionRemSet::num_par_rem_sets();
  for (uint i = 0; i < num_par_remsets; i++) {
    set(i, region_idx, InvalidCard);
  }
}

413
void OtherRegionsTable::initialize(uint max_regions) {
414 415 416
  FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
}

417 418
void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
  FromCardCache::invalidate(start_idx, num_regions);
419 420 421 422 423 424
}

// Debugging aid: dump the global from-card cache contents.
void OtherRegionsTable::print_from_card_cache() {
  FromCardCache::print();
}

425
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
426
  uint cur_hrm_ind = hr()->hrm_index();
427

428 429 430 431
  if (G1TraceHeapRegionRememberedSet) {
    gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
                                                    from,
                                                    UseCompressedOops
432 433
                                                    ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
                                                    : (void *)oopDesc::load_decode_heap_oop((oop*)from));
434
  }
435 436 437

  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

438
  if (G1TraceHeapRegionRememberedSet) {
439
    gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = " INT32_FORMAT ")",
440
                  hr()->bottom(), from_card,
441
                  FromCardCache::at((uint)tid, cur_hrm_ind));
442
  }
443

444
  if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) {
445 446 447
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("  from-card cache hit.");
    }
448
    assert(contains_reference(from), err_msg("We just found " PTR_FORMAT " in the FromCardCache", from));
449 450 451 452 453
    return;
  }

  // Note that this may be a continued H region.
  HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
454
  RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();
455 456

  // If the region is already coarsened, return.
457
  if (_coarse_map.at(from_hrm_ind)) {
458 459 460
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("  coarse map hit.");
    }
461
    assert(contains_reference(from), err_msg("We just found " PTR_FORMAT " in the Coarse table", from));
462 463 464 465
    return;
  }

  // Otherwise find a per-region table to add it to.
466
  size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
467
  PerRegionTable* prt = find_region_table(ind, from_hr);
468
  if (prt == NULL) {
469
    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
470 471 472 473 474 475 476
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      uintptr_t from_hr_bot_card_index =
        uintptr_t(from_hr->bottom())
          >> CardTableModRefBS::card_shift;
477
      CardIdx_t card_index = from_card - from_hr_bot_card_index;
478
      assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
479 480
             "Must be in range.");
      if (G1HRRSUseSparseTable &&
481
          _sparse_table.add_card(from_hrm_ind, card_index)) {
482 483
        if (G1RecordHRRSOops) {
          HeapRegionRemSet::record(hr(), from);
484 485 486 487 488 489 490 491 492 493
          if (G1TraceHeapRegionRememberedSet) {
            gclog_or_tty->print("   Added card " PTR_FORMAT " to region "
                                "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                                align_size_down(uintptr_t(from),
                                                CardTableModRefBS::card_size),
                                hr()->bottom(), from);
          }
        }
        if (G1TraceHeapRegionRememberedSet) {
          gclog_or_tty->print_cr("   added card to sparse table.");
494
        }
495
        assert(contains_reference_locked(from), err_msg("We just added " PTR_FORMAT " to the Sparse table", from));
496 497
        return;
      } else {
498 499
        if (G1TraceHeapRegionRememberedSet) {
          gclog_or_tty->print_cr("   [tid %d] sparse table entry "
500
                        "overflow(f: %d, t: %u)",
501
                        tid, from_hrm_ind, cur_hrm_ind);
502
        }
503 504 505 506
      }

      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
507 508 509
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
510
      } else {
511
        prt = PerRegionTable::alloc(from_hr);
512
        link_to_all(prt);
513 514
      }

515
      PerRegionTable* first_prt = _fine_grain_regions[ind];
516
      prt->set_collision_list_next(first_prt);
517 518 519 520 521 522 523 524 525
      // The assignment into _fine_grain_regions allows the prt to
      // start being used concurrently. In addition to
      // collision_list_next which must be visible (else concurrent
      // parsing of the list, if any, may fail to see other entries),
      // the content of the prt must be visible (else for instance
      // some mark bits may not yet seem cleared or a 'later' update
      // performed by a concurrent thread could be undone when the
      // zeroing becomes visible). This requires store ordering.
      OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt);
526 527 528
      _n_fine_entries++;

      if (G1HRRSUseSparseTable) {
529
        // Transfer from sparse to fine-grain.
530
        SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
531 532 533
        assert(sprt_entry != NULL, "There should have been an entry");
        for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
          CardIdx_t c = sprt_entry->card(i);
534 535 536 537 538
          if (c != SparsePRTEntry::NullEntry) {
            prt->add_card(c);
          }
        }
        // Now we can delete the sparse entry.
539
        bool res = _sparse_table.delete_entry(from_hrm_ind);
540 541 542 543 544 545 546 547 548 549
        assert(res, "It should have been there.");
      }
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse.  But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

550 551
  prt->add_reference(from);

552 553
  if (G1RecordHRRSOops) {
    HeapRegionRemSet::record(hr(), from);
554 555 556 557 558 559 560
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print("Added card " PTR_FORMAT " to region "
                          "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                          align_size_down(uintptr_t(from),
                                          CardTableModRefBS::card_size),
                          hr()->bottom(), from);
    }
561
  }
562
  assert(contains_reference(from), err_msg("We just added " PTR_FORMAT " to the PRT", from));
563 564
}

565
PerRegionTable*
566 567
OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
  assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
568
  PerRegionTable* prt = _fine_grain_regions[ind];
569
  while (prt != NULL && prt->hr() != hr) {
570
    prt = prt->collision_list_next();
571 572 573 574 575 576 577
  }
  // Loop postcondition is the method postcondition.
  return prt;
}

// Running count of fine-table evictions ("coarsenings") across all
// remembered sets; incremented atomically in delete_region_table().
jint OtherRegionsTable::_n_coarsenings = 0;

578
PerRegionTable* OtherRegionsTable::delete_region_table() {
579
  assert(_m->owned_by_self(), "Precondition");
580
  assert(_n_fine_entries == _max_fine_entries, "Precondition");
581
  PerRegionTable* max = NULL;
582
  jint max_occ = 0;
583
  PerRegionTable** max_prev = NULL;
584 585 586 587 588 589 590 591 592 593 594
  size_t max_ind;

  size_t i = _fine_eviction_start;
  for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
    size_t ii = i;
    // Make sure we get a non-NULL sample.
    while (_fine_grain_regions[ii] == NULL) {
      ii++;
      if (ii == _max_fine_entries) ii = 0;
      guarantee(ii != i, "We must find one.");
    }
595 596
    PerRegionTable** prev = &_fine_grain_regions[ii];
    PerRegionTable* cur = *prev;
597 598 599 600 601 602 603 604
    while (cur != NULL) {
      jint cur_occ = cur->occupied();
      if (max == NULL || cur_occ > max_occ) {
        max = cur;
        max_prev = prev;
        max_ind = i;
        max_occ = cur_occ;
      }
605 606
      prev = cur->collision_list_next_addr();
      cur = cur->collision_list_next();
607 608 609 610
    }
    i = i + _fine_eviction_stride;
    if (i >= _n_fine_entries) i = i - _n_fine_entries;
  }
611

612
  _fine_eviction_start++;
613 614

  if (_fine_eviction_start >= _n_fine_entries) {
615 616
    _fine_eviction_start -= _n_fine_entries;
  }
617

618
  guarantee(max != NULL, "Since _n_fine_entries > 0");
619
  guarantee(max_prev != NULL, "Since max != NULL.");
620 621

  // Set the corresponding coarse bit.
622 623 624
  size_t max_hrm_index = (size_t) max->hr()->hrm_index();
  if (!_coarse_map.at(max_hrm_index)) {
    _coarse_map.at_put(max_hrm_index, true);
625
    _n_coarse_entries++;
626 627 628 629 630 631 632
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
                 "for region [" PTR_FORMAT "...] (%d coarse entries).\n",
                 hr()->bottom(),
                 max->hr()->bottom(),
                 _n_coarse_entries);
    }
633 634 635
  }

  // Unsplice.
636
  *max_prev = max->collision_list_next();
637 638 639 640 641 642 643 644 645 646
  Atomic::inc(&_n_coarsenings);
  _n_fine_entries--;
  return max;
}


// At present, this must be called stop-world single-threaded.
// Remove entries that refer to dead regions (per "region_bm") and dead
// cards (per "card_bm"), freeing fine-grain PRTs that become empty.
void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                              BitMap* region_bm, BitMap* card_bm) {
  // First eliminate garbage regions from the coarse map.
  if (G1RSScrubVerbose) {
    gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrm_index());
  }

  assert(_coarse_map.size() == region_bm->size(), "Precondition");
  if (G1RSScrubVerbose) {
    gclog_or_tty->print("   Coarse map: before = " SIZE_FORMAT "...",
                        _n_coarse_entries);
  }
  _coarse_map.set_intersection(*region_bm);
  _n_coarse_entries = _coarse_map.count_one_bits();
  if (G1RSScrubVerbose) {
    gclog_or_tty->print_cr("   after = " SIZE_FORMAT ".", _n_coarse_entries);
  }

  // Now do the fine-grained tables, bucket by bucket.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    PerRegionTable* cur = _fine_grain_regions[i];
    PerRegionTable** prev = &_fine_grain_regions[i];
    while (cur != NULL) {
      PerRegionTable* nxt = cur->collision_list_next();
      // If the entire source region is dead, eliminate the whole table.
      if (G1RSScrubVerbose) {
        gclog_or_tty->print_cr("     For other region %u:",
                               cur->hr()->hrm_index());
      }
      if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
        *prev = nxt;
        cur->set_collision_list_next(NULL);
        _n_fine_entries--;
        if (G1RSScrubVerbose) {
          gclog_or_tty->print_cr("          deleted via region map.");
        }
        unlink_from_all(cur);
        PerRegionTable::free(cur);
      } else {
        // Otherwise scrub individual cards.
        if (G1RSScrubVerbose) {
          gclog_or_tty->print("          occ: before = %4d.", cur->occupied());
        }
        cur->scrub(ctbs, card_bm);
        if (G1RSScrubVerbose) {
          gclog_or_tty->print_cr("          after = %4d.", cur->occupied());
        }
        // Did that empty the table completely?
        if (cur->occupied() == 0) {
          *prev = nxt;
          cur->set_collision_list_next(NULL);
          _n_fine_entries--;
          unlink_from_all(cur);
          PerRegionTable::free(cur);
        } else {
          prev = cur->collision_list_next_addr();
        }
      }
      cur = nxt;
    }
  }
  // Since we may have deleted a from_card_cache entry from the RS, clear
  // the FCC.
  clear_fcc();
}

710 711 712 713 714 715 716 717 718 719 720 721
bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
  if (limit <= (size_t)G1RSetSparseRegionEntries) {
    return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
  } else {
    // Current uses of this method may only use values less than G1RSetSparseRegionEntries
    // for the limit. The solution, comparing against occupied() would be too slow
    // at this time.
    Unimplemented();
    return false;
  }
}

722 723 724
bool OtherRegionsTable::is_empty() const {
  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
}
725 726 727 728 729 730 731 732 733 734

size_t OtherRegionsTable::occupied() const {
  size_t sum = occ_fine();
  sum += occ_sparse();
  sum += occ_coarse();
  return sum;
}

// Sum of occupied cards over all fine-grain PRTs (walks the 'all' list and
// cross-checks its length against _n_fine_entries).
size_t OtherRegionsTable::occ_fine() const {
  size_t sum = 0;

  size_t num = 0;
  for (PerRegionTable* cur = _first_all_fine_prts; cur != NULL; cur = cur->next()) {
    sum += cur->occupied();
    num++;
  }
  guarantee(num == _n_fine_entries, "just checking");
  return sum;
}

// Coarse entries count every card of their region as occupied.
size_t OtherRegionsTable::occ_coarse() const {
  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
}

// Number of cards recorded in the sparse table.
size_t OtherRegionsTable::occ_sparse() const {
  return _sparse_table.occupied();
}

// Approximate memory footprint of this table in bytes, including the
// fine-grain PRTs, the bucket array, the coarse bitmap and the sparse table.
size_t OtherRegionsTable::mem_size() const {
  size_t sum = 0;
  // all PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
      _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
  sum += (_sparse_table.mem_size());
  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
  return sum;
}

// Memory used by static, process-wide state (the from-card cache).
size_t OtherRegionsTable::static_mem_size() {
  return FromCardCache::static_mem_size();
}

// Memory retained by the global PRT free list.
size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size();
}

void OtherRegionsTable::clear_fcc() {
779
  FromCardCache::clear(hr()->hrm_index());
780 781 782
}

void OtherRegionsTable::clear() {
783 784 785 786 787 788 789
  // if there are no entries, skip this step
  if (_first_all_fine_prts != NULL) {
    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  } else {
    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
790
  }
791 792

  _first_all_fine_prts = _last_all_fine_prts = NULL;
793 794 795 796 797 798 799 800 801 802 803
  _sparse_table.clear();
  _coarse_map.clear();
  _n_fine_entries = 0;
  _n_coarse_entries = 0;

  clear_fcc();
}

// Remove and free the fine-grain PRT for region "hr" from bucket "ind", if
// present. Returns true iff a table was found and deleted.
bool OtherRegionsTable::del_single_region_table(size_t ind,
                                                HeapRegion* hr) {
  assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
  // Walk the collision list keeping the address of the link that points at
  // the current element, so unsplicing needs no special head case.
  PerRegionTable** prev_addr = &_fine_grain_regions[ind];
  PerRegionTable* prt = *prev_addr;
  while (prt != NULL && prt->hr() != hr) {
    prev_addr = prt->collision_list_next_addr();
    prt = prt->collision_list_next();
  }
  if (prt == NULL) {
    return false;
  }
  assert(prt->hr() == hr, "Loop postcondition.");
  *prev_addr = prt->collision_list_next();
  unlink_from_all(prt);
  PerRegionTable::free(prt);
  _n_fine_entries--;
  return true;
}

822
bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
823
  // Cast away const in this case.
824
  MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
825 826 827
  return contains_reference_locked(from);
}

828
bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
829
  HeapRegion* hr = _g1h->heap_region_containing_raw(from);
830
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
831 832 833
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

834
  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
835 836 837 838 839 840 841 842 843 844
                                     hr);
  if (prt != NULL) {
    return prt->contains_reference(from);

  } else {
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
845
    CardIdx_t card_index = from_card - hr_bot_card_index;
846
    assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
847
           "Must be in range.");
848
    return _sparse_table.contains_card(hr_ind, card_index);
849 850 851
  }
}

T
tonyp 已提交
852 853 854 855 856
void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}

857 858 859
// Determines how many threads can add records to an rset in parallel.
// This can be done by either mutator threads together with the
// concurrent refinement threads or GC threads.
860
uint HeapRegionRemSet::num_par_rem_sets() {
861
  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
862 863 864 865
}

// Construct the remembered set for the given region: a leaf-ranked per-rset
// lock, an empty code-root set, the per-region OtherRegionsTable, and an
// unclaimed parallel-iteration state.
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                   HeapRegion* hr)
  : _bosa(bosa),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true),
    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
  reset_for_par_iteration();
}

872 873 874
void HeapRegionRemSet::setup_remset_size() {
  // Setup sparse and fine-grain tables sizes.
  // table_size = base * (log(region_size / 1M) + 1)
875 876
  const int LOG_M = 20;
  int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
877 878 879 880 881 882 883 884 885
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
}

886 887 888 889 890 891 892 893 894 895 896 897 898 899 900
bool HeapRegionRemSet::claim_iter() {
  if (_iter_state != Unclaimed) return false;
  jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
  return (res == Unclaimed);
}

// Mark the parallel iteration over this remembered set as finished.
void HeapRegionRemSet::set_iter_complete() {
  _iter_state = Complete;
}

// Returns true once set_iter_complete() has been called.
bool HeapRegionRemSet::iter_is_complete() {
  return _iter_state == Complete;
}

#ifndef PRODUCT
901
void HeapRegionRemSet::print() {
902
  HeapRegionRemSetIterator iter(this);
903 904 905 906
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
907
    gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926
  }
  if (iter.n_yielded() != occupied()) {
    gclog_or_tty->print_cr("Yielded disagrees with occupied:");
    gclog_or_tty->print_cr("  %6d yielded (%6d coarse, %6d fine).",
                  iter.n_yielded(),
                  iter.n_yielded_coarse(), iter.n_yielded_fine());
    gclog_or_tty->print_cr("  %6d occ     (%6d coarse, %6d fine).",
                  occupied(), occ_coarse(), occ_fine());
  }
  guarantee(iter.n_yielded() == occupied(),
            "We should have yielded all the represented cards.");
}
#endif

// Global cleanup hook; delegates to the sparse-table implementation.
void HeapRegionRemSet::cleanup() {
  SparsePRT::cleanup_all();
}

void HeapRegionRemSet::clear() {
927 928 929
  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  clear_locked();
}
J
johnc 已提交
930

931 932
void HeapRegionRemSet::clear_locked() {
  _code_roots.clear();
933
  _other_regions.clear();
934
  assert(occupied_locked() == 0, "Should be clear.");
935 936 937 938 939 940 941 942
  reset_for_par_iteration();
}

// Reset the parallel-iteration claim state so a new iteration can start.
void HeapRegionRemSet::reset_for_par_iteration() {
  _iter_state = Unclaimed;
  _iter_claimed = 0;
  // It's good to check this to make sure that the two methods are in sync.
  assert(verify_ready_for_par_iteration(), "post-condition");
}

// Prune entries against the given region/card bitmaps; the work is done
// by the underlying OtherRegionsTable.
void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
                             BitMap* region_bm, BitMap* card_bm) {
  _other_regions.scrub(ctbs, region_bm, card_bm);
}

// Code roots support
//
// The code root set is protected by two separate locking schemes
// When at safepoint the per-hrrs lock must be held during modifications
// except when doing a full gc.
// When not at safepoint the CodeCache_lock must be held during modifications.
// When concurrent readers access the contains() function
// (during the evacuation phase) no removals are allowed.

// Add nm as a strong code root, locking only if an unlocked containment
// probe suggests it is not yet present.
void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()),
          err_msg("should call add_strong_code_root_locked instead. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s",
                  BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint())));
  // Optimistic unlocked contains-check
  if (!_code_roots.contains(nm)) {
    MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
    add_strong_code_root_locked(nm);
  }
}

// Add nm to the code-root set; caller must hold the appropriate lock
// (CodeCache_lock, or the per-rset lock / be the VM thread at a safepoint).
void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((CodeCache_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          (_m.owned_by_self() || Thread::current()->is_VM_thread()))),
          err_msg("not safely locked. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s, _m.owned_by_self(): %s, Thread::current()->is_VM_thread(): %s",
                  BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),
                  BOOL_TO_STR(_m.owned_by_self()), BOOL_TO_STR(Thread::current()->is_VM_thread())));
  _code_roots.add(nm);
}

// Remove nm from the code-root set, taking the per-rset lock unless the
// caller already holds CodeCache_lock.
void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert_locked_or_safepoint(CodeCache_lock);

  MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
  _code_roots.remove(nm);

  // Check that there were no duplicates
  guarantee(!_code_roots.contains(nm), "duplicate entry found");
}

// Apply blk to each nmethod in the code-root set.
void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
  _code_roots.nmethods_do(blk);
}

997 998 999 1000
void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
  _code_roots.clean(hr);
}

J
johnc 已提交
1001
size_t HeapRegionRemSet::strong_code_roots_mem_size() {
1002
  return _code_roots.mem_size();
J
johnc 已提交
1003 1004
}

1005
HeapRegionRemSetIterator:: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
1006
  _hrrs(hrrs),
1007
  _g1h(G1CollectedHeap::heap()),
1008 1009 1010
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _bosa(hrrs->bosa()),
  _is(Sparse),
1011
  // Set these values so that we increment to the first region.
1012 1013
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
1014
  _cur_card_in_prt(HeapRegion::CardsPerRegion),
1015 1016 1017 1018 1019
  _fine_cur_prt(NULL),
  _n_yielded_coarse(0),
  _n_yielded_fine(0),
  _n_yielded_sparse(0),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}
1020 1021 1022 1023 1024 1025

bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was the last the last card in the current region?
1026
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
1027 1028 1029 1030 1031 1032 1033 1034
    // Yes: find the next region.  This may leave _coarse_cur_region_index
    // Set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
1035
        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047
      _cur_region_card_offset = _bosa->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}

// Advance to the next set bit in the current PRT's bitmap, moving on to
// the next PRT in the all-fine list when the current one is exhausted.
bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    _cur_card_in_prt =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }
  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
    // _fine_cur_prt may still be NULL in case if there are not PRTs at all for
    // the remembered set.
    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
      return false;
    }
    PerRegionTable* next_prt = _fine_cur_prt->next();
    switch_to_prt(next_prt);
    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }

  card_index = _cur_region_card_offset + _cur_card_in_prt;
  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
            err_msg("Card index " SIZE_FORMAT " must be within the region", _cur_card_in_prt));
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next() {
1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082
  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
}

// Make prt the current fine-grain table and set up the card offset for
// its region; resets the bitmap cursor to just before the first card.
void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
  assert(prt != NULL, "Cannot switch to NULL prt");
  _fine_cur_prt = prt;

  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
  _cur_region_card_offset = _bosa->index_for(r_bot);

  // The bitmap scan for the PRT always scans from _cur_region_cur_card + 1.
  // To avoid special-casing this start case, and not miss the first bitmap
  // entry, initialize _cur_region_cur_card with -1 instead of 0.
  _cur_card_in_prt = (size_t)-1;
}

// Yield the next card index, walking the sparse table first, then the
// fine-grain PRTs, then the coarse map. The switch cases deliberately
// fall through as each source is exhausted.
bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse: {
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
    PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
    if (initial_fine_prt != NULL) {
      switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
    }
  }
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  assert(ParallelGCThreads > 1 ||
         n_yielded() == _hrrs->occupied(),
         "Should have yielded all the cards in the rem set "
         "(in the non-par case).");
  return false;
}



1123 1124 1125 1126
OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
HeapWord**          HeapRegionRemSet::_recorded_cards = NULL;
HeapRegion**        HeapRegionRemSet::_recorded_regions = NULL;
int                 HeapRegionRemSet::_n_recorded = 0;
1127 1128 1129 1130 1131

HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
int*         HeapRegionRemSet::_recorded_event_index = NULL;
int          HeapRegionRemSet::_n_recorded_events = 0;

1132
void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
1133 1134 1135 1136 1137
  if (_recorded_oops == NULL) {
    assert(_n_recorded == 0
           && _recorded_cards == NULL
           && _recorded_regions == NULL,
           "Inv");
Z
zgu 已提交
1138 1139 1140
    _recorded_oops    = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded, mtGC);
    _recorded_cards   = NEW_C_HEAP_ARRAY(HeapWord*,          MaxRecorded, mtGC);
    _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*,        MaxRecorded, mtGC);
1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160
  }
  if (_n_recorded == MaxRecorded) {
    gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
  } else {
    _recorded_cards[_n_recorded] =
      (HeapWord*)align_size_down(uintptr_t(f),
                                 CardTableModRefBS::card_size);
    _recorded_oops[_n_recorded] = f;
    _recorded_regions[_n_recorded] = hr;
    _n_recorded++;
  }
}

// Record an event marker (guarded by G1RecordHRRSEvents), remembering the
// current record() position so print_recorded() can interleave them.
void HeapRegionRemSet::record_event(Event evnt) {
  if (!G1RecordHRRSEvents) return;

  if (_recorded_events == NULL) {
    assert(_n_recorded_events == 0
           && _recorded_event_index == NULL,
           "Inv");
    _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents, mtGC);
    _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents, mtGC);
  }
  if (_n_recorded_events == MaxRecordedEvents) {
    gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
  } else {
    _recorded_events[_n_recorded_events] = evnt;
    _recorded_event_index[_n_recorded_events] = _n_recorded;
    _n_recorded_events++;
  }
}

// Emit the human-readable label for the given event kind on str.
void HeapRegionRemSet::print_event(outputStream* str, Event evnt) {
  if (evnt == Event_EvacStart) {
    str->print("Evac Start");
  } else if (evnt == Event_EvacEnd) {
    str->print("Evac End");
  } else if (evnt == Event_RSUpdateEnd) {
    str->print("RS Update End");
  }
}

void HeapRegionRemSet::print_recorded() {
  int cur_evnt = 0;
1189
  Event cur_evnt_kind = Event_illegal;
1190 1191 1192 1193 1194 1195 1196 1197 1198 1199
  int cur_evnt_ind = 0;
  if (_n_recorded_events > 0) {
    cur_evnt_kind = _recorded_events[cur_evnt];
    cur_evnt_ind = _recorded_event_index[cur_evnt];
  }

  for (int i = 0; i < _n_recorded; i++) {
    while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
      gclog_or_tty->print("Event: ");
      print_event(gclog_or_tty, cur_evnt_kind);
1200
      gclog_or_tty->cr();
1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213
      cur_evnt++;
      if (cur_evnt < MaxRecordedEvents) {
        cur_evnt_kind = _recorded_events[cur_evnt];
        cur_evnt_ind = _recorded_event_index[cur_evnt];
      }
    }
    gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
                        " for ref " PTR_FORMAT ".\n",
                        _recorded_cards[i], _recorded_regions[i]->bottom(),
                        _recorded_oops[i]);
  }
}

T
tonyp 已提交
1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226
void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}

// Forward the per-rset cleanup work to the underlying OtherRegionsTable.
void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}

// Merge the per-task cleanup results back into the global SparsePRT state.
void
HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}

#ifndef PRODUCT
1228 1229
void PerRegionTable::test_fl_mem_size() {
  PerRegionTable* dummy = alloc(NULL);
1230 1231 1232

  size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize;
  assert(dummy->mem_size() > min_prt_size,
1233 1234
         err_msg("PerRegionTable memory usage is suspiciously small, only has " SIZE_FORMAT " bytes. "
                 "Should be at least " SIZE_FORMAT " bytes.", dummy->mem_size(), min_prt_size));
1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245
  free(dummy);
  guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
  // try to reset the state
  _free_list = NULL;
  delete dummy;
}

// Internal VM test entry point for PerRegionTable free-list accounting.
void HeapRegionRemSet::test_prt() {
  PerRegionTable::test_fl_mem_size();
}

1246 1247 1248 1249
void HeapRegionRemSet::test() {
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

1250
  // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273
  // hash bucket.
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Make three references from region 0x101...
1274 1275 1276
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);
1277

1278 1279 1280
  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);
1281

1282 1283 1284
  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);
1285 1286

  // Now cause a coarsening.
1287 1288
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());
1289 1290

  // Now, does iteration yield these three?
1291
  HeapRegionRemSetIterator iter(hrrs);
1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
    sum++;
  }
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
#endif