/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

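// A PerRegionTable records, as a bitmap with one bit per card, which cards
// of a single "from" region contain references into the region that owns
// this remembered set.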
class PerRegionTable: public CHeapObj<mtGC> {
  friend class OtherRegionsTable;
  friend class HeapRegionRemSetIterator;

  HeapRegion*     _hr;
  BitMap          _bm;
  jint            _occupied;

  // next pointer for free/allocated 'all' list
  PerRegionTable* _next;

  // prev pointer for the allocated 'all' list
  PerRegionTable* _prev;

  // next pointer in collision list
  PerRegionTable * _collision_list_next;

  // Global free list of PRTs
  static PerRegionTable* _free_list;

protected:
  // We need access in order to union things into the base table.
  BitMap* bm() { return &_bm; }

  void recount_occupied() {
    _occupied = (jint) bm()->count_one_bits();
  }

  PerRegionTable(HeapRegion* hr) :
    _hr(hr),
    _occupied(0),
    _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */),
    _collision_list_next(NULL), _next(NULL), _prev(NULL)
  {}

  void add_card_work(CardIdx_t from_card, bool par) {
    if (!_bm.at(from_card)) {
      if (par) {
        if (_bm.par_at_put(from_card, 1)) {
          Atomic::inc(&_occupied);
        }
      } else {
        _bm.at_put(from_card, 1);
        _occupied++;
      }
    }
  }

  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
                             from,
                             UseCompressedOops
                             ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
                             : (void *)oopDesc::load_decode_heap_oop((oop*)from));
    }

    HeapRegion* loc_hr = hr();
    // If the test below fails, then this table was reused concurrently
    // with this operation.  This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
    // If the table used to belong to a continues humongous region and is
    // now reused for the corresponding start humongous region, we need to
    // make sure that we detect this. Thus, we call is_in_reserved_raw()
    // instead of just is_in_reserved() here.
    if (loc_hr->is_in_reserved_raw(from)) {
      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
      CardIdx_t from_card = (CardIdx_t)
          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);

      assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
             "Must be in range.");
      add_card_work(from_card, par);
    }
  }

public:

  HeapRegion* hr() const {
    return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
  }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }
    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
    // Make sure that the bitmap clearing above has been finished before publishing
    // this PRT to concurrent threads.
    OrderAccess::release_store_ptr(&_hr, hr);
  }

  void add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ true);
  }

  void seq_add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ false);
  }

  void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
    HeapWord* hr_bot = hr()->bottom();
    size_t hr_first_card_index = ctbs->index_for(hr_bot);
    bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
    recount_occupied();
  }

  void add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ true);
  }

  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // (Destructively) union the bitmap of the current table into the given
  // bitmap (which is assumed to be of the same size.)
  void union_bitmap_into(BitMap* bm) {
    bm->set_union(_bm);
  }

  // Mem size in bytes.
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    CardTableModRefBS::card_size);
    return _bm.at(card_ind);
  }

  // Bulk-free the PRTs from prt to last, assumes that they are
  // linked together using their _next field.
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* fl = _free_list;
      last->set_next(fl);
      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
      if (res == fl) {
        return;
      }
    }
    ShouldNotReachHere();
  }

  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }

  // Returns an initialized PerRegionTable instance.
  static PerRegionTable* alloc(HeapRegion* hr) {
    PerRegionTable* fl = _free_list;
    while (fl != NULL) {
      PerRegionTable* nxt = fl->next();
      PerRegionTable* res =
        (PerRegionTable*)
        Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
      if (res == fl) {
        fl->init(hr, true);
        return fl;
      } else {
        fl = _free_list;
      }
    }
    assert(fl == NULL, "Loop condition.");
    return new PerRegionTable(hr);
  }

  PerRegionTable* next() const { return _next; }
  void set_next(PerRegionTable* next) { _next = next; }
  PerRegionTable* prev() const { return _prev; }
  void set_prev(PerRegionTable* prev) { _prev = prev; }

  // Accessor and Modification routines for the pointer for the
  // singly linked collision list that links the PRTs within the
  // OtherRegionsTable::_fine_grain_regions hash table.
  //
  // It might be useful to also make the collision list doubly linked
  // to avoid iteration over the collisions list during scrubbing/deletion.
  // OTOH there might not be many collisions.

  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }

  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }

  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }

  static size_t fl_mem_size() {
    PerRegionTable* cur = _free_list;
    size_t res = 0;
    while (cur != NULL) {
      res += cur->mem_size();
      cur = cur->next();
    }
    return res;
  }

  static void test_fl_mem_size();
};

PerRegionTable* PerRegionTable::_free_list = NULL;

size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;

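// _fine_grain_regions is a fixed-size hash table of PerRegionTable pointers,
// indexed by the from-region index masked with _mod_max_fine_entries_mask.
// Collisions are chained through each PRT's _collision_list_next pointer.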
OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
  _g1h(G1CollectedHeap::heap()),
  _hr(hr), _m(m),
  _coarse_map(G1CollectedHeap::heap()->max_regions(),
              false /* in-resource-area */),
  _fine_grain_regions(NULL),
  _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
  _n_fine_entries(0), _n_coarse_entries(0),
  _fine_eviction_start(0),
  _sparse_table(hr)
{
  typedef PerRegionTable* PerRegionTablePtr;

  if (_max_fine_entries == 0) {
    assert(_mod_max_fine_entries_mask == 0, "Both or none.");
    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
    _max_fine_entries = (size_t)1 << max_entries_log;
    _mod_max_fine_entries_mask = _max_fine_entries - 1;

    assert(_fine_eviction_sample_size == 0
           && _fine_eviction_stride == 0, "All init at same time.");
    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
  }

  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
                        mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

  if (_fine_grain_regions == NULL) {
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
                          "Failed to allocate _fine_grain_entries.");
  }

  for (size_t i = 0; i < _max_fine_entries; i++) {
    _fine_grain_regions[i] = NULL;
  }
}

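// Maintenance of the doubly-linked list of all fine-grained PRTs owned by
// this table (_first_all_fine_prts / _last_all_fine_prts).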
void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
  // We always insert at the beginning of the list for convenience;
  // the order of entries in this list does not matter.
  if (_first_all_fine_prts != NULL) {
    assert(_first_all_fine_prts->prev() == NULL, "invariant");
    _first_all_fine_prts->set_prev(prt);
    prt->set_next(_first_all_fine_prts);
  } else {
    // this is the first element we insert. Adjust the "last" pointer
    _last_all_fine_prts = prt;
    assert(prt->next() == NULL, "just checking");
  }
  // the new element is always the first element without a predecessor
  prt->set_prev(NULL);
  _first_all_fine_prts = prt;

  assert(prt->prev() == NULL, "just checking");
  assert(_first_all_fine_prts == prt, "just checking");
  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
  if (prt->prev() != NULL) {
    assert(_first_all_fine_prts != prt, "just checking");
    prt->prev()->set_next(prt->next());
    // removing the last element in the list?
    if (_last_all_fine_prts == prt) {
      _last_all_fine_prts = prt->prev();
    }
  } else {
    assert(_first_all_fine_prts == prt, "just checking");
    _first_all_fine_prts = prt->next();
    // list is empty now?
    if (_first_all_fine_prts == NULL) {
      _last_all_fine_prts = NULL;
    }
  }

  if (prt->next() != NULL) {
    prt->next()->set_prev(prt->prev());
  }

  prt->set_next(NULL);
  prt->set_prev(NULL);

  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

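// The FromCardCache remembers, per worker and per target region, the last
// card added to that region's remembered set, so that repeated references
// from the same card can be filtered out cheaply.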
int**  FromCardCache::_cache = NULL;
uint   FromCardCache::_max_regions = 0;
size_t FromCardCache::_static_mem_size = 0;

void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
  guarantee(_cache == NULL, "Should not call this multiple times");

  _max_regions = max_num_regions;
  _cache = Padded2DArray<int, mtGC>::create_unfreeable(n_par_rs,
                                                       _max_regions,
                                                       &_static_mem_size);

  invalidate(0, _max_regions);
}

void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
  guarantee((size_t)start_idx + new_num_regions <= max_uintx,
            err_msg("Trying to invalidate beyond maximum region, from %u size "SIZE_FORMAT,
                    start_idx, new_num_regions));
  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
    uint end_idx = (start_idx + (uint)new_num_regions);
    assert(end_idx <= _max_regions, "Must be within max.");
    for (uint j = start_idx; j < end_idx; j++) {
      set(i, j, InvalidCard);
    }
  }
}

#ifndef PRODUCT
void FromCardCache::print(outputStream* out) {
  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
    for (uint j = 0; j < _max_regions; j++) {
      out->print_cr("_from_card_cache["UINT32_FORMAT"]["UINT32_FORMAT"] = "INT32_FORMAT".",
                    i, j, at(i, j));
    }
  }
}
#endif

void FromCardCache::clear(uint region_idx) {
  uint num_par_remsets = HeapRegionRemSet::num_par_rem_sets();
  for (uint i = 0; i < num_par_remsets; i++) {
    set(i, region_idx, InvalidCard);
  }
}

void OtherRegionsTable::initialize(uint max_regions) {
  FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
}

void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
  FromCardCache::invalidate(start_idx, num_regions);
}

void OtherRegionsTable::print_from_card_cache() {
  FromCardCache::print();
}

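// Adds a reference from 'from' to this table's region. The remembered set is
// kept at three levels of precision: a sparse table of card indices per
// from-region, fine-grained PerRegionTable bitmaps, and a coarse bitmap over
// whole regions. Entries are promoted from sparse to fine to coarse as the
// finer representations fill up.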
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
  uint cur_hrm_ind = hr()->hrm_index();

  if (G1TraceHeapRegionRememberedSet) {
    gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
                                                    from,
                                                    UseCompressedOops
                                                    ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
                                                    : (void *)oopDesc::load_decode_heap_oop((oop*)from));
  }

  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

  if (G1TraceHeapRegionRememberedSet) {
    gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = "INT32_FORMAT")",
                  hr()->bottom(), from_card,
                  FromCardCache::at((uint)tid, cur_hrm_ind));
  }

  if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) {
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("  from-card cache hit.");
    }
    assert(contains_reference(from), err_msg("We just found " PTR_FORMAT " in the FromCardCache", from));
    return;
  }

  // Note that this may be a continued H region.
  HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
  RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();

  // If the region is already coarsened, return.
  if (_coarse_map.at(from_hrm_ind)) {
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("  coarse map hit.");
    }
    assert(contains_reference(from), err_msg("We just found " PTR_FORMAT " in the Coarse table", from));
    return;
  }

  // Otherwise find a per-region table to add it to.
  size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
  PerRegionTable* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      uintptr_t from_hr_bot_card_index =
        uintptr_t(from_hr->bottom())
          >> CardTableModRefBS::card_shift;
      CardIdx_t card_index = from_card - from_hr_bot_card_index;
      assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
             "Must be in range.");
      if (G1HRRSUseSparseTable &&
          _sparse_table.add_card(from_hrm_ind, card_index)) {
        if (G1RecordHRRSOops) {
          HeapRegionRemSet::record(hr(), from);
          if (G1TraceHeapRegionRememberedSet) {
            gclog_or_tty->print("   Added card " PTR_FORMAT " to region "
                                "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                                align_size_down(uintptr_t(from),
                                                CardTableModRefBS::card_size),
                                hr()->bottom(), from);
          }
        }
        if (G1TraceHeapRegionRememberedSet) {
          gclog_or_tty->print_cr("   added card to sparse table.");
        }
        assert(contains_reference_locked(from), err_msg("We just added " PTR_FORMAT " to the Sparse table", from));
        return;
      } else {
        if (G1TraceHeapRegionRememberedSet) {
          gclog_or_tty->print_cr("   [tid %d] sparse table entry "
                        "overflow(f: %d, t: %u)",
                        tid, from_hrm_ind, cur_hrm_ind);
        }
      }

      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      PerRegionTable* first_prt = _fine_grain_regions[ind];
      prt->set_collision_list_next(first_prt);
      _fine_grain_regions[ind] = prt;
      _n_fine_entries++;

      if (G1HRRSUseSparseTable) {
        // Transfer from sparse to fine-grain.
        SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
        assert(sprt_entry != NULL, "There should have been an entry");
        for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
          CardIdx_t c = sprt_entry->card(i);
          if (c != SparsePRTEntry::NullEntry) {
            prt->add_card(c);
          }
        }
        // Now we can delete the sparse entry.
        bool res = _sparse_table.delete_entry(from_hrm_ind);
        assert(res, "It should have been there.");
      }
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse.  But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

  prt->add_reference(from);

  if (G1RecordHRRSOops) {
    HeapRegionRemSet::record(hr(), from);
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print("Added card " PTR_FORMAT " to region "
                          "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                          align_size_down(uintptr_t(from),
                                          CardTableModRefBS::card_size),
                          hr()->bottom(), from);
    }
  }
  assert(contains_reference(from), err_msg("We just added " PTR_FORMAT " to the PRT", from));
}

PerRegionTable*
OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
  assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
  PerRegionTable* prt = _fine_grain_regions[ind];
  while (prt != NULL && prt->hr() != hr) {
    prt = prt->collision_list_next();
  }
  // Loop postcondition is the method postcondition.
  return prt;
}

jint OtherRegionsTable::_n_coarsenings = 0;

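// Evicts the most heavily occupied fine-grained table found by sampling the
// hash table, sets the corresponding bit in the coarse map, and returns the
// evicted table for reuse.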
PerRegionTable* OtherRegionsTable::delete_region_table() {
  assert(_m->owned_by_self(), "Precondition");
  assert(_n_fine_entries == _max_fine_entries, "Precondition");
  PerRegionTable* max = NULL;
  jint max_occ = 0;
  PerRegionTable** max_prev = NULL;
  size_t max_ind;

  size_t i = _fine_eviction_start;
  for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
    size_t ii = i;
    // Make sure we get a non-NULL sample.
    while (_fine_grain_regions[ii] == NULL) {
      ii++;
      if (ii == _max_fine_entries) ii = 0;
      guarantee(ii != i, "We must find one.");
    }
    PerRegionTable** prev = &_fine_grain_regions[ii];
    PerRegionTable* cur = *prev;
    while (cur != NULL) {
      jint cur_occ = cur->occupied();
      if (max == NULL || cur_occ > max_occ) {
        max = cur;
        max_prev = prev;
        max_ind = i;
        max_occ = cur_occ;
      }
      prev = cur->collision_list_next_addr();
      cur = cur->collision_list_next();
    }
    i = i + _fine_eviction_stride;
    if (i >= _n_fine_entries) i = i - _n_fine_entries;
  }

  _fine_eviction_start++;

  if (_fine_eviction_start >= _n_fine_entries) {
    _fine_eviction_start -= _n_fine_entries;
  }

  guarantee(max != NULL, "Since _n_fine_entries > 0");
  guarantee(max_prev != NULL, "Since max != NULL.");

  // Set the corresponding coarse bit.
  size_t max_hrm_index = (size_t) max->hr()->hrm_index();
  if (!_coarse_map.at(max_hrm_index)) {
    _coarse_map.at_put(max_hrm_index, true);
    _n_coarse_entries++;
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
                 "for region [" PTR_FORMAT "...] (%d coarse entries).\n",
                 hr()->bottom(),
                 max->hr()->bottom(),
                 _n_coarse_entries);
    }
  }

  // Unsplice.
  *max_prev = max->collision_list_next();
  Atomic::inc(&_n_coarsenings);
  _n_fine_entries--;
  return max;
}


// At present, this must be called stop-world single-threaded.
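// Scrubbing removes entries whose from-region is not marked in region_bm and,
// within the surviving fine-grained tables, cards that are not marked in card_bm.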
void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                              BitMap* region_bm, BitMap* card_bm) {
  // First eliminate garbage regions from the coarse map.
  if (G1RSScrubVerbose) {
    gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrm_index());
  }

  assert(_coarse_map.size() == region_bm->size(), "Precondition");
  if (G1RSScrubVerbose) {
    gclog_or_tty->print("   Coarse map: before = "SIZE_FORMAT"...",
                        _n_coarse_entries);
  }
  _coarse_map.set_intersection(*region_bm);
  _n_coarse_entries = _coarse_map.count_one_bits();
  if (G1RSScrubVerbose) {
    gclog_or_tty->print_cr("   after = "SIZE_FORMAT".", _n_coarse_entries);
  }

  // Now do the fine-grained maps.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    PerRegionTable* cur = _fine_grain_regions[i];
    PerRegionTable** prev = &_fine_grain_regions[i];
    while (cur != NULL) {
      PerRegionTable* nxt = cur->collision_list_next();
      // If the entire region is dead, eliminate.
      if (G1RSScrubVerbose) {
        gclog_or_tty->print_cr("     For other region %u:",
                               cur->hr()->hrm_index());
      }
      if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
        *prev = nxt;
        cur->set_collision_list_next(NULL);
        _n_fine_entries--;
        if (G1RSScrubVerbose) {
          gclog_or_tty->print_cr("          deleted via region map.");
        }
        unlink_from_all(cur);
        PerRegionTable::free(cur);
      } else {
        // Do fine-grain elimination.
        if (G1RSScrubVerbose) {
          gclog_or_tty->print("          occ: before = %4d.", cur->occupied());
        }
        cur->scrub(ctbs, card_bm);
        if (G1RSScrubVerbose) {
          gclog_or_tty->print_cr("          after = %4d.", cur->occupied());
        }
        // Did that empty the table completely?
        if (cur->occupied() == 0) {
          *prev = nxt;
          cur->set_collision_list_next(NULL);
          _n_fine_entries--;
          unlink_from_all(cur);
          PerRegionTable::free(cur);
        } else {
          prev = cur->collision_list_next_addr();
        }
      }
      cur = nxt;
    }
  }
  // Since we may have deleted a from_card_cache entry from the RS, clear
  // the FCC.
  clear_fcc();
}

bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
  if (limit <= (size_t)G1RSetSparseRegionEntries) {
    return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
  } else {
    // Current uses of this method may only use values less than G1RSetSparseRegionEntries
    // for the limit. The solution, comparing against occupied() would be too slow
    // at this time.
    Unimplemented();
    return false;
  }
}

bool OtherRegionsTable::is_empty() const {
  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
}

size_t OtherRegionsTable::occupied() const {
  size_t sum = occ_fine();
  sum += occ_sparse();
  sum += occ_coarse();
  return sum;
}

size_t OtherRegionsTable::occ_fine() const {
  size_t sum = 0;

  size_t num = 0;
  PerRegionTable * cur = _first_all_fine_prts;
  while (cur != NULL) {
    sum += cur->occupied();
    cur = cur->next();
    num++;
  }
  guarantee(num == _n_fine_entries, "just checking");
  return sum;
}

size_t OtherRegionsTable::occ_coarse() const {
  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
}

size_t OtherRegionsTable::occ_sparse() const {
  return _sparse_table.occupied();
}

size_t OtherRegionsTable::mem_size() const {
  size_t sum = 0;
  // all PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
      _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
  sum += (_sparse_table.mem_size());
  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
  return sum;
}

size_t OtherRegionsTable::static_mem_size() {
  return FromCardCache::static_mem_size();
}

size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size();
}

void OtherRegionsTable::clear_fcc() {
  FromCardCache::clear(hr()->hrm_index());
}

void OtherRegionsTable::clear() {
  // if there are no entries, skip this step
  if (_first_all_fine_prts != NULL) {
    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  } else {
    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
  }

  _first_all_fine_prts = _last_all_fine_prts = NULL;
  _sparse_table.clear();
  _coarse_map.clear();
  _n_fine_entries = 0;
  _n_coarse_entries = 0;

  clear_fcc();
}

bool OtherRegionsTable::del_single_region_table(size_t ind,
                                                HeapRegion* hr) {
  assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
  PerRegionTable** prev_addr = &_fine_grain_regions[ind];
  PerRegionTable* prt = *prev_addr;
  while (prt != NULL && prt->hr() != hr) {
    prev_addr = prt->collision_list_next_addr();
    prt = prt->collision_list_next();
  }
  if (prt != NULL) {
    assert(prt->hr() == hr, "Loop postcondition.");
    *prev_addr = prt->collision_list_next();
    unlink_from_all(prt);
    PerRegionTable::free(prt);
    _n_fine_entries--;
    return true;
  } else {
    return false;
  }
}

bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}

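// Checks the coarse map first, then the matching fine-grained PRT, and
// finally the sparse table.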
bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing_raw(from);
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                     hr);
  if (prt != NULL) {
    return prt->contains_reference(from);

  } else {
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = from_card - hr_bot_card_index;
    assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
           "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}

void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}

// Determines how many threads can add records to an rset in parallel.
// This can be done either by mutator threads together with the
// concurrent refinement threads, or by GC threads.
uint HeapRegionRemSet::num_par_rem_sets() {
  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
}

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                   HeapRegion* hr)
  : _bosa(bosa),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true),
    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
  reset_for_par_iteration();
}

void HeapRegionRemSet::setup_remset_size() {
  // Set up the sparse and fine-grain table sizes.
  // table_size = base * (log(region_size / 1M) + 1)
  const int LOG_M = 20;
  int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
}

bool HeapRegionRemSet::claim_iter() {
  if (_iter_state != Unclaimed) return false;
  jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
  return (res == Unclaimed);
}

void HeapRegionRemSet::set_iter_complete() {
  _iter_state = Complete;
}

bool HeapRegionRemSet::iter_is_complete() {
  return _iter_state == Complete;
}

#ifndef PRODUCT
void HeapRegionRemSet::print() {
  HeapRegionRemSetIterator iter(this);
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
  }
  if (iter.n_yielded() != occupied()) {
    gclog_or_tty->print_cr("Yielded disagrees with occupied:");
    gclog_or_tty->print_cr("  %6d yielded (%6d coarse, %6d fine).",
                  iter.n_yielded(),
                  iter.n_yielded_coarse(), iter.n_yielded_fine());
    gclog_or_tty->print_cr("  %6d occ     (%6d coarse, %6d fine).",
                  occupied(), occ_coarse(), occ_fine());
  }
  guarantee(iter.n_yielded() == occupied(),
            "We should have yielded all the represented cards.");
}
#endif

void HeapRegionRemSet::cleanup() {
  SparsePRT::cleanup_all();
}

void HeapRegionRemSet::clear() {
  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  clear_locked();
}

void HeapRegionRemSet::clear_locked() {
  _code_roots.clear();
  _other_regions.clear();
  assert(occupied_locked() == 0, "Should be clear.");
  reset_for_par_iteration();
}

void HeapRegionRemSet::reset_for_par_iteration() {
  _iter_state = Unclaimed;
  _iter_claimed = 0;
  // It's good to check this to make sure that the two methods are in sync.
  assert(verify_ready_for_par_iteration(), "post-condition");
}

void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
                             BitMap* region_bm, BitMap* card_bm) {
  _other_regions.scrub(ctbs, region_bm, card_bm);
}

// Code roots support
//
// The code root set is protected by two separate locking schemes
// When at safepoint the per-hrrs lock must be held during modifications
// except when doing a full gc.
// When not at safepoint the CodeCache_lock must be held during modifications.
// When concurrent readers access the contains() function
// (during the evacuation phase) no removals are allowed.

void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  // Optimistic unlocked contains-check
  if (!_code_roots.contains(nm)) {
    MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
    add_strong_code_root_locked(nm);
  }
}

void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
  assert(nm != NULL, "sanity");
  _code_roots.add(nm);
}

void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert_locked_or_safepoint(CodeCache_lock);

  MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
  _code_roots.remove(nm);

  // Check that there were no duplicates
  guarantee(!_code_roots.contains(nm), "duplicate entry found");
}

void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
  _code_roots.nmethods_do(blk);
}

void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
  _code_roots.clean(hr);
}

size_t HeapRegionRemSet::strong_code_roots_mem_size() {
  return _code_roots.mem_size();
}

HeapRegionRemSetIterator::HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
  _hrrs(hrrs),
  _g1h(G1CollectedHeap::heap()),
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _bosa(hrrs->bosa()),
  _is(Sparse),
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
  _cur_card_in_prt(HeapRegion::CardsPerRegion),
  _fine_cur_prt(NULL),
  _n_yielded_coarse(0),
  _n_yielded_fine(0),
  _n_yielded_sparse(0),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}

bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was that the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region.  This may leave _coarse_cur_region_index
    // set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
      _cur_region_card_offset = _bosa->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    _cur_card_in_prt =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }
  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
    // _fine_cur_prt may still be NULL in case there are no PRTs at all for
    // the remembered set.
    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
      return false;
    }
    PerRegionTable* next_prt = _fine_cur_prt->next();
    switch_to_prt(next_prt);
    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }

  card_index = _cur_region_card_offset + _cur_card_in_prt;
  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
            err_msg("Card index "SIZE_FORMAT" must be within the region", _cur_card_in_prt));
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next() {
  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
}

void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
  assert(prt != NULL, "Cannot switch to NULL prt");
  _fine_cur_prt = prt;

  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
  _cur_region_card_offset = _bosa->index_for(r_bot);

  // The bitmap scan for the PRT always scans from _cur_region_cur_card + 1.
  // To avoid special-casing this start case, and not miss the first bitmap
  // entry, initialize _cur_region_cur_card with -1 instead of 0.
  _cur_card_in_prt = (size_t)-1;
}

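// Yields card indices from the sparse table first, then from the fine-grained
// PRTs, and finally from the coarse map.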
bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse: {
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
    PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
    if (initial_fine_prt != NULL) {
      switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
    }
  }
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  assert(ParallelGCThreads > 1 ||
         n_yielded() == _hrrs->occupied(),
         "Should have yielded all the cards in the rem set "
         "(in the non-par case).");
  return false;
}



OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
HeapWord**          HeapRegionRemSet::_recorded_cards = NULL;
HeapRegion**        HeapRegionRemSet::_recorded_regions = NULL;
int                 HeapRegionRemSet::_n_recorded = 0;

HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
int*         HeapRegionRemSet::_recorded_event_index = NULL;
int          HeapRegionRemSet::_n_recorded_events = 0;

void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
  if (_recorded_oops == NULL) {
    assert(_n_recorded == 0
           && _recorded_cards == NULL
           && _recorded_regions == NULL,
           "Inv");
    _recorded_oops    = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded, mtGC);
    _recorded_cards   = NEW_C_HEAP_ARRAY(HeapWord*,          MaxRecorded, mtGC);
    _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*,        MaxRecorded, mtGC);
  }
  if (_n_recorded == MaxRecorded) {
    gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
  } else {
    _recorded_cards[_n_recorded] =
      (HeapWord*)align_size_down(uintptr_t(f),
                                 CardTableModRefBS::card_size);
    _recorded_oops[_n_recorded] = f;
    _recorded_regions[_n_recorded] = hr;
    _n_recorded++;
  }
}

void HeapRegionRemSet::record_event(Event evnt) {
  if (!G1RecordHRRSEvents) return;

  if (_recorded_events == NULL) {
    assert(_n_recorded_events == 0
           && _recorded_event_index == NULL,
           "Inv");
    _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents, mtGC);
    _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents, mtGC);
  }
  if (_n_recorded_events == MaxRecordedEvents) {
    gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
  } else {
    _recorded_events[_n_recorded_events] = evnt;
    _recorded_event_index[_n_recorded_events] = _n_recorded;
    _n_recorded_events++;
  }
}

void HeapRegionRemSet::print_event(outputStream* str, Event evnt) {
  switch (evnt) {
  case Event_EvacStart:
    str->print("Evac Start");
    break;
  case Event_EvacEnd:
    str->print("Evac End");
    break;
  case Event_RSUpdateEnd:
    str->print("RS Update End");
    break;
  }
}

void HeapRegionRemSet::print_recorded() {
  int cur_evnt = 0;
  Event cur_evnt_kind = Event_illegal;
  int cur_evnt_ind = 0;
  if (_n_recorded_events > 0) {
    cur_evnt_kind = _recorded_events[cur_evnt];
    cur_evnt_ind = _recorded_event_index[cur_evnt];
  }

  for (int i = 0; i < _n_recorded; i++) {
    while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
      gclog_or_tty->print("Event: ");
      print_event(gclog_or_tty, cur_evnt_kind);
      gclog_or_tty->cr();
      cur_evnt++;
      if (cur_evnt < MaxRecordedEvents) {
        cur_evnt_kind = _recorded_events[cur_evnt];
        cur_evnt_ind = _recorded_event_index[cur_evnt];
      }
    }
    gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
                        " for ref " PTR_FORMAT ".\n",
                        _recorded_cards[i], _recorded_regions[i]->bottom(),
                        _recorded_oops[i]);
  }
}

void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}

void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}

void
HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}

#ifndef PRODUCT
void PerRegionTable::test_fl_mem_size() {
  PerRegionTable* dummy = alloc(NULL);

  size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize;
  assert(dummy->mem_size() > min_prt_size,
         err_msg("PerRegionTable memory usage is suspiciously small, only has "SIZE_FORMAT" bytes. "
                 "Should be at least "SIZE_FORMAT" bytes.", dummy->mem_size(), min_prt_size));
  free(dummy);
  guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
  // try to reset the state
  _free_list = NULL;
  delete dummy;
}

void HeapRegionRemSet::test_prt() {
  PerRegionTable::test_fl_mem_size();
}

void HeapRegionRemSet::test() {
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
  // hash bucket.
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Make three references from region 0x101...
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

  // Now cause a coarsening.
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter(hrrs);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
    sum++;
  }
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
#endif