/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP

#include "gc_implementation/g1/sparsePRT.hpp"

// Remembered set for a heap region.  Represents a set of "cards" that
// contain pointers into the owning heap region.  Cards are defined somewhat
// abstractly, in terms of what the "BlockOffsetTable" in use can parse.

class G1CollectedHeap;
class G1BlockOffsetSharedArray;
class HeapRegion;
class HeapRegionRemSetIterator;
class PerRegionTable;
class SparsePRT;
class nmethod;

// Essentially a wrapper around SparsePRTCleanupTask. See
// sparsePRT.hpp for more details.
class HRRSCleanupTask : public SparsePRTCleanupTask {
};

// The "_coarse_map" is a bitmap with one bit for each region, where set
// bits indicate that the corresponding region may contain some pointer
// into the owning region.

// The "_fine_grain_entries" array is an open hash table of PerRegionTables
// (PRTs), indicating regions for which we're keeping the RS as a set of
// cards.  The strategy is to cap the size of the fine-grain table,
// deleting an entry and setting the corresponding coarse-grained bit when
// we would overflow this cap.

// We use a mixture of locking and lock-free techniques here.  We allow
// threads to locate PRTs without locking, but threads attempting to alter
// a bucket list obtain a lock.  This means that any failing attempt to
// find a PRT must be retried with the lock.  It might seem dangerous that
// a read can find a PRT that is concurrently deleted.  This is all right,
// because:
//
//   1) We only actually free PRTs at safe points (though we reuse them at
//      other times).
//   2) We find PRTs in an attempt to add entries.  If a PRT is deleted,
//      its _coarse_map bit is set, so the card we were attempting to add
//      is represented.  If a deleted PRT is re-used, a thread adding a bit,
//      thinking the PRT is for a different region, does no harm.
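
// As a rough illustration of the protocol described above (a sketch only;
// the real code lives in heapRegionRemSet.cpp), adding an entry first probes
// the fine-grain table without locking and retries under "_m" on a miss:
//
//   PerRegionTable* prt = find_region_table(ind, from_hr);  // lock-free probe
//   if (prt == NULL) {
//     MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); // take the lock
//     prt = find_region_table(ind, from_hr);                 // retry under _m
//     if (prt == NULL) {
//       // Still absent: record the card in the sparse table, or allocate a
//       // new PRT (possibly evicting/coarsening one) while holding the lock.
//     }
//   }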

class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
  friend class HeapRegionRemSetIterator;

  G1CollectedHeap* _g1h;
  Mutex            _m;
  HeapRegion*      _hr;

  // These are protected by "_m".
  BitMap      _coarse_map;
  size_t      _n_coarse_entries;
  static jint _n_coarsenings;

  PerRegionTable** _fine_grain_regions;
  size_t           _n_fine_entries;

  // The fine grain remembered sets are doubly linked together using
  // their 'next' and 'prev' fields.
  // This allows fast bulk freeing of all the fine grain remembered
  // set entries, and fast finding of all of them without iterating
  // over the _fine_grain_regions table.
  PerRegionTable * _first_all_fine_prts;
  PerRegionTable * _last_all_fine_prts;

  // Used to sample a subset of the fine grain PRTs to determine which
  // PRT to evict and coarsen.
  size_t        _fine_eviction_start;
  static size_t _fine_eviction_stride;
  static size_t _fine_eviction_sample_size;
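
  // A hedged sketch of how these sampling fields might drive eviction when
  // the fine-grain table is full (the actual policy lives in
  // delete_region_table() in heapRegionRemSet.cpp): examine a strided sample
  // of buckets and coarsen the candidate judged cheapest to lose, e.g.:
  //
  //   size_t i = _fine_eviction_start;
  //   for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
  //     // consider the PRTs hanging off _fine_grain_regions[i] as candidates
  //     i = (i + _fine_eviction_stride) % _max_fine_entries;
  //   }
  //   _fine_eviction_start = i;  // rotate the starting point for next time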

  SparsePRT   _sparse_table;

  // These are static after init.
  static size_t _max_fine_entries;
  static size_t _mod_max_fine_entries_mask;

  // Requires "prt" to be the first element of the bucket list appropriate
  // for "hr".  If this list contains an entry for "hr", return it,
  // otherwise return "NULL".
  PerRegionTable* find_region_table(size_t ind, HeapRegion* hr) const;

  // Find, delete, and return a candidate PerRegionTable, if any exists,
  // adding the deleted region to the coarse bitmap.  Requires the caller
  // to hold _m, and the fine-grain table to be full.
  PerRegionTable* delete_region_table();

  // If a PRT for "hr" is in the bucket list indicated by "ind" (which must
  // be the correct index for "hr"), delete it and return true; else return
  // false.
  bool del_single_region_table(size_t ind, HeapRegion* hr);

  // Indexed by thread X heap region, to minimize thread contention.
  static int** _from_card_cache;
  static size_t _from_card_cache_max_regions;
  static size_t _from_card_cache_mem_size;
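
  // An illustrative (assumed, not quoted from the .cpp file) use of this
  // cache: before consulting the tables above, add_reference() can skip a
  // card it has just recorded from the same thread for the same region:
  //
  //   int card = /* card index of "from" */;
  //   if (_from_card_cache[tid][region_index] == card) {
  //     return;  // duplicate of the last card added by this thread
  //   }
  //   _from_card_cache[tid][region_index] = card;
  //   // ...otherwise fall through to the sparse / fine / coarse structures.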

  // link/add the given fine grain remembered set into the "all" list
  void link_to_all(PerRegionTable * prt);
  // unlink/remove the given fine grain remembered set from the "all" list
  void unlink_from_all(PerRegionTable * prt);

public:
  OtherRegionsTable(HeapRegion* hr);

  HeapRegion* hr() const { return _hr; }

  // For now.  Could "expand" some tables in the future, so that this made
  // sense.
  void add_reference(OopOrNarrowOopStar from, int tid);

  // Removes any entries shown by the given bitmaps to contain only dead
  // objects.
  void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);

  // Takes a lock internally, even though declared const.
  size_t occupied() const;
  size_t occ_fine() const;
  size_t occ_coarse() const;
  size_t occ_sparse() const;

  static jint n_coarsenings() { return _n_coarsenings; }

  // Returns size in bytes.
  // Takes a lock internally, even though declared const.
  size_t mem_size() const;
  static size_t static_mem_size();
  static size_t fl_mem_size();

  bool contains_reference(OopOrNarrowOopStar from) const;
  bool contains_reference_locked(OopOrNarrowOopStar from) const;

  void clear();

  // Specifically clear the from_card_cache.
  void clear_fcc();

  // "from_hr" is being cleared; remove any entries from it.
  void clear_incoming_entry(HeapRegion* from_hr);

  void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);

  // Declare the heap size (in # of regions) to the OtherRegionsTable.
  // (Uses it to initialize from_card_cache).
  static void init_from_card_cache(size_t max_regions);

  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
  // Make sure any entries for higher regions are invalid.
  static void shrink_from_card_cache(size_t new_n_regs);

  static void print_from_card_cache();
};

class HeapRegionRemSet : public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class HeapRegionRemSetIterator;

public:
  enum Event {
    Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd
  };

private:
  G1BlockOffsetSharedArray* _bosa;
  G1BlockOffsetSharedArray* bosa() const { return _bosa; }

  // A list of code blobs (nmethods) whose code contains pointers into
  // the region that owns this RSet.
  GrowableArray<nmethod*>* _strong_code_roots_list;

  OtherRegionsTable _other_regions;

  enum ParIterState { Unclaimed, Claimed, Complete };
  volatile ParIterState _iter_state;
  volatile jlong _iter_claimed;

  // Unused unless G1RecordHRRSOops is true.

  static const int MaxRecorded = 1000000;
  static OopOrNarrowOopStar* _recorded_oops;
  static HeapWord**          _recorded_cards;
  static HeapRegion**        _recorded_regions;
  static int                 _n_recorded;

  static const int MaxRecordedEvents = 1000;
  static Event*       _recorded_events;
  static int*         _recorded_event_index;
  static int          _n_recorded_events;

  static void print_event(outputStream* str, Event evnt);

public:
  HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                   HeapRegion* hr);

  static int num_par_rem_sets();
  static void setup_remset_size();

  HeapRegion* hr() const {
    return _other_regions.hr();
  }

  size_t occupied() const {
    return _other_regions.occupied();
  }
  size_t occ_fine() const {
    return _other_regions.occ_fine();
  }
  size_t occ_coarse() const {
    return _other_regions.occ_coarse();
  }
  size_t occ_sparse() const {
    return _other_regions.occ_sparse();
  }

  static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); }

  // Used in the sequential case.
  void add_reference(OopOrNarrowOopStar from) {
    _other_regions.add_reference(from, 0);
  }

  // Used in the parallel case.
  void add_reference(OopOrNarrowOopStar from, int tid) {
    _other_regions.add_reference(from, tid);
  }

  // Removes any entries shown by the given bitmaps to contain only dead
  // objects.
  void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);

  // The region is being reclaimed; clear its remset, and any mention of
  // entries for this region in other remsets.
  void clear();

  // Attempt to claim the region.  Returns true iff this call caused an
  // atomic transition from Unclaimed to Claimed.
  bool claim_iter();
  // Sets the iteration state to "complete".
  void set_iter_complete();
  // Returns "true" iff the region's iteration is complete.
  bool iter_is_complete();
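
  // A hedged usage sketch (the real callers live elsewhere in the G1
  // sources): a worker typically guards a full iteration of this remset
  // with the claim protocol above:
  //
  //   if (hrrs->claim_iter()) {              // we won the race for this region
  //     HeapRegionRemSetIterator iter(hrrs);
  //     size_t card_index;
  //     while (iter.has_next(card_index)) {
  //       // ...process the card...
  //     }
  //     hrrs->set_iter_complete();
  //   }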

  // Support for claiming blocks of cards during iteration
  size_t iter_claimed() const { return (size_t)_iter_claimed; }
  // Claim the next block of cards
  size_t iter_claimed_next(size_t step) {
    size_t current, next;
    do {
      current = iter_claimed();
      next = current + step;
    } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
    return current;
  }
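
  // For example (a sketch under assumed names, not code from this file), a
  // worker sharing the iteration with other threads might claim blocks of
  // "BlockSize" cards at a time:
  //
  //   size_t claimed = hrrs->iter_claimed_next(BlockSize);
  //   // ...skip cards below "claimed", process cards in
  //   // [claimed, claimed + BlockSize), then claim the next block...
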
  void reset_for_par_iteration();

  bool verify_ready_for_par_iteration() {
    return (_iter_state == Unclaimed) && (_iter_claimed == 0);
  }

  // The actual # of bytes this hr_remset takes up.
  // Note: also includes the strong code root set.
  size_t mem_size() {
    return _other_regions.mem_size()
      // This correction is necessary because the above already includes the
      // size of the embedded OtherRegionsTable.
      + (sizeof(this) - sizeof(OtherRegionsTable))
      + strong_code_roots_mem_size();
  }

  // Returns the memory occupancy of all static data structures associated
  // with remembered sets.
  static size_t static_mem_size() {
    return OtherRegionsTable::static_mem_size();
  }

  // Returns the memory occupancy of all free_list data structures associated
  // with remembered sets.
  static size_t fl_mem_size() {
    return OtherRegionsTable::fl_mem_size();
  }

  bool contains_reference(OopOrNarrowOopStar from) const {
    return _other_regions.contains_reference(from);
  }

  // Routines for managing the list of code roots that point into
  // the heap region that owns this RSet.
  void add_strong_code_root(nmethod* nm);
  void remove_strong_code_root(nmethod* nm);

  // During a collection, migrate the successfully evacuated strong
  // code roots that referenced into the region that owns this RSet
  // to the RSets of the new regions that they now point into.
  // Unsuccessfully evacuated code roots are not migrated.
  void migrate_strong_code_roots();

  // Applies blk->do_code_blob() to each of the entries in
  // the strong code roots list
  void strong_code_roots_do(CodeBlobClosure* blk) const;

  // Returns the number of elements in the strong code roots list
  int strong_code_roots_list_length() {
    return _strong_code_roots_list->length();
  }

  // Returns true if the strong code roots contains the given
  // nmethod.
  bool strong_code_roots_list_contains(nmethod* nm) {
    return _strong_code_roots_list->contains(nm);
  }

  // Returns the amount of memory, in bytes, currently
  // consumed by the strong code roots.
  size_t strong_code_roots_mem_size();

  void print() const;

  // Called during a stop-world phase to perform any deferred cleanups.
  static void cleanup();

  // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
  // (Uses it to initialize from_card_cache).
  static void init_heap(uint max_regions) {
    OtherRegionsTable::init_from_card_cache((size_t) max_regions);
  }

  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
  static void shrink_heap(uint new_n_regs) {
    OtherRegionsTable::shrink_from_card_cache((size_t) new_n_regs);
  }

#ifndef PRODUCT
  static void print_from_card_cache() {
    OtherRegionsTable::print_from_card_cache();
  }
#endif

  static void record(HeapRegion* hr, OopOrNarrowOopStar f);
  static void print_recorded();
  static void record_event(Event evnt);

  // These are wrappers for the similarly-named methods on
  // SparsePRT. Look at sparsePRT.hpp for more details.
  static void reset_for_cleanup_tasks();
  void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
  static void finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task);

  // Run unit tests.
#ifndef PRODUCT
  static void test_prt();
  static void test();
#endif
};

class HeapRegionRemSetIterator : public StackObj {

  // The region RSet over which we're iterating.
  const HeapRegionRemSet* _hrrs;

  // Local caching of HRRS fields.
  const BitMap*             _coarse_map;
  PerRegionTable**          _fine_grain_regions;

  G1BlockOffsetSharedArray* _bosa;
  G1CollectedHeap*          _g1h;

  // The number yielded since initialization.
  size_t _n_yielded_fine;
  size_t _n_yielded_coarse;
  size_t _n_yielded_sparse;

  // Indicates which granularity of table we're currently iterating over.
  // We start iterating over the sparse table, progress to the fine grain
  // table, and then finish with the coarse table.
  // See HeapRegionRemSetIterator::has_next() and the sketch below.
  enum IterState {
    Sparse,
    Fine,
    Coarse
  };
  IterState _is;
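
  // Conceptually (a sketch of the dispatch, assuming the implementation in
  // heapRegionRemSet.cpp follows the state machine described above):
  //
  //   switch (_is) {
  //     case Sparse: if (_sparse_iter.has_next(card_index)) return true;
  //                  _is = Fine;    // sparse exhausted, fall through
  //     case Fine:   if (fine_has_next(card_index)) return true;
  //                  _is = Coarse;  // fine exhausted, fall through
  //     case Coarse: return coarse_has_next(card_index);
  //   }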

  // In both kinds of iteration, heap offset of first card of current
  // region.
  size_t _cur_region_card_offset;
  // Card offset within cur region.
  size_t _cur_region_cur_card;

  // Coarse table iteration fields:

  // Current region index.
  int    _coarse_cur_region_index;
  size_t _coarse_cur_region_cur_card;

  bool coarse_has_next(size_t& card_index);

  // Fine table iteration fields:

  // Index of bucket-list we're working on.
  int _fine_array_index;

  // The PerRegionTable we're currently iterating over within the current
  // bucket list.
  PerRegionTable* _fine_cur_prt;

  /* SparsePRT::*/ SparsePRTIter _sparse_iter;

  void fine_find_next_non_null_prt();

  bool fine_has_next();
  bool fine_has_next(size_t& card_index);

public:
  // We require an iterator to be initialized before use, so the
  // constructor does little.
  HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs);

  // If there remains one or more cards to be yielded, returns true and
  // sets "card_index" to one of those cards (which is then considered
  // yielded.)   Otherwise, returns false (and leaves "card_index"
  // undefined.)
  bool has_next(size_t& card_index);

  size_t n_yielded_fine() { return _n_yielded_fine; }
  size_t n_yielded_coarse() { return _n_yielded_coarse; }
  size_t n_yielded_sparse() { return _n_yielded_sparse; }
  size_t n_yielded() {
    return n_yielded_fine() + n_yielded_coarse() + n_yielded_sparse();
  }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP