/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
29
#include "gc_implementation/g1/g1AllocRegion.hpp"
30
#include "gc_implementation/g1/g1HRPrinter.hpp"
31
#include "gc_implementation/g1/g1RemSet.hpp"
32
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
33
#include "gc_implementation/g1/heapRegionSeq.hpp"
34
#include "gc_implementation/g1/heapRegionSets.hpp"
35
#include "gc_implementation/shared/hSpaceCounters.hpp"
36 37 38 39 40
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"
#include "memory/sharedHeap.hpp"

// A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
// It uses the "Garbage First" heap organization and algorithm, which
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.

class HeapRegion;
class HRRSCleanupTask;
class PermanentGenerationSpec;
class GenerationSpec;
class OopsInHeapRegionClosure;
class G1ScanHeapEvacClosure;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CollectorPolicy;
class GenRemSet;
class G1RemSet;
class HeapRegionRemSetIterator;
class ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class GenerationCounters;

typedef OverflowTaskQueue<StarTask>         RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;

typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

enum GCAllocPurpose {
  GCAllocForTenured,
  GCAllocForSurvived,
  GCAllocPurposeCount
};

class YoungList : public CHeapObj {
private:
  G1CollectedHeap* _g1h;

  HeapRegion* _head;

  HeapRegion* _survivor_head;
  HeapRegion* _survivor_tail;

  HeapRegion* _curr;

  size_t      _length;
  size_t      _survivor_length;

  size_t      _last_sampled_rs_lengths;
  size_t      _sampled_rs_lengths;

  void         empty_list(HeapRegion* list);

public:
  YoungList(G1CollectedHeap* g1h);

  void         push_region(HeapRegion* hr);
  void         add_survivor_region(HeapRegion* hr);

  void         empty_list();
  bool         is_empty() { return _length == 0; }
  size_t       length() { return _length; }
  size_t       survivor_length() { return _survivor_length; }

  // Currently we do not keep track of the used byte sum for the
  // young list and the survivors, and it would be quite a lot of work
  // to do so. When we eventually replace the young list with
  // instances of HeapRegionLinkedList we will get that for free. So,
  // we will report the more accurate information then.
  size_t       eden_used_bytes() {
    assert(length() >= survivor_length(), "invariant");
    return (length() - survivor_length()) * HeapRegion::GrainBytes;
  }
  size_t       survivor_used_bytes() {
    return survivor_length() * HeapRegion::GrainBytes;
  }

  void rs_length_sampling_init();
  bool rs_length_sampling_more();
  void rs_length_sampling_next();
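
  // Illustrative (assumed) use of the sampling API, as driven by the
  // RSet length sampling done outside of safepoints:
  //
  //   rs_length_sampling_init();
  //   while (rs_length_sampling_more()) {
  //     rs_length_sampling_next();  // sample the current region, advance
  //   }
  //
  // after which sampled_rs_lengths() reports the most recently
  // completed sample.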

  void reset_sampled_info() {
    _last_sampled_rs_lengths = 0;
  }
  size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }

  // for development purposes
  void reset_auxilary_lists();
  void clear() { _head = NULL; _length = 0; }

  void clear_survivors() {
    _survivor_head    = NULL;
    _survivor_tail    = NULL;
    _survivor_length  = 0;
  }

  HeapRegion* first_region() { return _head; }
  HeapRegion* first_survivor_region() { return _survivor_head; }
  HeapRegion* last_survivor_region() { return _survivor_tail; }

  // debugging
  bool          check_list_well_formed();
  bool          check_list_empty(bool check_sample = true);
  void          print();
};

class MutatorAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  MutatorAllocRegion()
    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};

// The G1 STW is_alive closure.
// An instance is embedded into the G1CollectedHeap and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also extensively used during
// reference processing in STW evacuation pauses.
class G1STWIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
public:
  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  void do_object(oop p) { assert(false, "Do not call."); }
  bool do_object_b(oop p);
};

class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  SurvivorGCAllocRegion()
  : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
};

class OldGCAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  OldGCAllocRegion()
  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
};

class RefineCardTableEntryClosure;

class G1CollectedHeap : public SharedHeap {
  friend class VM_G1CollectForAllocation;
  friend class VM_GenCollectForPermanentAllocation;
  friend class VM_G1CollectFull;
  friend class VM_G1IncCollectionPause;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class SurvivorGCAllocRegion;
  friend class OldGCAllocRegion;

  // Closures used in implementation.
  friend class G1ParCopyHelper;
  friend class G1IsAliveClosure;
  friend class G1EvacuateFollowersClosure;
  friend class G1ParScanThreadState;
  friend class G1ParScanClosureSuper;
  friend class G1ParEvacuateFollowersClosure;
  friend class G1ParTask;
  friend class G1FreeGarbageRegionClosure;
  friend class RefineCardTableEntryClosure;
  friend class G1PrepareCompactClosure;
  friend class RegionSorter;
  friend class RegionResetter;
  friend class CountRCClosure;
  friend class EvacPopObjClosure;
  friend class G1ParCleanupCTTask;

  // Other related classes.
  friend class G1MarkSweep;

private:
  // The one and only G1CollectedHeap, so static functions can find it.
  static G1CollectedHeap* _g1h;

  static size_t _humongous_object_threshold_in_words;

  // Storage for the G1 heap (excludes the permanent generation).
  VirtualSpace _g1_storage;
  MemRegion    _g1_reserved;

  // The part of _g1_storage that is currently committed.
  MemRegion _g1_committed;

  // The master free list. It will satisfy all new region allocations.
  MasterFreeRegionList      _free_list;

  // The secondary free list which contains regions that have been
  // freed up during the cleanup process. This will be appended to the
  // master free list when appropriate.
  SecondaryFreeRegionList   _secondary_free_list;

  // It keeps track of the old regions.
  MasterOldRegionSet        _old_set;

  // It keeps track of the humongous regions.
  MasterHumongousRegionSet  _humongous_set;

  // The number of regions we could create by expansion.
  size_t _expansion_regions;

  // The block offset table for the G1 heap.
  G1BlockOffsetSharedArray* _bot_shared;

  // Tears down the region sets / lists so that they are empty and the
  // regions on the heap do not belong to a region set / list. The
  // only exception is the humongous set which we leave unaltered. If
  // free_list_only is true, it will only tear down the master free
  // list. It is called before a Full GC (free_list_only == false) or
  // before heap shrinking (free_list_only == true).
  void tear_down_region_sets(bool free_list_only);

  // Rebuilds the region sets / lists so that they are repopulated to
  // reflect the contents of the heap. The only exception is the
  // humongous set which was not torn down in the first place. If
  // free_list_only is true, it will only rebuild the master free
  // list. It is called after a Full GC (free_list_only == false) or
  // after heap shrinking (free_list_only == true).
  void rebuild_region_sets(bool free_list_only);

  // The sequence of all heap regions in the heap.
  HeapRegionSeq _hrs;

  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

  // The last old region we allocated to during the last GC.
  // Typically, it is not full so we should re-use it during the next GC.
  HeapRegion* _retained_old_gc_alloc_region;

  // It resets the mutator alloc region before new allocations can take place.
  void init_mutator_alloc_region();

  // It releases the mutator alloc region.
  void release_mutator_alloc_region();

  // It initializes the GC alloc regions at the start of a GC.
  void init_gc_alloc_regions();

  // It releases the GC alloc regions at the end of a GC.
  void release_gc_alloc_regions();

  // It does any cleanup that needs to be done on the GC alloc regions
  // before a Full GC.
  void abandon_gc_alloc_regions();

  // Helper for monitoring and management support.
  G1MonitoringSupport* _g1mm;

  // Determines PLAB size for a particular allocation purpose.
  static size_t desired_plab_sz(GCAllocPurpose purpose);

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region.
  size_t _summary_bytes_used;

  // This is used for a quick test on whether a reference points into
  // the collection set or not. Basically, we have an array, with one
  // byte per region, and that byte denotes whether the corresponding
  // region is in the collection set or not. The entry corresponding
  // to the bottom of the heap, i.e., region 0, is pointed to by
  // _in_cset_fast_test_base.  The _in_cset_fast_test field has been
  // biased so that it actually points to address 0 of the address
  // space, to make the test as fast as possible (we can simply shift
  // the address to index into it, instead of having to subtract the
  // bottom of the heap from the address before shifting it; basically
  // it works in the same way the card table works).
  bool* _in_cset_fast_test;

  // The allocated array used for the fast test on whether a reference
  // points into the collection set or not. This field is also used to
  // free the array.
  bool* _in_cset_fast_test_base;

  // The length of the _in_cset_fast_test_base array.
  size_t _in_cset_fast_test_length;
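
  // A hedged sketch of how the bias is established at initialization
  // time (illustrative; "heap_bottom" stands in for the start of the
  // reserved heap):
  //
  //   _in_cset_fast_test = _in_cset_fast_test_base -
  //       ((size_t) heap_bottom >> HeapRegion::LogOfHRGrainBytes);
  //
  // With the base shifted back like this, in_cset_fast_test() below
  // can index the array directly with (obj >> LogOfHRGrainBytes),
  // with no subtraction on the fast path.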

  volatile unsigned _gc_time_stamp;

  size_t* _surviving_young_words;

  G1HRPrinter _hr_printer;

  void setup_surviving_young_words();
  void update_surviving_young_words(size_t* surv_young_words);
  void cleanup_surviving_young_words();

  // It decides whether an explicit GC should start a concurrent cycle
  // instead of doing a STW GC. Currently, a concurrent cycle is
  // explicitly started if:
  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
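
  // A minimal sketch of that policy (assumed here; the definitive
  // version lives in the .cpp file):
  //
  //   bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  //     switch (cause) {
  //       case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
  //       case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
  //       default:                            return false;
  //     }
  //   }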

  // Keeps track of how many "full collections" (i.e., Full GCs or
  // concurrent cycles) we have completed. The number of them we have
  // started is maintained in _total_full_collections in CollectedHeap.
  volatile unsigned int _full_collections_completed;

  // This is a non-product method that is helpful for testing. It is
  // called at the end of a GC and artificially expands the heap by
  // allocating a number of dead regions. This way we can induce very
  // frequent marking cycles and stress the cleanup / concurrent
  // cleanup code more (as all the regions that will be allocated by
  // this method will be found dead by the marking cycle).
  void allocate_dummy_regions() PRODUCT_RETURN;

  // These are macros so that, if the assert fires, we get the correct
  // line number, file, etc.

#define heap_locking_asserts_err_msg(_extra_message_)                         \
  err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",    \
          (_extra_message_),                                                  \
          BOOL_TO_STR(Heap_lock->owned_by_self()),                            \
          BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),               \
          BOOL_TO_STR(Thread::current()->is_VM_thread()))

#define assert_heap_locked()                                                  \
  do {                                                                        \
    assert(Heap_lock->owned_by_self(),                                        \
           heap_locking_asserts_err_msg("should be holding the Heap_lock"));  \
  } while (0)

#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
  do {                                                                        \
    assert(Heap_lock->owned_by_self() ||                                      \
           (SafepointSynchronize::is_at_safepoint() &&                        \
             ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
           heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
                                        "should be at a safepoint"));         \
  } while (0)

#define assert_heap_locked_and_not_at_safepoint()                             \
  do {                                                                        \
    assert(Heap_lock->owned_by_self() &&                                      \
                                    !SafepointSynchronize::is_at_safepoint(), \
          heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
                                       "should not be at a safepoint"));      \
  } while (0)

#define assert_heap_not_locked()                                              \
  do {                                                                        \
    assert(!Heap_lock->owned_by_self(),                                       \
        heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
  } while (0)

#define assert_heap_not_locked_and_not_at_safepoint()                         \
  do {                                                                        \
    assert(!Heap_lock->owned_by_self() &&                                     \
                                    !SafepointSynchronize::is_at_safepoint(), \
      heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
                                   "should not be at a safepoint"));          \
  } while (0)

#define assert_at_safepoint(_should_be_vm_thread_)                            \
  do {                                                                        \
    assert(SafepointSynchronize::is_at_safepoint() &&                         \
              ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
           heap_locking_asserts_err_msg("should be at a safepoint"));         \
  } while (0)

#define assert_not_at_safepoint()                                             \
  do {                                                                        \
    assert(!SafepointSynchronize::is_at_safepoint(),                          \
           heap_locking_asserts_err_msg("should not be at a safepoint"));     \
  } while (0)
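
  // Example use (illustrative): a mutator-side allocation path would
  // typically open with
  //
  //   assert_heap_not_locked_and_not_at_safepoint();
  //
  // while a collection operation run by the VM thread would open with
  //
  //   assert_at_safepoint(true /* should_be_vm_thread */);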

protected:

  // The young region list.
  YoungList*  _young_list;

  // The current policy object for the collector.
  G1CollectorPolicy* _g1_policy;

  // This is the second level of trying to allocate a new region. If
  // new_region() didn't find a region on the free_list, this call will
  // check whether there's anything available on the
  // secondary_free_list and/or wait for more regions to appear on
  // that list, if _free_regions_coming is set.
  HeapRegion* new_region_try_secondary_free_list();

  // Try to allocate a single non-humongous HeapRegion sufficient for
  // an allocation of the given word_size. If do_expand is true,
  // attempt to expand the heap if necessary to satisfy the allocation
  // request.
  HeapRegion* new_region(size_t word_size, bool do_expand);

  // Attempt to satisfy a humongous allocation request of the given
  // size by finding a contiguous set of free regions of num_regions
  // length and removing them from the master free list. Return the
  // index of the first region or G1_NULL_HRS_INDEX if the search
  // was unsuccessful.
  size_t humongous_obj_allocate_find_first(size_t num_regions,
                                           size_t word_size);

  // Initialize a contiguous set of free regions of length num_regions
  // and starting at index first so that they appear as a single
  // humongous region.
  HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
                                                      size_t num_regions,
                                                      size_t word_size);

  // Attempt to allocate a humongous object of the given size. Return
  // NULL if unsuccessful.
  HeapWord* humongous_obj_allocate(size_t word_size);

  // The following two methods, allocate_new_tlab() and
  // mem_allocate(), are the two main entry points from the runtime
  // into the G1's allocation routines. They have the following
  // assumptions:
  //
  // * They should both be called outside safepoints.
  //
  // * They should both be called without holding the Heap_lock.
  //
  // * All allocation requests for new TLABs should go to
  //   allocate_new_tlab().
  //
  // * All non-TLAB allocation requests should go to mem_allocate().
  //
  // * If either call cannot satisfy the allocation request using the
  //   current allocating region, they will try to get a new one. If
  //   this fails, they will attempt to do an evacuation pause and
  //   retry the allocation.
  //
  // * If all allocation attempts fail, even after trying to schedule
  //   an evacuation pause, allocate_new_tlab() will return NULL,
  //   whereas mem_allocate() will attempt a heap expansion and/or
  //   schedule a Full GC.
  //
  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
  //   should never be called with word_size being humongous. All
  //   humongous allocation requests should go to mem_allocate() which
  //   will satisfy them with a special path.

  virtual HeapWord* allocate_new_tlab(size_t word_size);

  virtual HeapWord* mem_allocate(size_t word_size,
                                 bool*  gc_overhead_limit_was_exceeded);

  // The following three methods take a gc_count_before_ret
  // parameter which is used to return the GC count if the method
  // returns NULL. Given that we are required to read the GC count
  // while holding the Heap_lock, and these paths will take the
  // Heap_lock at some point, it's easier to get them to read the GC
  // count while holding the Heap_lock before they return NULL instead
  // of the caller (namely: mem_allocate()) having to also take the
  // Heap_lock just to read the GC count.

  // First-level mutator allocation attempt: try to allocate out of
  // the mutator alloc region without taking the Heap_lock. This
  // should only be used for non-humongous allocations.
  inline HeapWord* attempt_allocation(size_t word_size,
                                      unsigned int* gc_count_before_ret);

  // Second-level mutator allocation attempt: take the Heap_lock and
  // retry the allocation attempt, potentially scheduling a GC
  // pause. This should only be used for non-humongous allocations.
  HeapWord* attempt_allocation_slow(size_t word_size,
                                    unsigned int* gc_count_before_ret);

  // Takes the Heap_lock and attempts a humongous allocation. It can
  // potentially schedule a GC pause.
  HeapWord* attempt_allocation_humongous(size_t word_size,
                                         unsigned int* gc_count_before_ret);
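
  // A hedged sketch of how mem_allocate() chains these levels together
  // (illustrative only; the definitive loop lives in the .cpp file):
  //
  //   for (;;) {
  //     unsigned int gc_count_before;
  //     HeapWord* result = !isHumongous(word_size)
  //       ? attempt_allocation(word_size, &gc_count_before)
  //       : attempt_allocation_humongous(word_size, &gc_count_before);
  //     if (result != NULL) {
  //       return result;
  //     }
  //     // NULL means the fast paths failed; gc_count_before was read
  //     // under the Heap_lock, so use it to schedule exactly one pause.
  //     bool succeeded;
  //     result = do_collection_pause(word_size, gc_count_before, &succeeded);
  //     if (result != NULL) {
  //       return result;
  //     }
  //     // otherwise loop around and retry the allocation
  //   }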

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                       bool expect_null_mutator_alloc_region);

  // It dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void dirty_young_block(HeapWord* start, size_t word_size);
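
  // A hedged sketch of the dirtying step (assuming the card table
  // barrier set interface; the real definition is inline elsewhere):
  //
  //   MemRegion mr(start, word_size);
  //   ((CardTableModRefBS*) barrier_set())->dirty(mr);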

  // Allocate blocks during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous - it must fit into a single heap region.
  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);

  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
                                    HeapRegion*    alloc_region,
                                    bool           par,
                                    size_t         word_size);

  // Ensure that no further allocations can happen in "r", bearing in mind
  // that parallel threads might be attempting allocations.
  void par_allocate_remaining_space(HeapRegion* r);

  // Allocation attempt during GC for a survivor object / PLAB.
  inline HeapWord* survivor_attempt_allocation(size_t word_size);

  // Allocation attempt during GC for an old object / PLAB.
  inline HeapWord* old_attempt_allocation(size_t word_size);

  // These methods are the "callbacks" from the G1AllocRegion class.

  // For mutator alloc regions.
  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
  void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                   size_t allocated_bytes);

  // For GC alloc regions.
  HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
                                  GCAllocPurpose ap);
  void retire_gc_alloc_region(HeapRegion* alloc_region,
                              size_t allocated_bytes, GCAllocPurpose ap);

  // - if explicit_gc is true, the GC is for a System.gc() or a heap
  //   inspection request and should collect the entire heap
  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC
  // - if explicit_gc is false, word_size describes the allocation that
  //   the GC should attempt (at least) to satisfy
  // - it returns false if it is unable to do the collection due to the
  //   GC locker being active, true otherwise
  bool do_collection(bool explicit_gc,
                     bool clear_all_soft_refs,
                     size_t word_size);

  // Callback from VM_G1CollectFull operation.
  // Perform a full collection.
  void do_full_collection(bool clear_all_soft_refs);

  // Resize the heap if necessary after a full collection.  If this is
  // after a collect-for allocation, "word_size" is the allocation size,
  // and will be considered part of the used portion of the heap.
  void resize_if_necessary_after_full_collection(size_t word_size);

  // Callback from VM_G1CollectForAllocation operation.
  // This function does everything necessary/possible to satisfy a
  // failed allocation request (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);

  // Attempt to expand the heap sufficiently to support an allocation
  // of the given "word_size". If successful, perform the allocation
  // and return the address of the allocated block, or else "NULL".
  HeapWord* expand_and_allocate(size_t word_size);

  // Process any reference objects discovered during
  // an incremental evacuation pause.
  void process_discovered_references();

  // Enqueue any remaining discovered references
  // after processing.
  void enqueue_discovered_references();

public:

  G1MonitoringSupport* g1mm() {
    assert(_g1mm != NULL, "should have been initialized");
    return _g1mm;
  }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
  // (Rounds up to a HeapRegion boundary.)
  bool expand(size_t expand_bytes);

  // Do anything common to GC's.
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

  // We register a region with the fast "in collection set" test. We
  // simply set to true the array slot corresponding to this region.
  void register_region_with_in_cset_fast_test(HeapRegion* r) {
    assert(_in_cset_fast_test_base != NULL, "sanity");
    assert(r->in_collection_set(), "invariant");
    size_t index = r->hrs_index();
    assert(index < _in_cset_fast_test_length, "invariant");
    assert(!_in_cset_fast_test_base[index], "invariant");
    _in_cset_fast_test_base[index] = true;
  }

  // This is a fast test on whether a reference points into the
  // collection set or not. It does not assume that the reference
  // points into the heap; if it doesn't, it will return false.
  bool in_cset_fast_test(oop obj) {
    assert(_in_cset_fast_test != NULL, "sanity");
    if (_g1_committed.contains((HeapWord*) obj)) {
      // no need to subtract the bottom of the heap from obj,
      // _in_cset_fast_test is biased
      size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
      bool ret = _in_cset_fast_test[index];
      // let's make sure the result is consistent with what the slower
      // test returns
      assert( ret || !obj_in_cs(obj), "sanity");
      assert(!ret ||  obj_in_cs(obj), "sanity");
      return ret;
    } else {
      return false;
    }
  }

  void clear_cset_fast_test() {
    assert(_in_cset_fast_test_base != NULL, "sanity");
    memset(_in_cset_fast_test_base, false,
        _in_cset_fast_test_length * sizeof(bool));
  }

  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of full collections completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // too. The concurrent parameter is a boolean to help us do a bit
  // tighter consistency checking in the method. If concurrent is
  // false, the caller is the inner caller in the nesting (i.e., the
  // Full GC). If concurrent is true, the caller is the outer caller
  // in this nesting (i.e., the concurrent cycle). Further nesting is
  // not currently supported. The end of this call also notifies
  // the FullGCCount_lock in case a Java thread is waiting for a full
  // GC to happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  void increment_full_collections_completed(bool concurrent);

  unsigned int full_collections_completed() {
    return _full_collections_completed;
  }

  G1HRPrinter* hr_printer() { return &_hr_printer; }

protected:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
  // (Rounds down to a HeapRegion boundary.)
  virtual void shrink(size_t expand_bytes);
  void shrink_helper(size_t expand_bytes);

  #if TASKQUEUE_STATS
  static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
  void reset_taskqueue_stats();
  #endif // TASKQUEUE_STATS

  // Schedule the VM operation that will do an evacuation pause to
  // satisfy an allocation request of word_size. *succeeded will
  // return whether the VM operation was successful (it did do an
  // evacuation pause) or not (another thread beat us to it or the GC
  // locker was active). Given that we should not be holding the
  // Heap_lock when we enter this method, we will pass the
  // gc_count_before (i.e., total_collections()) as a parameter since
  // it has to be read while holding the Heap_lock. Currently, both
  // methods that call do_collection_pause() release the Heap_lock
  // before the call, so it's easy to read gc_count_before just before.
  HeapWord* do_collection_pause(size_t       word_size,
                                unsigned int gc_count_before,
                                bool*        succeeded);

  // The guts of the incremental collection pause, executed by the vm
  // thread. It returns false if it is unable to do the collection due
  // to the GC locker being active, true otherwise
  bool do_collection_pause_at_safepoint(double target_pause_time_ms);

  // Actually do the work of evacuating the collection set.
  void evacuate_collection_set();

  // The g1 remembered set of the heap.
  G1RemSet* _g1_rem_set;
  // And its mod ref barrier set, used to track updates for the above.
  ModRefBarrierSet* _mr_bs;

  // A set of cards that cover the objects for which the Rsets should be updated
  // concurrently after the collection.
  DirtyCardQueueSet _dirty_card_queue_set;

  // The Heap Region Rem Set Iterator.
  HeapRegionRemSetIterator** _rem_set_iterator;

  // The closure used to refine a single card.
  RefineCardTableEntryClosure* _refine_cte_cl;

  // A function to check the consistency of dirty card logs.
  void check_ct_logs_at_safepoint();

  // A DirtyCardQueueSet that is used to hold cards that contain
  // references into the current collection set. This is used to
  // update the remembered sets of the regions in the collection
  // set in the event of an evacuation failure.
  DirtyCardQueueSet _into_cset_dirty_card_queue_set;

  // After a collection pause, make the regions in the CS into free
  // regions.
  void free_collection_set(HeapRegion* cs_head);

  // Abandon the current collection set without recording policy
  // statistics or updating free lists.
  void abandon_collection_set(HeapRegion* cs_head);

  // Applies "scan_non_heap_roots" to roots outside the heap,
  // "scan_rs" to roots inside the heap (having done "set_region" to
  // indicate the region in which the root resides), and does "scan_perm"
  // (setting the generation to the perm generation.)  If "scan_rs" is
  // NULL, then this step is skipped.  The "worker_i"
  // param is for use with parallel roots processing, and should be
  // the "i" of the calling parallel worker thread's work(i) function.
  // In the sequential case this param will be ignored.
  void g1_process_strong_roots(bool collecting_perm_gen,
                               SharedHeap::ScanningOption so,
                               OopClosure* scan_non_heap_roots,
                               OopsInHeapRegionClosure* scan_rs,
                               OopsInGenClosure* scan_perm,
                               int worker_i);

  // Apply "blk" to all the weak roots of the system.  These include
  // JNI weak roots, the code cache, system dictionary, symbol table,
  // string table, and referents of reachable weak refs.
  void g1_process_weak_roots(OopClosure* root_closure,
                             OopClosure* non_root_closure);

  // Frees a non-humongous region by initializing its contents and
  // adding it to the free list that's passed as a parameter (this is
  // usually a local list which will be appended to the master free
  // list later). The used bytes of freed regions are accumulated in
  // pre_used. If par is true, the region's RSet will not be freed
  // up. The assumption is that this will be done later.
  void free_region(HeapRegion* hr,
                   size_t* pre_used,
                   FreeRegionList* free_list,
                   bool par);

  // Frees a humongous region by collapsing it into individual regions
  // and calling free_region() for each of them. The freed regions
  // will be added to the free list that's passed as a parameter (this
  // is usually a local list which will be appended to the master free
  // list later). The used bytes of freed regions are accumulated in
  // pre_used. If par is true, the region's RSet will not be freed
  // up. The assumption is that this will be done later.
  void free_humongous_region(HeapRegion* hr,
                             size_t* pre_used,
                             FreeRegionList* free_list,
                             HumongousRegionSet* humongous_proxy_set,
                             bool par);

  // Notifies all the necessary spaces that the committed space has
  // been updated (either expanded or shrunk). It should be called
  // after _g1_storage is updated.
  void update_committed_space(HeapWord* old_end, HeapWord* new_end);

  // The concurrent marker (and the thread it runs in.)
  ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmThread;
  bool _mark_in_progress;

  // The concurrent refiner.
  ConcurrentG1Refine* _cg1r;

  // The parallel task queues
  RefToScanQueueSet *_task_queues;

  // True iff an evacuation has failed in the current collection.
  bool _evacuation_failed;

  // Set the attribute indicating whether evacuation has failed in the
  // current collection.
  void set_evacuation_failed(bool b) { _evacuation_failed = b; }

  // Failed evacuations cause some logical from-space objects to have
  // forwarding pointers to themselves.  Reset them.
  void remove_self_forwarding_pointers();

  // When one is non-null, so is the other. Together they hold pairs of
  // an object with a preserved mark and that object's mark value.
  GrowableArray<oop>*     _objs_with_preserved_marks;
  GrowableArray<markOop>* _preserved_marks_of_objs;

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void preserve_mark_if_necessary(oop obj, markOop m);
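
  // A hedged sketch of the intent (assuming the usual check against
  // the default prototype mark; only non-trivial marks need a record):
  //
  //   void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
  //     if (m != markOopDesc::prototype()) {
  //       _objs_with_preserved_marks->push(obj);
  //       _preserved_marks_of_objs->push(m);
  //     }
  //   }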

  // The stack of evac-failure objects left to be scanned.
  GrowableArray<oop>*    _evac_failure_scan_stack;
  // The closure to apply to evac-failure objects.
  OopsInHeapRegionClosure* _evac_failure_closure;
  // Set the field above.
  void
  set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
    _evac_failure_closure = evac_failure_closure;
  }

  // Push "obj" on the scan stack.
  void push_on_evac_failure_scan_stack(oop obj);
  // Process scan stack entries until the stack is empty.
  void drain_evac_failure_scan_stack();
  // True iff an invocation of drain_evac_failure_scan_stack() is in
  // progress; used to prevent unnecessary recursion.
  bool _drain_in_progress;

  // Do any necessary initialization for evacuation-failure handling.
  // "cl" is the closure that will be used to process evac-failure
  // objects.
  void init_for_evac_failure(OopsInHeapRegionClosure* cl);
  // Do any necessary cleanup for evacuation-failure handling data
  // structures.
  void finalize_for_evac_failure();

  // An attempt to evacuate "obj" has failed; take necessary steps.
  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj,
                                    bool should_mark_root);
  void handle_evacuation_failure_common(oop obj, markOop m);

  // ("Weak") Reference processing support.
  //
  // G1 has 2 instances of the reference processor class. One
  // (_ref_processor_cm) handles reference object discovery
  // and subsequent processing during concurrent marking cycles.
  //
  // The other (_ref_processor_stw) handles reference object
  // discovery and processing during full GCs and incremental
  // evacuation pauses.
  //
  // During an incremental pause, reference discovery will be
  // temporarily disabled for _ref_processor_cm and will be
  // enabled for _ref_processor_stw. At the end of the evacuation
  // pause references discovered by _ref_processor_stw will be
  // processed and discovery will be disabled. The previous
  // setting for reference object discovery for _ref_processor_cm
  // will be re-instated.
  //
  // At the start of marking:
  //  * Discovery by the CM ref processor is verified to be inactive
  //    and its discovered lists are empty.
  //  * Discovery by the CM ref processor is then enabled.
  //
  // At the end of marking:
  //  * Any references on the CM ref processor's discovered
  //    lists are processed (possibly MT).
  //
  // At the start of full GC we:
  //  * Disable discovery by the CM ref processor and
  //    empty CM ref processor's discovered lists
  //    (without processing any entries).
  //  * Verify that the STW ref processor is inactive and its
  //    discovered lists are empty.
  //  * Temporarily set STW ref processor discovery as single threaded.
  //  * Temporarily clear the STW ref processor's _is_alive_non_header
  //    field.
  //  * Finally enable discovery by the STW ref processor.
  //
  // The STW ref processor is used to record any discovered
  // references during the full GC.
  //
  // At the end of a full GC we:
  //  * Enqueue any reference objects discovered by the STW ref processor
  //    that have non-live referents. This has the side-effect of
  //    making the STW ref processor inactive by disabling discovery.
  //  * Verify that the CM ref processor is still inactive
  //    and no references have been placed on its discovered
  //    lists (also checked as a precondition during initial marking).
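
  // A hedged sketch of the hand-off at an evacuation pause (the names
  // of anything outside this header are assumptions):
  //
  //   // pause prologue:
  //   ref_processor_cm()->disable_discovery();
  //   ref_processor_stw()->enable_discovery();
  //
  //   // ... evacuate; _ref_processor_stw records discovered refs ...
  //
  //   // pause epilogue:
  //   process_discovered_references();
  //   enqueue_discovered_references();
  //   // STW discovery is disabled again and the previous CM
  //   // discovery setting is re-instated.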

  // The (stw) reference processor...
  ReferenceProcessor* _ref_processor_stw;

  // During reference object discovery, the _is_alive_non_header
  // closure (if non-null) is applied to the referent object to
  // determine whether the referent is live. If so then the
  // reference object does not need to be 'discovered' and can
  // be treated as a regular oop. This has the benefit of reducing
  // the number of 'discovered' reference objects that need to
  // be processed.
  //
  // Instance of the is_alive closure for embedding into the
  // STW reference processor as the _is_alive_non_header field.
  // Supplying a value for the _is_alive_non_header field is
  // optional but doing so prevents unnecessary additions to
  // the discovered lists during reference discovery.
  G1STWIsAliveClosure _is_alive_closure_stw;

  // The (concurrent marking) reference processor...
  ReferenceProcessor* _ref_processor_cm;

  // Instance of the concurrent mark is_alive closure for embedding
  // into the Concurrent Marking reference processor as the
  // _is_alive_non_header field. Supplying a value for the
  // _is_alive_non_header field is optional but doing so prevents
  // unnecessary additions to the discovered lists during reference
  // discovery.
  G1CMIsAliveClosure _is_alive_closure_cm;

  enum G1H_process_strong_roots_tasks {
    G1H_PS_mark_stack_oops_do,
    G1H_PS_refProcessor_oops_do,
    // Leave this one last.
    G1H_PS_NumElements
  };

  SubTasksDone* _process_strong_tasks;

  volatile bool _free_regions_coming;

public:

  SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }

  void set_refine_cte_cl_concurrency(bool concurrent);

  RefToScanQueue *task_queue(int i) const;

  // A set of cards where updates happened during the GC
  DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }

  // A DirtyCardQueueSet that is used to hold cards that contain
  // references into the current collection set. This is used to
  // update the remembered sets of the regions in the collection
  // set in the event of an evacuation failure.
  DirtyCardQueueSet& into_cset_dirty_card_queue_set()
        { return _into_cset_dirty_card_queue_set; }

  // Create a G1CollectedHeap with the specified policy.
  // Must call the initialize method afterwards.
  // May not return if something goes wrong.
  G1CollectedHeap(G1CollectorPolicy* policy);

  // Initialize the G1CollectedHeap to have the initial and
  // maximum sizes, permanent generation, and remembered and barrier sets
  // specified by the policy object.
  jint initialize();

  // Initialize weak reference processing.
  virtual void ref_processing_init();

  void set_par_threads(int t) {
    SharedHeap::set_par_threads(t);
    // Done in SharedHeap but oddly there are
    // two _process_strong_tasks's in a G1CollectedHeap
    // so do it here too.
    _process_strong_tasks->set_n_threads(t);
  }

  // Set _n_par_threads according to a policy TBD.
  void set_par_threads();

  void set_n_termination(int t) {
    _process_strong_tasks->set_n_threads(t);
  }

  virtual CollectedHeap::Name kind() const {
    return CollectedHeap::G1CollectedHeap;
  }

  // The current policy object for the collector.
  G1CollectorPolicy* g1_policy() const { return _g1_policy; }

  // Adaptive size policy.  No such thing for g1.
  virtual AdaptiveSizePolicy* size_policy() { return NULL; }

  // The rem set and barrier set.
  G1RemSet* g1_rem_set() const { return _g1_rem_set; }
  ModRefBarrierSet* mr_bs() const { return _mr_bs; }

  // The rem set iterator.
  HeapRegionRemSetIterator* rem_set_iterator(int i) {
    return _rem_set_iterator[i];
  }

  HeapRegionRemSetIterator* rem_set_iterator() {
    return _rem_set_iterator[0];
  }

  unsigned get_gc_time_stamp() {
    return _gc_time_stamp;
  }

  void reset_gc_time_stamp() {
    _gc_time_stamp = 0;
    OrderAccess::fence();
  }

  void increment_gc_time_stamp() {
    ++_gc_time_stamp;
    OrderAccess::fence();
  }

  void iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                  DirtyCardQueue* into_cset_dcq,
                                  bool concurrent, int worker_i);

  // The shared block offset table array.
  G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }

  // Reference Processing accessors

  // The STW reference processor....
  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }

  // The Concurrent Marking reference processor...
  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }

  virtual size_t capacity() const;
  virtual size_t used() const;
  // This should be called when we're not holding the heap lock. The
  // result might be a bit inaccurate.
  size_t used_unlocked() const;
  size_t recalculate_used() const;

  // These virtual functions do the actual allocation.
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)
  // But G1CollectedHeap doesn't yet support this.

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection or expansion activity.  In a
  // generational collector, for example, this is probably the largest
  // allocation that could be supported (without expansion) in the youngest
  // generation.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc();

  virtual bool is_maximal_no_gc() const {
    return _g1_storage.uncommitted_size() == 0;
  }

  // The total number of regions in the heap.
  size_t n_regions() { return _hrs.length(); }

  // The max number of regions in the heap.
  size_t max_regions() { return _hrs.max_length(); }

  // The number of regions that are completely free.
  size_t free_regions() { return _free_list.length(); }

  // The number of regions that are not completely free.
  size_t used_regions() { return n_regions() - free_regions(); }

  // The number of regions available for "regular" expansion.
  size_t expansion_regions() { return _expansion_regions; }

  // Factory method for HeapRegion instances. It will return NULL if
  // the allocation fails.
  HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);

  void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
  void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
  void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
  void verify_dirty_young_regions() PRODUCT_RETURN;

  // verify_region_sets() performs verification over the region
  // lists. It will be compiled in the product code to be used when
  // necessary (i.e., during heap verification).
  void verify_region_sets();

  // verify_region_sets_optional() is planted in the code for
  // list verification in non-product builds (and it can be enabled in
  // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
  void verify_region_sets_optional() {
    verify_region_sets();
  }
#else // HEAP_REGION_SET_FORCE_VERIFY
  void verify_region_sets_optional() { }
#endif // HEAP_REGION_SET_FORCE_VERIFY

#ifdef ASSERT
  bool is_on_master_free_list(HeapRegion* hr) {
    return hr->containing_set() == &_free_list;
  }

  bool is_in_humongous_set(HeapRegion* hr) {
    return hr->containing_set() == &_humongous_set;
  }
#endif // ASSERT

  // Wrapper for the region list operations that can be called from
  // methods outside this class.

  void secondary_free_list_add_as_tail(FreeRegionList* list) {
    _secondary_free_list.add_as_tail(list);
  }

  void append_secondary_free_list() {
    _free_list.add_as_head(&_secondary_free_list);
  }

  void append_secondary_free_list_if_not_empty_with_lock() {
    // If the secondary free list looks empty there's no reason to
    // take the lock and then try to append it.
    if (!_secondary_free_list.is_empty()) {
      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
      append_secondary_free_list();
    }
  }

  void old_set_remove(HeapRegion* hr) {
    _old_set.remove(hr);
  }

  void set_free_regions_coming();
  void reset_free_regions_coming();
  bool free_regions_coming() { return _free_regions_coming; }
  void wait_while_free_regions_coming();

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  // True iff an evacuation has failed in the most-recent collection.
  bool evacuation_failed() { return _evacuation_failed; }

  // It will free a region if it has allocated objects in it that are
  // all dead. It calls either free_region() or
  // free_humongous_region() depending on the type of the region that
  // is passed to it.
  void free_region_if_empty(HeapRegion* hr,
                            size_t* pre_used,
                            FreeRegionList* free_list,
                            OldRegionSet* old_proxy_set,
                            HumongousRegionSet* humongous_proxy_set,
                            HRRSCleanupTask* hrrs_cleanup_task,
                            bool par);

  // It appends the free list to the master free list and updates the
  // master humongous list according to the contents of the proxy
  // list. It also adjusts the total used bytes according to pre_used
  // (if par is true, it will do so by taking the ParGCRareEvent_lock).
  void update_sets_after_freeing_regions(size_t pre_used,
                                       FreeRegionList* free_list,
                                       OldRegionSet* old_proxy_set,
                                       HumongousRegionSet* humongous_proxy_set,
                                       bool par);
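
  // Illustrative (assumed) pattern for the two methods above: work
  // into thread-local lists/sets and merge into the masters once.
  //
  //   size_t pre_used = 0;
  //   FreeRegionList local_free_list("Local Free List");
  //   HumongousRegionSet humongous_proxy_set("Local Humongous Proxy Set");
  //   for each candidate region hr:
  //     free_region_if_empty(hr, &pre_used, &local_free_list,
  //                          NULL /* old_proxy_set */,
  //                          &humongous_proxy_set,
  //                          NULL /* hrrs_cleanup_task */,
  //                          true /* par */);
  //   update_sets_after_freeing_regions(pre_used, &local_free_list,
  //                                     NULL /* old_proxy_set */,
  //                                     &humongous_proxy_set,
  //                                     true /* par */);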

  // Returns "TRUE" iff "p" points into the allocated area of the heap.
  virtual bool is_in(const void* p) const;

  // Return "TRUE" iff the given object address is within the collection
  // set.
  inline bool obj_in_cs(oop obj);

  // Return "TRUE" iff the given object address is in the reserved
  // region of g1 (excluding the permanent generation).
  bool is_in_g1_reserved(const void* p) const {
    return _g1_reserved.contains(p);
  }

  // Returns a MemRegion that corresponds to the space that has been
  // reserved for the heap
  MemRegion g1_reserved() {
    return _g1_reserved;
  }

  // Returns a MemRegion that corresponds to the space that has been
  // committed in the heap
  MemRegion g1_committed() {
    return _g1_committed;
  }

  virtual bool is_in_closed_subset(const void* p) const;

  // This resets the card table to all zeros.  It is used after
  // a collection pause which used the card table to claim cards.
  void cleanUpCardTable();

  // Iteration functions.

  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each.
  virtual void oop_iterate(OopClosure* cl) {
    oop_iterate(cl, true);
  }
  void oop_iterate(OopClosure* cl, bool do_perm);

  // Same as above, restricted to a memory region.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl) {
    oop_iterate(mr, cl, true);
  }
  void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm);

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) {
    object_iterate(cl, true);
  }
  virtual void safe_object_iterate(ObjectClosure* cl) {
    object_iterate(cl, true);
  }
  void object_iterate(ObjectClosure* cl, bool do_perm);

  // Iterate over all objects allocated since the last collection, calling
  // "cl.do_object" on each.  The heap must have been initialized properly
  // to support this function, or else this call will fail.
  virtual void object_iterate_since_last_GC(ObjectClosure* cl);

  // Iterate over all spaces in use in the heap, in ascending address order.
  virtual void space_iterate(SpaceClosure* cl);

  // Iterate over heap regions, in address order, terminating the
  // iteration early if the "doHeapRegion" method returns "true".
  void heap_region_iterate(HeapRegionClosure* blk) const;

  // Iterate over heap regions starting with r (or the first region if "r"
  // is NULL), in address order, terminating early if the "doHeapRegion"
  // method returns "true".
  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;

  // Return the region with the given index. It assumes the index is valid.
  HeapRegion* region_at(size_t index) const { return _hrs.at(index); }

  // Divide the heap region sequence into "chunks" of some size (the number
  // of regions divided by the number of parallel threads times some
  // overpartition factor, currently 4).  Assumes that this will be called
  // in parallel by ParallelGCThreads worker threads with distinct worker
  // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
  // calls will use the same "claim_value", and that this claim value is
  // different from the claim_value of any heap region before the start of
  // the iteration.  Applies "blk->doHeapRegion" to each of the regions, by
  // attempting to claim the first region in each chunk, and, if
  // successful, applying the closure to each region in the chunk (and
  // setting the claim value of the second and subsequent regions of the
  // chunk.)  For now requires that "doHeapRegion" always returns "false",
  // i.e., that a closure never attempt to abort a traversal.
  void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
                                       int worker,
                                       int no_of_par_workers,
                                       jint claim_value);
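  //
  // A sketch of the expected calling pattern (the task and closure
  // names here are hypothetical): each worker in a parallel task
  // would do something like
  //
  //   void work(int worker_id) {
  //     MyRegionClosure cl;  // its doHeapRegion() always returns false
  //     g1h->heap_region_par_iterate_chunked(&cl, worker_id,
  //                                          num_workers, MyClaimValue);
  //   }
  //
  // and, once all workers have finished, the caller would invoke
  // reset_heap_region_claim_values() (below) so that a later parallel
  // iteration can start from a clean claim state.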

  // It resets all the region claim values to the default.
  void reset_heap_region_claim_values();

#ifdef ASSERT
  bool check_heap_region_claim_values(jint claim_value);
#endif // ASSERT

  // Iterate over the regions (if any) in the current collection set.
  void collection_set_iterate(HeapRegionClosure* blk);

  // As above but starting from region r
  void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);

  // Returns the first (lowest address) compactible space in the heap.
  virtual CompactibleSpace* first_compactible_space();

  // A CollectedHeap will contain some number of spaces.  This finds the
  // space containing a given address, or else returns NULL.
  virtual Space* space_containing(const void* addr) const;

  // A G1CollectedHeap will contain some number of heap regions.  This
  // finds the region containing a given address, or else returns NULL.
  template <class T>
  inline HeapRegion* heap_region_containing(const T addr) const;

  // Like the above, but requires "addr" to be in the heap (to avoid a
  // null-check), and unlike the above, may return a continuing humongous
  // region.
  template <class T>
  inline HeapRegion* heap_region_containing_raw(const T addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a block may be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;
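  //
  // A sketch (illustrative only) of walking the blocks of a MemRegion
  // [bottom, top) using the three functions above:
  //
  //   HeapWord* cur = bottom;
  //   while (cur < top) {
  //     size_t sz = block_size(cur);
  //     if (block_is_obj(cur)) {
  //       // process oop(cur), an sz-word object
  //     }
  //     cur += sz;  // advance to the start of the next block
  //   }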

  // Does this heap support heap inspection? (+PrintClassHistogram)
  virtual bool supports_heap_inspection() const { return true; }

  // Section on thread-local allocation buffers (TLABs)
  // See CollectedHeap for semantics.

  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint. If such permission
  // is granted for this heap type, the compiler promises to call
  // defer_store_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided. G1, like CMS, allows this, but should be
  // ready to provide a compensating write barrier as necessary
  // if that storage came out of a non-young region. The efficiency
  // of this implementation depends crucially on being able to
  // answer very efficiently in constant time whether a piece of
  // storage in the heap comes from a young region or not.
  // See ReduceInitialCardMarks.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return true;
  }

  bool is_in_young(const oop obj) {
    HeapRegion* hr = heap_region_containing(obj);
    return hr != NULL && hr->is_young();
  }

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void* p);
#endif

  virtual bool is_scavengable(const void* addr);

  // We don't need barriers for initializing stores to objects
  // in the young gen: for the SATB pre-barrier, there is no
  // pre-value that needs to be remembered; for the remembered-set
  // update logging post-barrier, we don't maintain remembered set
  // information for young gen objects.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    return is_in_young(new_obj);
  }

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    // At least until perm gen collection is also G1-ified, at
    // which point this should return false.
    return true;
  }

  // Returns "true" iff the given word_size is "very large".
  static bool isHumongous(size_t word_size) {
    // Note this has to be strictly greater-than as the TLABs
    // are capped at the humongous threshold and we want to
    // ensure that we don't try to allocate a TLAB as
    // humongous and that we don't allocate a humongous
    // object in a TLAB.
    return word_size > _humongous_object_threshold_in_words;
  }
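  //
  // Worked example (assuming, as elsewhere in G1, a threshold of half
  // a region): with 1MB regions on a 64-bit VM the threshold is 64K
  // words, so a 64K-word allocation is not humongous (the test is
  // strictly greater-than) while a (64K + 1)-word one is.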

  // Update mod union table with the set of dirty cards.
  void updateModUnion();

  // Set the mod union bits corresponding to the given memRegion.  Note
  // that this is always a safe operation, since it doesn't clear any
  // bits.
  void markModUnionRange(MemRegion mr);

  // Records the fact that a marking phase is no longer in progress.
  void set_marking_complete() {
    _mark_in_progress = false;
  }
  void set_marking_started() {
    _mark_in_progress = true;
  }
  bool mark_in_progress() {
    return _mark_in_progress;
  }

  // Return the maximum heap capacity.
  virtual size_t max_capacity() const;

  virtual jlong millis_since_last_gc();

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify();

  // Perform verification.

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information,
  // vo == UseMarkWord    -> use the mark word in the object header
  //
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // vo == UsePrevMarking.
  // Currently, there is only one case where this is called with
  // vo == UseNextMarking, which is to verify the "next" marking
  // information at the end of remark.
  // Currently there is only one place where this is called with
  // vo == UseMarkWord, which is to verify the marking during a
  // full GC.
  void verify(bool allow_dirty, bool silent, VerifyOption vo);
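  //
  // For example, a sketch of the "next" marking verification done at
  // the end of remark (exact call sites and argument values vary):
  //
  //   Universe::heap()->prepare_for_verify();
  //   g1h->verify(/* allow_dirty */ true, /* silent */ false,
  //               VerifyOption_G1UseNextMarking);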

  // Override; it uses the "prev" marking information
  virtual void verify(bool allow_dirty, bool silent);
  virtual void print_on(outputStream* st) const;
  virtual void print_extended_on(outputStream* st) const;

  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;

  // Override
  void print_tracing_info() const;

  // The following two methods are helpful for debugging RSet issues.
  void print_cset_rsets() PRODUCT_RETURN;
  void print_all_rsets() PRODUCT_RETURN;

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static G1CollectedHeap* heap();

  void set_region_short_lived_locked(HeapRegion* hr);
  // add appropriate methods for any other surv rate groups

  YoungList* young_list() { return _young_list; }

  // debugging
  bool check_young_list_well_formed() {
    return _young_list->check_list_well_formed();
  }

  bool check_young_list_empty(bool check_heap,
                              bool check_sample = true);

  // *** Stuff related to concurrent marking.  It's not clear to me that so
  // many of these need to be public.

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.
  // This performs a concurrent marking of the live objects in a
  // bitmap off to the side.
  void doConcurrentMark();

  bool isMarkedPrev(oop obj) const;
  bool isMarkedNext(oop obj) const;

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information,
  // vo == UseMarkWord    -> use mark word from object header
  bool is_obj_dead_cond(const oop obj,
                        const HeapRegion* hr,
                        const VerifyOption vo) const {

    switch (vo) {
      case VerifyOption_G1UsePrevMarking:
        return is_obj_dead(obj, hr);
      case VerifyOption_G1UseNextMarking:
        return is_obj_ill(obj, hr);
      default:
        assert(vo == VerifyOption_G1UseMarkWord, "must be");
        return !obj->is_gc_marked();
    }
  }

  // Determine if an object is dead, given the object and also
  // the region to which the object belongs. An object is dead
  // iff a) it was not allocated since the last mark and b) it
  // is not marked.

  bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_prev_marking(obj) &&
      !isMarkedPrev(obj);
  }

  // This is used when copying an object to survivor space.
  // If the object is marked live, then we mark the copy live.
  // If the object is allocated since the start of this mark
  // cycle, then we mark the copy live.
  // If the object has been around since the previous mark
  // phase, and hasn't been marked yet during this phase,
  // then we don't mark it, we just wait for the
  // current marking cycle to get to it.

  // This function returns true when an object has been
  // around since the previous marking and hasn't yet
  // been marked during this marking.

  bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_next_marking(obj) &&
      !isMarkedNext(obj);
  }

  // Determine if an object is dead, given only the object itself.
  // This will find the region to which the object belongs and
  // then call the region version of the same function.

  // Note: an object in the permanent generation is never considered
  // dead, and neither is a NULL reference.

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information,
  // vo == UseMarkWord    -> use mark word from object header
  bool is_obj_dead_cond(const oop obj,
                        const VerifyOption vo) const {

    switch (vo) {
      case VerifyOption_G1UsePrevMarking:
        return is_obj_dead(obj);
      case VerifyOption_G1UseNextMarking:
        return is_obj_ill(obj);
      default:
        assert(vo == VerifyOption_G1UseMarkWord, "must be");
        return !obj->is_gc_marked();
    }
  }

  bool is_obj_dead(const oop obj) const {
    const HeapRegion* hr = heap_region_containing(obj);
    if (hr == NULL) {
      if (Universe::heap()->is_in_permanent(obj))
        return false;
      else if (obj == NULL) return false;
      else return true;
    }
    else return is_obj_dead(obj, hr);
  }

  bool is_obj_ill(const oop obj) const {
    const HeapRegion* hr = heap_region_containing(obj);
    if (hr == NULL) {
      if (Universe::heap()->is_in_permanent(obj))
        return false;
      else if (obj == NULL) return false;
      else return true;
    }
    else return is_obj_ill(obj, hr);
  }

  // The following is just to alert the verification code
  // that a full collection has occurred and that the
  // remembered sets are no longer up to date.
  bool _full_collection;
  void set_full_collection() { _full_collection = true;}
  void clear_full_collection() {_full_collection = false;}
  bool full_collection() {return _full_collection;}

  ConcurrentMark* concurrent_mark() const { return _cm; }
  ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }

  // The dirty cards region list is used to record a subset of regions
  // whose cards need clearing. The list is populated during the
  // remembered set scanning and drained during the card table
  // cleanup. Although the methods are reentrant, population/draining
  // phases must not overlap. For synchronization purposes the last
  // element on the list points to itself.
  HeapRegion* _dirty_cards_region_list;
  void push_dirty_cards_region(HeapRegion* hr);
  HeapRegion* pop_dirty_cards_region();
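  //
  // A sketch of the drain side (illustrative; the real loop lives in
  // the card table cleanup code):
  //
  //   while (HeapRegion* r = pop_dirty_cards_region()) {
  //     // clear the card table entries covering r
  //   }
  //
  // where pop_dirty_cards_region() yields NULL once the list is empty.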

public:
  void stop_conc_gc_threads();

  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
  void check_if_region_is_too_expensive(double predicted_time_ms);
  size_t pending_card_num();
  size_t max_pending_card_num();
  size_t cards_scanned();

protected:
  size_t _max_heap_capacity;
};

#define use_local_bitmaps         1
#define verify_local_bitmaps      0
#define oop_buffer_length       256

#ifndef PRODUCT
class GCLabBitMap;
class GCLabBitMapClosure: public BitMapClosure {
private:
  ConcurrentMark* _cm;
  GCLabBitMap*    _bitmap;

public:
  GCLabBitMapClosure(ConcurrentMark* cm,
                     GCLabBitMap* bitmap) {
    _cm     = cm;
    _bitmap = bitmap;
  }

  virtual bool do_bit(size_t offset);
};
#endif // !PRODUCT

class GCLabBitMap: public BitMap {
private:
  ConcurrentMark* _cm;

  int       _shifter;
  size_t    _bitmap_word_covers_words;

  // beginning of the heap
  HeapWord* _heap_start;

  // this is the actual start of the GCLab
  HeapWord* _real_start_word;

  // this is the actual end of the GCLab
  HeapWord* _real_end_word;

  // this is the first word, possibly located before the actual start
  // of the GCLab, that corresponds to the first bit of the bitmap
  HeapWord* _start_word;

  // size of a GCLab in words
  size_t _gclab_word_size;

  static int shifter() {
    return MinObjAlignment - 1;
  }

  // how many heap words does a single bitmap word correspond to?
  static size_t bitmap_word_covers_words() {
    return BitsPerWord << shifter();
  }
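  // For example, on a 64-bit VM with MinObjAlignment == 1 (one word),
  // shifter() is 0 and a single bitmap word covers BitsPerWord == 64
  // heap words, i.e. one bitmap bit per heap word. (Illustrative
  // values; the actual numbers follow from the build's settings.)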

  size_t gclab_word_size() const {
    return _gclab_word_size;
  }

  // Calculates actual GCLab size in words
  size_t gclab_real_word_size() const {
    return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
           / BitsPerWord;
  }

  static size_t bitmap_size_in_bits(size_t gclab_word_size) {
    size_t bits_in_bitmap = gclab_word_size >> shifter();
    // We are going to ensure that the beginning of a word in this
    // bitmap also corresponds to the beginning of a word in the
    // global marking bitmap. To handle the case where a GCLab
    // starts from the middle of the bitmap, we need to add enough
    // space (i.e. up to a bitmap word) to ensure that we have
    // enough bits in the bitmap.
    return bits_in_bitmap + BitsPerWord - 1;
  }
public:
  GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size)
    : BitMap(bitmap_size_in_bits(gclab_word_size)),
      _cm(G1CollectedHeap::heap()->concurrent_mark()),
      _shifter(shifter()),
      _bitmap_word_covers_words(bitmap_word_covers_words()),
      _heap_start(heap_start),
      _gclab_word_size(gclab_word_size),
      _real_start_word(NULL),
      _real_end_word(NULL),
      _start_word(NULL)
  {
    guarantee( size_in_words() >= bitmap_size_in_words(),
               "just making sure");
  }

  inline unsigned heapWordToOffset(HeapWord* addr) {
    unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
    assert(offset < size(), "offset should be within bounds");
    return offset;
  }

  inline HeapWord* offsetToHeapWord(size_t offset) {
    HeapWord* addr =  _start_word + (offset << _shifter);
    assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
    return addr;
  }

  bool fields_well_formed() {
    bool ret1 = (_real_start_word == NULL) &&
                (_real_end_word == NULL) &&
                (_start_word == NULL);
    if (ret1)
      return true;

    bool ret2 = _real_start_word >= _start_word &&
      _start_word < _real_end_word &&
      (_real_start_word + _gclab_word_size) == _real_end_word &&
      (_start_word + _gclab_word_size + _bitmap_word_covers_words)
                                                              > _real_end_word;
    return ret2;
  }

  inline bool mark(HeapWord* addr) {
    guarantee(use_local_bitmaps, "invariant");
    assert(fields_well_formed(), "invariant");

    if (addr >= _real_start_word && addr < _real_end_word) {
      assert(!isMarked(addr), "should not have already been marked");

      // first mark it on the bitmap
      at_put(heapWordToOffset(addr), true);

      return true;
    } else {
      return false;
    }
  }

  inline bool isMarked(HeapWord* addr) {
    guarantee(use_local_bitmaps, "invariant");
    assert(fields_well_formed(), "invariant");

    return at(heapWordToOffset(addr));
  }

  void set_buffer(HeapWord* start) {
    guarantee(use_local_bitmaps, "invariant");
    clear();

    assert(start != NULL, "invariant");
    _real_start_word = start;
    _real_end_word   = start + _gclab_word_size;

    size_t diff =
      pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
    _start_word = start - diff;

    assert(fields_well_formed(), "invariant");
  }

#ifndef PRODUCT
  void verify() {
    // verify that the marks have been propagated
    GCLabBitMapClosure cl(_cm, this);
    iterate(&cl);
  }
#endif // PRODUCT

  void retire() {
    guarantee(use_local_bitmaps, "invariant");
    assert(fields_well_formed(), "invariant");

    if (_start_word != NULL) {
      CMBitMap*       mark_bitmap = _cm->nextMarkBitMap();

      // this means that the bitmap was set up for the GCLab
      assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");

      mark_bitmap->mostly_disjoint_range_union(this,
                                0, // always start from the start of the bitmap
                                _start_word,
                                gclab_real_word_size());
      _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));

#ifndef PRODUCT
      if (use_local_bitmaps && verify_local_bitmaps)
        verify();
#endif // PRODUCT
    } else {
      assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
    }
  }

  size_t bitmap_size_in_words() const {
    return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord;
  }

};

class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
  bool        _retired;
  bool        _should_mark_objects;
  GCLabBitMap _bitmap;

public:
  G1ParGCAllocBuffer(size_t gclab_word_size);

  inline bool mark(HeapWord* addr) {
    guarantee(use_local_bitmaps, "invariant");
    assert(_should_mark_objects, "invariant");
    return _bitmap.mark(addr);
  }

  inline void set_buf(HeapWord* buf) {
    if (use_local_bitmaps && _should_mark_objects) {
      _bitmap.set_buffer(buf);
    }
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  inline void retire(bool end_of_gc, bool retain) {
    if (_retired)
      return;
    if (use_local_bitmaps && _should_mark_objects) {
      _bitmap.retire();
    }
    ParGCAllocBuffer::retire(end_of_gc, retain);
    _retired = true;
  }
};

class G1ParScanThreadState : public StackObj {
protected:
  G1CollectedHeap* _g1h;
  RefToScanQueue*  _refs;
  DirtyCardQueue   _dcq;
  CardTableModRefBS* _ct_bs;
  G1RemSet* _g1_rem;

  G1ParGCAllocBuffer  _surviving_alloc_buffer;
  G1ParGCAllocBuffer  _tenured_alloc_buffer;
  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
  ageTable            _age_table;

  size_t           _alloc_buffer_waste;
  size_t           _undo_waste;

  OopsInHeapRegionClosure*      _evac_failure_cl;
  G1ParScanHeapEvacClosure*     _evac_cl;
  G1ParScanPartialArrayClosure* _partial_scan_cl;

  int _hash_seed;
  int _queue_num;

  size_t _term_attempts;

  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Map from young-age-index (0 == not young, 1 is youngest) to
  // surviving words. "base" is what we get back from the malloc call.
  size_t* _surviving_young_words_base;
  // this points into the array, as we use the first few entries for padding
  size_t* _surviving_young_words;

#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))

  void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }

  void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }

  DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
  CardTableModRefBS* ctbs()                      { return _ct_bs; }

  template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
    if (!from->is_survivor()) {
      _g1_rem->par_write_ref(from, p, tid);
    }
  }

  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
    // If the new value of the field points to the same region or
    // is the to-space, we don't need to include it in the Rset updates.
    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
      size_t card_index = ctbs()->index_for(p);
      // If the card hasn't been added to the buffer, do it.
      if (ctbs()->mark_card_deferred(card_index)) {
        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
      }
    }
  }

public:
  G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num);

  ~G1ParScanThreadState() {
    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  }

  RefToScanQueue*   refs()            { return _refs;             }
  ageTable*         age_table()       { return &_age_table;       }

  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
    return _alloc_buffers[purpose];
  }

  size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
  size_t undo_waste() const                      { return _undo_waste; }

#ifdef ASSERT
  bool verify_ref(narrowOop* ref) const;
  bool verify_ref(oop* ref) const;
  bool verify_task(StarTask ref) const;
#endif // ASSERT

  template <class T> void push_on_queue(T* ref) {
    assert(verify_ref(ref), "sanity");
    refs()->push(ref);
  }

  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
    if (G1DeferredRSUpdate) {
      deferred_rs_update(from, p, tid);
    } else {
      immediate_rs_update(from, p, tid);
    }
  }

  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {

    HeapWord* obj = NULL;
    size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
    if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
      assert(gclab_word_size == alloc_buf->word_sz(),
             "dynamic resizing is not supported");
      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
      alloc_buf->retire(false, false);

      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
      if (buf == NULL) return NULL; // Let caller handle allocation failure.
      // Otherwise.
      alloc_buf->set_buf(buf);

      obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "buffer was definitely big enough...");
    } else {
      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
    }
    return obj;
  }
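  // Worked example for the test above (with assumed, typical values):
  // if the GCLAB is 4096 words and ParallelGCBufferWastePct is 10,
  // then "word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct"
  // holds for objects of fewer than ~410 words; those retire the
  // current buffer and refill, while larger objects are allocated
  // directly via par_allocate_during_gc() so that most of a fresh
  // buffer is not wasted on a single large object.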

  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
    if (obj != NULL) return obj;
    return allocate_slow(purpose, word_sz);
  }

  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
    if (alloc_buffer(purpose)->contains(obj)) {
      assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
             "should contain whole object");
      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
    } else {
      CollectedHeap::fill_with_object(obj, word_sz);
      add_to_undo_waste(word_sz);
    }
  }

  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
    _evac_failure_cl = evac_failure_cl;
  }
  OopsInHeapRegionClosure* evac_failure_closure() {
    return _evac_failure_cl;
  }

  void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
    _evac_cl = evac_cl;
  }

  void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
    _partial_scan_cl = partial_scan_cl;
  }

  int* hash_seed() { return &_hash_seed; }
  int  queue_num() { return _queue_num; }

  size_t term_attempts() const  { return _term_attempts; }
  void note_term_attempt() { _term_attempts++; }

  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }
  double strong_roots_time() const { return _strong_roots_time; }

  void start_term_time() {
    note_term_attempt();
    _start_term = os::elapsedTime();
  }
  void end_term_time() {
    _term_time += (os::elapsedTime() - _start_term);
  }
  double term_time() const { return _term_time; }

  double elapsed_time() const {
    return os::elapsedTime() - _start;
  }

  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void
    print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;

  size_t* surviving_young_words() {
    // We add on to hide entry 0 which accumulates surviving words for
    // age -1 regions (i.e. non-young ones)
    return _surviving_young_words;
  }

  void retire_alloc_buffers() {
    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
      size_t waste = _alloc_buffers[ap]->words_remaining();
      add_to_alloc_buffer_waste(waste);
      _alloc_buffers[ap]->retire(true, false);
    }
  }

  template <class T> void deal_with_reference(T* ref_to_scan) {
    if (has_partial_array_mask(ref_to_scan)) {
      _partial_scan_cl->do_oop_nv(ref_to_scan);
    } else {
      // Note: we can use "raw" versions of "region_containing" because
      // "obj_to_scan" is definitely in the heap, and is not in a
      // humongous region.
      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
      _evac_cl->set_region(r);
      _evac_cl->do_oop_nv(ref_to_scan);
    }
  }

  void deal_with_reference(StarTask ref) {
    assert(verify_task(ref), "sanity");
    if (ref.is_narrow()) {
      deal_with_reference((narrowOop*)ref);
    } else {
      deal_with_reference((oop*)ref);
    }
  }

public:
  void trim_queue();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP