/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
#define SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP

#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/generation.hpp"
#include "memory/sharedHeap.hpp"

class SubTasksDone;

// A "GenCollectedHeap" is a SharedHeap that uses generational
// collection.  It is represented with a sequence of Generation's.
class GenCollectedHeap : public SharedHeap {
  friend class GenCollectorPolicy;
  friend class Generation;
  friend class DefNewGeneration;
  friend class TenuredGeneration;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class GenMarkSweep;
  friend class VM_GenCollectForAllocation;
  friend class VM_GenCollectFull;
  friend class VM_GenCollectFullConcurrent;
  friend class VM_GC_HeapInspection;
  friend class VM_HeapDumper;
  friend class HeapInspection;
  friend class GCCauseSetter;
  friend class VMStructs;
public:
  enum SomeConstants {
    max_gens = 10
  };

  friend class VM_PopulateDumpSharedSpace;

 protected:
  // Fields:
  static GenCollectedHeap* _gch;

 private:
  int _n_gens;
  Generation* _gens[max_gens];
  GenerationSpec** _gen_specs;

  // The generational collector policy.
  GenCollectorPolicy* _gen_policy;

  // Indicates that the most recent incremental collection failed.
  // The flag is cleared when an action is taken that might clear the
  // condition that caused that incremental collection to fail.
  bool _incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) roots processing.
  SubTasksDone* _process_strong_tasks;

  // In block contents verification, the number of header words to skip
  NOT_PRODUCT(static size_t _skip_header_HeapWords;)

protected:
  // Helper functions for allocation
  HeapWord* attempt_allocation(size_t size,
                               bool   is_tlab,
                               bool   first_only);

  // Helper function for two callbacks below.
  // Considers collection of the first max_level+1 generations.
  void do_collection(bool   full,
                     bool   clear_all_soft_refs,
                     size_t size,
                     bool   is_tlab,
                     int    max_level);

  // Callback from VM_GenCollectForAllocation operation.
  // This function does everything necessary/possible to satisfy an
  // allocation request that failed in the youngest generation that should
  // have handled it (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Callback from VM_GenCollectFull operation.
  // Perform a full collection of the first max_level+1 generations.
  virtual void do_full_collection(bool clear_all_soft_refs);
  void do_full_collection(bool clear_all_soft_refs, int max_level);

  // Does the "cause" of GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();

public:
  GenCollectedHeap(GenCollectorPolicy *policy);

  GCStats* gc_stats(int level) const;

  // Returns JNI_OK on success
  virtual jint initialize();
  char* allocate(size_t alignment,
                 size_t* _total_reserved, int* _n_covered_regions,
                 ReservedSpace* heap_rs);

  // Performs operations required after initialization has completed.
  void post_initialize();

  // Initialize ("weak") refs processing support
  virtual void ref_processing_init();

  virtual CollectedHeap::Name kind() const {
    return CollectedHeap::GenCollectedHeap;
  }

  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }
  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); }

  // Adaptive size policy
  virtual AdaptiveSizePolicy* size_policy() {
    return gen_policy()->size_policy();
  }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return Generation::GenGrain;
  }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for generations at "level" and lower.
  void save_used_regions(int level);

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size,
                         bool*  gc_overhead_limit_was_exceeded);

  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;

  // Does this heap support heap inspection? (+PrintClassHistogram)
  virtual bool supports_heap_inspection() const { return true; }

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc". This implies as full a collection as the CollectedHeap
  // supports. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of the first max_level+1 generations.
  // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, int max_level);
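
  // For illustration only (a sketch, not part of this interface): an
  // explicit full GC on behalf of "System.gc" would typically be requested
  // with the _java_lang_system_gc cause, e.g.
  //
  //   GenCollectedHeap::heap()->collect(GCCause::_java_lang_system_gc);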

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
  // be expensive to compute in general, so, to prevent their inadvertent
  // use in product JVMs, we restrict their use to assertion checking or
  // verification only.
  bool is_in(const void* p) const;

  // override
  bool is_in_closed_subset(const void* p) const {
    if (UseConcMarkSweepGC) {
      return is_in_reserved(p);
    } else {
      return is_in(p);
    }
  }

  // Returns true if the reference is to an object in the reserved space
  // for the young generation.
  // Assumes that the young gen address range is less than that of the old gen.
  bool is_in_young(oop p);

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void* p);
#endif

  virtual bool is_scavengable(const void* addr) {
    return is_in_young((oop)addr);
  }

  // Iteration functions.
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
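
  // A minimal sketch of driving the iterators above with a hypothetical
  // counting closure (the closure body is illustrative, not part of this
  // interface):
  //
  //   class CountObjectsClosure : public ObjectClosure {
  //     size_t _count;
  //    public:
  //     CountObjectsClosure() : _count(0) {}
  //     void do_object(oop obj) { _count++; }
  //     size_t count() const { return _count; }
  //   };
  //
  //   CountObjectsClosure blk;
  //   GenCollectedHeap::heap()->object_iterate(&blk);
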
  Space* space_containing(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;
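
  // Taken together, the three methods above support a linear walk over the
  // allocated part of the heap. A sketch, assuming "bottom" and "top"
  // delimit the fully-allocated portion of some space in this heap:
  //
  //   HeapWord* cur = bottom;
  //   while (cur < top) {
  //     size_t sz = block_size(cur);   // "cur" must be a block start
  //     if (block_is_obj(cur)) {
  //       // the block starting at "cur" is a Java object of "sz" words
  //     }
  //     cur += sz;                     // the next block starts at cur + sz
  //   }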

  // Section on TLAB's.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t tlab_used(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return UseConcMarkSweepGC;
  }

  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS}
  // only and may need to be re-examined in case other
  // kinds of collectors are implemented in the future.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    // We wanted to assert that:
    // assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC,
    //       "Check can_elide_initializing_store_barrier() for this collector");
    // but unfortunately the flag UseSerialGC is not necessarily always
    // set when DefNew+Tenured are being used.
    return is_in_young(new_obj);
  }

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion, say).  Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
  // Allow each generation to reset any scratch space that it has
  // contributed, as needed.
  void release_scratch();

  // Ensure parsability: override
  virtual void ensure_parsability(bool retire_tlabs);

  // Time in ms since a collector last ran in the generation that has
  // gone longest without a collection.
  virtual jlong millis_since_last_gc();

  // Total number of full collections completed.
  unsigned int total_full_collections_completed() {
    assert(_full_collections_completed <= _total_full_collections,
           "Can't complete more collections than were started");
    return _full_collections_completed;
  }

  // Update above counter, as appropriate, at the end of a stop-world GC cycle
  unsigned int update_full_collections_completed();
  // Update above counter, as appropriate, at the end of a concurrent GC cycle
  unsigned int update_full_collections_completed(unsigned int count);

  // Update "time of last gc" for all constituent generations
  // to "now".
  void update_time_of_last_gc(jlong now) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_time_of_last_gc(now);
    }
  }

  // Update the gc statistics for each generation.
  // "level" is the level of the lastest collection
  void update_gc_stats(int current_level, bool full) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_gc_stats(current_level, full);
    }
  }

  // Override.
  bool no_gc_in_progress() { return !is_gc_active(); }

  // Override.
  void prepare_for_verify();

  // Override.
  void verify(bool silent, VerifyOption option);

  // Override.
  virtual void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;
  virtual void print_on_error(outputStream* st) const;

  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.

  class GenClosure : public StackObj {
   public:
    virtual void do_generation(Generation* gen) = 0;
  };

  // Apply "cl.do_generation" to all generations in the heap;
  // "old_to_young" determines the order.
  void generation_iterate(GenClosure* cl, bool old_to_young);
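
  // A sketch of the intended pattern (the closure body is hypothetical):
  //
  //   class PrintGenClosure : public GenCollectedHeap::GenClosure {
  //    public:
  //     void do_generation(Generation* gen) {
  //       gen->print_on(tty);   // any per-generation action
  //     }
  //   };
  //
  //   PrintGenClosure blk;
  //   GenCollectedHeap::heap()->generation_iterate(&blk, true /* old_to_young */);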

  void space_iterate(SpaceClosure* cl);

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return the generation before "gen".
  Generation* prev_gen(Generation* gen) const {
    int l = gen->level();
    guarantee(l > 0, "Out of bounds");
    return _gens[l-1];
  }

  // Return the generation after "gen".
  Generation* next_gen(Generation* gen) const {
    int l = gen->level() + 1;
    guarantee(l < _n_gens, "Out of bounds");
    return _gens[l];
  }

  Generation* get_gen(int i) const {
    guarantee(i >= 0 && i < _n_gens, "Out of bounds");
    return _gens[i];
  }

  int n_gens() const {
    assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
    return _n_gens;
  }

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static GenCollectedHeap* heap();
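
  // Typical usage (a sketch): code that has already established that the
  // heap is generational retrieves it and navigates by level, e.g.
  //
  //   GenCollectedHeap* gch = GenCollectedHeap::heap();
  //   Generation* young = gch->get_gen(0);   // level 0 is the youngest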

  void set_par_threads(uint t);
  void set_n_termination(uint t);

  // Invoke the "do_oop" method of one of the closures "not_older_gens"
  // or "older_gens" on root locations for the generation at
  // "level".  (The "older_gens" closure is used for scanning references
  // from older generations; "not_older_gens" is used everywhere else.)
  // If "younger_gens_as_roots" is false, younger generations are
  // not scanned as roots; in this case, the caller must be arranging to
  // scan the younger generations itself.  (For example, a generation might
  // explicitly mark reachable objects in younger generations, to avoid
  // excess storage retention.)
  // The "so" argument determines which of the roots
  // the closure is applied to:
  // "SO_None" does none;
406 407 408 409 410 411
  enum ScanningOption {
    SO_None                =  0x0,
    SO_AllCodeCache        =  0x8,
    SO_ScavengeCodeCache   = 0x10
  };

 private:
  void process_roots(bool activate_scope,
                     ScanningOption so,
                     OopClosure* strong_roots,
                     OopClosure* weak_roots,
                     CLDClosure* strong_cld_closure,
                     CLDClosure* weak_cld_closure,
                     CodeBlobClosure* code_roots);

  void gen_process_roots(int level,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         ScanningOption so,
                         OopsInGenClosure* not_older_gens,
                         OopsInGenClosure* weak_roots,
                         OopsInGenClosure* older_gens,
                         CLDClosure* cld_closure,
                         CLDClosure* weak_cld_closure,
                         CodeBlobClosure* code_closure);

 public:
  static const bool StrongAndWeakRoots = false;
  static const bool StrongRootsOnly    = true;

  void gen_process_roots(int level,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         ScanningOption so,
                         bool only_strong_roots,
                         OopsInGenClosure* not_older_gens,
                         OopsInGenClosure* older_gens,
                         CLDClosure* cld_closure);

  // Apply "root_closure" to all the weak roots of the system.
  // These include JNI weak roots, string table,
  // and referents of reachable weak refs.
  void gen_process_weak_roots(OopClosure* root_closure);

  // Set the saved marks of generations, if that makes sense.
  // In particular, if any generation might iterate over the oops
  // in other generations, it should call this method.
  void save_marks();

  // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
  // allocated since the last call to save_marks in generations at or above
  // "level".  The "cur" closure is
  // applied to references in the generation at "level", and the "older"
  // closure to older generations.
#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix)    \
  void oop_since_save_marks_iterate(int level,                          \
                                    OopClosureType* cur,                \
                                    OopClosureType* older);

  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
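
  // For illustration: assuming ScanClosure is among the closure types
  // supplied by ALL_SINCE_SAVE_MARKS_CLOSURES (an assumption about that
  // macro's definition), the declaration above expands to, among others:
  //
  //   void oop_since_save_marks_iterate(int level,
  //                                     ScanClosure* cur,
  //                                     ScanClosure* older);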

  // Returns "true" iff no allocations have occurred in any generation at
  // "level" or above since the last
  // call to "save_marks".
  bool no_allocs_since_save_marks(int level);

  // Returns true if an incremental collection is likely to fail.
  // We optionally consult the young gen, if asked to do so;
  // otherwise we base our answer on whether the previous incremental
  // collection attempt failed with no corrective action as of yet.
  bool incremental_collection_will_fail(bool consult_young) {
    // Assumes a 2-generation system; the first disjunct remembers if an
    // incremental collection failed, even when we thought (second disjunct)
    // that it would not.
    assert(heap()->collector_policy()->is_two_generation_policy(),
           "the following definition may not be suitable for an n(>2)-generation system");
    return incremental_collection_failed() ||
           (consult_young && !get_gen(0)->collection_attempt_is_safe());
  }

  // If a generation bails out of an incremental collection,
  // it sets this flag.
  bool incremental_collection_failed() const {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // Promotion of obj into gen failed.  Try to promote obj to higher
  // gens in ascending order; return the new location of obj if successful.
  // Otherwise, try expand-and-allocate for obj in both the young and old
  // generation; return the new location of obj if successful.  Otherwise, return NULL.
  oop handle_failed_promotion(Generation* old_gen,
                              oop obj,
                              size_t obj_size);

private:
  // Accessor for memory state verification support
  NOT_PRODUCT(
    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
  )

  // Override
  void check_for_non_bad_heap_word_value(HeapWord* addr,
    size_t size) PRODUCT_RETURN;

  // For use by mark-sweep.  As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();

  // Perform a full collection of the first max_level+1 generations.
  // This is the low level interface used by the public versions of
  // collect() and collect_locked(). Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, int max_level);

  // Returns success or failure.
  bool create_cms_collector();

  // In support of ExplicitGCInvokesConcurrent functionality
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
  void collect_mostly_concurrent(GCCause::Cause cause);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

protected:
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);
};

#endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP