/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SHAREDHEAP_HPP
#define SHARE_VM_MEMORY_SHAREDHEAP_HPP

#include "gc_interface/collectedHeap.hpp"
#include "memory/generation.hpp"

// A "SharedHeap" is an implementation of a java heap for HotSpot.  This
// is an abstract class: there may be many different kinds of heaps.  This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class Generation;
class BarrierSet;
class GenRemSet;
class Space;
class SpaceClosure;
class OopClosure;
class OopsInGenClosure;
class ObjectClosure;
class SubTasksDone;
class WorkGang;
class FlexibleWorkGang;
class CollectorPolicy;
class KlassClosure;

// Note on the use of FlexibleWorkGangs for GC.
// There are three places where task completion is determined:
//    1) ParallelTaskTerminator::offer_termination(), where _n_threads
//    must be set to the correct value so that the count of workers that
//    have offered termination exactly matches the number
//    working on the task.  Tasks such as those derived from GCTask
//    use ParallelTaskTerminators.  Tasks that want load balancing
//    by work stealing use this method to gauge completion.
//    2) SubTasksDone has a variable _n_threads that is used in
//    all_tasks_completed() to determine completion.  all_tasks_completed()
//    counts the number of tasks that have been completed and then resets
//    the SubTasksDone so that it can be used again.  When the number of
//    tasks is set to the number of GC workers, then _n_threads must
//    be set to the number of active GC workers. G1RootProcessor and
//    GenCollectedHeap have SubTasksDone.
//    3) SequentialSubTasksDone has an _n_threads that is used in
//    a way similar to SubTasksDone and has the same dependency on the
//    number of active GC workers.  CompactibleFreeListSpace and Space
//    each have a SequentialSubTasksDone.
//
// Examples of using SubTasksDone and SequentialSubTasksDone:
//  G1RootProcessor and GenCollectedHeap::process_roots() use
//  SubTasksDone* _process_strong_tasks to claim tasks for workers
//
//  GenCollectedHeap::gen_process_roots() calls
//      rem_set()->younger_refs_iterate()
//  to scan the card table, which eventually calls down into
//  CardTableModRefBS::par_non_clean_card_iterate_work().  This method
//  uses SequentialSubTasksDone* _pst to claim tasks.
//  Both SubTasksDone and SequentialSubTasksDone call their method
//  all_tasks_completed() to count the number of GC workers that have
//  finished their work.  That logic is "when all the workers are
//  finished, the tasks are finished".
//
//  The pattern that appears in the code is to set _n_threads
//  to a value > 1 before a task that you would like executed in parallel
//  and then to set it to 0 after that task has completed.  A value of
//  0 is a "special" value in set_n_threads() which translates to
//  setting _n_threads to 1.
//
//  Some code uses _n_termination to decide if work should be done in
//  parallel.  The notorious possibly_parallel_oops_do() in threads.cpp
//  is an example of such code.  Look for variable "is_par" for other
//  examples.
//
//  active_workers is not reset to 0 after a parallel phase.  Its
//  value may be used in later phases, and in at least one instance
//  (the parallel remark) it has to be used (the parallel remark depends
//  on the partitioning done in the previous parallel scavenge).
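//
//  A hedged sketch of the claiming pattern described above (the task id
//  SH_PS_Universe_oops_do, the closure, and the worker body are
//  illustrative; set_n_threads(), is_task_claimed() and
//  all_tasks_completed() are the SubTasksDone calls named in this note):
//
//    // In sequential code, before starting the parallel phase:
//    _process_strong_tasks->set_n_threads(n_workers);
//
//    // In each GC worker:
//    if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
//      Universe::oops_do(&roots_closure);  // done by exactly one worker
//    }
//    // ... other root-scanning tasks, claimed the same way ...
//    _process_strong_tasks->all_tasks_completed();  // each worker checks in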

class SharedHeap : public CollectedHeap {
  friend class VMStructs;

  friend class VM_GC_Operation;
  friend class VM_CGC_Operation;

protected:
  // There should be only a single instance of "SharedHeap" in a program.
  // This is enforced with the protected constructor below, which will also
  // set the static pointer "_sh" to that instance.
  static SharedHeap* _sh;

  // The Gen Remembered Set, at least one good enough to scan the perm
  // gen.
  GenRemSet* _rem_set;

  // A GC policy; controls global GC resource issues.
  CollectorPolicy* _collector_policy;

  // See the discussion below, in the specification of the reader function
  // for this variable.
  int _strong_roots_parity;

  // If we're doing parallel GC, use this gang of threads.
  FlexibleWorkGang* _workers;

  // Full initialization is done in a concrete subtype's "initialize"
  // function.
  SharedHeap(CollectorPolicy* policy_);

  // Returns true if the calling thread holds the heap lock,
  // or the calling thread is a par gc thread and the heap_lock is held
  // by the vm thread doing a gc operation.
  bool heap_lock_held_for_gc();
  // True if the heap_lock is held by a non-gc thread invoking a gc
  // operation.
  bool _thread_holds_heap_lock_for_gc;

public:
  static SharedHeap* heap() { return _sh; }

  void set_barrier_set(BarrierSet* bs);

  // Does operations required after initialization has been done.
  virtual void post_initialize();

  // Initialization of ("weak") reference processing support
  virtual void ref_processing_init();

  // This function returns the "GenRemSet" object that allows us to scan
  // generations in a fully generational heap.
  GenRemSet* rem_set() { return _rem_set; }

  // Iteration functions.
  virtual void oop_iterate(ExtendedOopClosure* cl) = 0;
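  // A hedged usage sketch for oop_iterate (CountOopClosure is a
  // hypothetical client closure, not part of this interface):
  //
  //   class CountOopClosure : public ExtendedOopClosure {
  //     size_t _count;
  //    public:
  //     CountOopClosure() : _count(0) {}
  //     virtual void do_oop(oop* p)       { _count++; }
  //     virtual void do_oop(narrowOop* p) { _count++; }
  //     size_t count() const { return _count; }
  //   };
  //
  //   CountOopClosure cl;
  //   SharedHeap::heap()->oop_iterate(&cl);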

  // Iterate over all spaces in use in the heap, in an undefined order.
  virtual void space_iterate(SpaceClosure* cl) = 0;

  // A SharedHeap will contain some number of spaces.  This finds the
  // space whose reserved area contains the given address, or else returns
  // NULL.
  virtual Space* space_containing(const void* addr) const = 0;

  bool no_gc_in_progress() { return !is_gc_active(); }

  // Some collectors will perform "process_strong_roots" in parallel.
  // Such a call will involve claiming some fine-grained tasks, such as
  // scanning of threads.  To make this process simpler, we provide the
  // "strong_roots_parity()" method.  Collectors that start parallel tasks
  // whose threads invoke "process_strong_roots" must
  // call "change_strong_roots_parity" in sequential code starting such a
  // task.  (This also means that a parallel thread may only call
  // process_strong_roots once.)
  //
  // For calls to process_roots by sequential code, the parity is
  // updated automatically.
  //
  // The idea is that objects representing fine-grained tasks, such as
  // threads, will contain a "parity" field.  A task is claimed in the
  // current "process_roots" call only if its parity field is the
  // same as the "strong_roots_parity"; task claiming is accomplished by
  // updating the parity field to the strong_roots_parity with a CAS.
  //
  // If the client meets this spec, then strong_roots_parity() will have
  // the following properties:
  //   a) to return a different value than was returned before the last
  //      call to change_strong_roots_parity, and
  //   b) to never return a distinguished value (zero) with which such
  //      task-claiming variables may be initialized, to indicate "never
  //      claimed".
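  //
  // A hedged sketch of the CAS-based claim described above (the field
  // _claimed_parity and the surrounding logic are illustrative;
  // Atomic::cmpxchg is HotSpot's compare-and-swap primitive):
  //
  //   int parity  = SharedHeap::heap()->strong_roots_parity();
  //   int claimed = _claimed_parity;  // the task's "parity" field
  //   if (claimed != parity &&
  //       Atomic::cmpxchg(parity, &_claimed_parity, claimed) == claimed) {
  //     // This thread claimed the task; process it exactly once.
  //   }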
 public:
  int strong_roots_parity() { return _strong_roots_parity; }

  // Call these in sequential code around process_roots.
  // strong_roots_prologue calls change_strong_roots_parity, if
  // parallel tasks are enabled.
  class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
    SharedHeap*   _sh;

   public:
    StrongRootsScope(SharedHeap* heap, bool activate = true);
  };
  friend class StrongRootsScope;
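  // A hedged usage sketch: a collector brackets its root scanning with a
  // StrongRootsScope so the parity is advanced exactly once per cycle
  // (process_roots stands in for the subclass's root-scanning code):
  //
  //   {
  //     StrongRootsScope srs(this);
  //     process_roots(...);  // workers may now claim tasks via the parity
  //   }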

 private:
  void change_strong_roots_parity();

 public:
  FlexibleWorkGang* workers() const { return _workers; }

  // The functions below are helper functions that a subclass of
  // "SharedHeap" can use in the implementation of its virtual
  // functions.

public:

  // Do anything common to GCs.
  virtual void gc_prologue(bool full) = 0;
  virtual void gc_epilogue(bool full) = 0;

  // Sets the number of parallel threads that will be doing tasks
  // (such as processing roots) subsequently.
  virtual void set_par_threads(uint t);
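  //
  // A hedged sketch of the expected call sequence (MyParTask is
  // illustrative; 0 is the "special" value, noted earlier, that
  // set_n_threads() translates to 1):
  //
  //   uint n = workers()->active_workers();
  //   set_par_threads(n);            // announce parallelism to subtasks
  //   MyParTask task(this);
  //   workers()->run_task(&task);    // workers execute in parallel
  //   set_par_threads(0);            // back to "sequential" afterwards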

  //
  // New methods from CollectedHeap
  //

  // Some utilities.
  void print_size_transition(outputStream* out,
                             size_t bytes_before,
                             size_t bytes_after,
                             size_t capacity);
};

#endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP