/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP

#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/generation.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/yieldingWorkgroup.hpp"

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Shenker
// style. We assume, for now, that this generation is always the
// seniormost generation (modulo the PermGeneration), and for simplicity
// in the first implementation, that this generation is a single compactible
// space. Neither of these restrictions appears essential; they will be
// relaxed in the future when more time is available to implement the
// greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (I.e., for the marking bit map
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
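//
// Illustrative address <-> bit conversions (sketches of the conversion
// utilities declared below, not their definitive implementations):
//
//   size_t    bit  = pointer_delta(addr, _bmStartWord) >> _shifter;  // heapWordToOffset
//   HeapWord* addr = _bmStartWord + (offset << _shifter);            // offsetToHeapWord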
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord* _bmStartWord;   // base address of range covered by map
  size_t    _bmWordSize;    // map size (in #HeapWords covered)
  const int _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap    _bm;            // the bit map itself
 public:
  Mutex* const _lock;       // mutex protecting _bm

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  size_t    sizeInBits()  const { return _bm.size();   }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // does not perform lock checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                            HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                              HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                           HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};

// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj  {
  //
  friend class CMSCollector;   // to get at expansion stats further below
  //

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;      // bottom of stack
  size_t _index;     // one more than last occupied index
  size_t _capacity;  // max #elements
  Mutex  _par_lock;  // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // # of times we hit the max stack size limit
  size_t _failed_double;  // # of times expansion failed before hitting the limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
     oop least = (oop)low;
     for (size_t i = 0; i < _index; i++) {
       least = MIN2(least, _base[i]);
     }
     return least;
  }

  // Exposed here to allow stack expansion in the parallel case
  Mutex* par_lock() { return &_par_lock; }
};
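
// Typical (illustrative) use by a marking closure: push a grey object and
// fall back to overflow handling when the stack is full (mark_stack and
// handle_stack_overflow are hypothetical names):
//
//   if (!mark_stack->push(obj)) {
//     handle_stack_overflow(addr);   // e.g. record a restart address
//   }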

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj {
  size_t _index;
  size_t _capacity;
  size_t _overflows;
  HeapWord** _array;   // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _overflows(0), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index <= capacity(),
           err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
                   _index, _capacity));
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
    if (_overflows > 0 && PrintCMSStatistics > 1) {
      warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
              _capacity, _overflows);
    }
    _overflows = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    } else {
      ++_overflows;
      assert(_index == _capacity,
             err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
                     "): out of bounds at overflow#" SIZE_FORMAT,
                     _index, _capacity, _overflows));
    }
  }
};
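
// Illustrative use of ChunkArray (storage, capacity and process_chunk are
// hypothetical names): record sampled boundary addresses, then hand out the
// recorded chunks to parallel rescan workers:
//
//   ChunkArray ca(storage, capacity);
//   ca.record_sample(space->top(), 0);   // the size is currently ignored
//   for (size_t i = 0; i < ca.end(); i++) {
//     process_chunk(ca.nth(i));
//   }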

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing.  Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = (100 - alpha) * avg + alpha * cur_sample
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up are not included.
  //
  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
  // real value, but is used only after the first period.  A value of 100 is
  // used for the first sample so it gets the entire weight.
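  //
  // Worked example (illustrative): with alpha == 25, a current average of
  // 4.0s and a new sample of 8.0s,
  //   avg = ((100 - 25) * 4.0 + 25 * 8.0) / 100 = 5.0s
  // (the division by 100 is implicit in the percentage weights above).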
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;         // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;        // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;        // icms duty cycle (0-100).

 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();
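  // Numeric illustration (the exact clamping policy lives in the .cpp):
  // if the old duty cycle is 80 and the new recommendation is 10, a damped
  // result between the two (say, 60) limits the step so the duty cycle
  // ramps down over several updates instead of oscillating.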

  // In support of adjusting of cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen collection and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const          { return _cms_period; }
  double cms_duration() const        { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const       { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;
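
  // Rough relationships (illustrative, not the literal implementation):
  //   time_until_cms_gen_full() ~ cms_gen_free / cms_consumption_rate()
  //   time_until_cms_start()    ~ time_until_cms_gen_full()
  //                               - the expected duration of the cycle itself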

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }

  bool do_object_b(oop obj);
};
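
// A sketch of the liveness predicate above (the definitive version lives in
// the .cpp file): objects outside _span are conservatively treated as live,
// while objects inside _span are live iff their mark bit is set:
//
//   bool CMSIsAliveClosure::do_object_b(oop obj) {
//     HeapWord* addr = (HeapWord*)obj;
//     return addr != NULL &&
//            (!_span.contains(addr) || _bit_map->isMarked(addr));
//   }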


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;             // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      //  to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        //  to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) //  assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;
  friend class TraceCMSMemoryManagerStats;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced to accommodate the overflow list above.
  // This code will likely be revisited under RFE#4922830.
  Stack<oop>     _preserved_oop_stack;
  Stack<markOop> _preserved_mark_stack;
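  // A rough sketch of the lock-free push used for _overflow_list
  // (illustrative only; the real code in the .cpp also copes with the
  // displaced mark words that the stack pair above preserves):
  //
  //   oop cur;
  //   do {
  //     cur = _overflow_list;
  //     p->set_mark((markOop)cur);   // thread the list through the mark word
  //   } while (Atomic::cmpxchg_ptr(p, &_overflow_list, cur) != cur);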

  int*             _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static   bool _full_gc_requested;
  unsigned int  _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int  _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }
  // Root scanning options for perm gen
  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // Verification support
  CMSBitMap     _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
  // padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;
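  // (An AdaptivePaddedAverage pads its decaying average with a multiple of
  // the observed deviation -- roughly avg + padding * deviation -- so the
  // estimates above err on the conservative side.)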

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  ConcurrentMarkSweepGeneration* _permGen; // perm gen
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;
  CMSMarkStack  _revisitStack;            // used to keep track of klassKlass objects
                                          // to revisit
  CMSBitMap     _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.

  HeapWord*     _restart_addr; // in support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor*            _ref_processor;
  CMSIsAliveClosure              _is_alive_closure;
      // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread*     _cmsThread;   // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling)            = {Marking}
  // next_state(Marking)           = {Precleaning, Sweeping}
  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking)      = {Sweeping}
  // next_state(Sweeping)          = {Resizing}
  // next_state(Resizing)          = {Resetting}
  // next_state(Resetting)         = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling ==  post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing            = 0,
    Resetting           = 1,
    Idling              = 2,
    InitialMarking      = 3,
    Marking             = 4,
    Precleaning         = 5,
    AbortablePreclean   = 6,
    FinalMarking        = 7,
    Sweeping            = 8
  };
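  // Illustrative use of the ordering property documented above:
  //
  //   // post-sweep && pre-mark, i.e. Resizing, Resetting or Idling:
  //   bool quiescent = CMSCollector::abstract_state() <= CMSCollector::Idling;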
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                 // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                 // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats      _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord*     _icms_start_limit;
  HeapWord*     _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues() { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;  // the younger gen
  HeapWord** _top_addr;    // ... Top of Eden
  HeapWord** _end_addr;    // ... End of Eden
  HeapWord** _eden_chunk_array; // ... Eden partitioning array
  size_t     _eden_chunk_index; // ... top (exclusive) of array
  size_t     _eden_chunk_capacity;  // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;
  size_t     _survivor_chunk_index;
  size_t     _survivor_chunk_capacity;
  size_t*    _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)       // sequential
  NOT_PRODUCT(bool par_simulate_overflow();)   // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch);  // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);      // single-threaded marking
  bool do_marking_mt(bool asynch);      // multi-threaded  marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // Resize the generations included in the collector.
  void compute_new_size();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
    bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
    CollectorState first_state, bool should_start_over);

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering:  recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               ConcurrentMarkSweepGeneration* permGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock()        const { return _markBitMap.lock();    }
  static CollectorState abstract_state() { return _collectorState;  }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs);
  void collect_in_foreground(bool clear_all_soft_refs);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);
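  // Hypothetical call site, from a younger generation's promotion path:
  //
  //   _collector->promoted(true /* par */, obj_start,
  //                        obj->is_objArray(), obj_size);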

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);

  CMSBitMap* markBitMap()  { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                    // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics()           PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters()    { return _gc_counters; }

  // timer stuff
  void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
  void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
  void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
  double  timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t  numDirtyCards()                 { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const             { return _sweep_count; }
  void   increment_sweep_count()         { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();          // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  // debugging
  void verify(bool);
  bool verify_after_remark();
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Get the bit map with a perm gen "deadness" information.
  CMSBitMap* perm_gen_verify_bit_map()       { return &_perm_gen_verify_bit_map; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }
};

class CMSExpansionCause : public AllStatic  {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*       _collector; // the collector that collects us
  CompactibleFreeListSpace*  _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters*      _gen_counters;
  GSpaceCounters*          _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type          = 0,
    MS_foreground_collection_type       = 1,
    MSC_foreground_collection_type      = 2,
    Unknown_collection_type             = 3
  };

  CollectionTypes _debug_collection_type;

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size (returns false if unable to shrink)
  virtual void shrink_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, intx tr);
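  //
  // Sketch of the initializer (the real definition is in the .cpp): a
  // non-negative io (e.g. CMSInitiatingOccupancyFraction) is used directly
  // as a percentage; a negative io derives the occupancy from
  // MinHeapFreeRatio and the trigger ratio tr:
  //
  //   _initiating_occupancy = (io >= 0)
  //     ? (double)io / 100.0
  //     : ((100.0 - MinHeapFreeRatio) +
  //        (double)tr * MinHeapFreeRatio / 100.0) / 100.0;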

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return false;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first()
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation.  The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()-> update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
    CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);

  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);
  virtual void oop_iterate(OopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get a message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord* addr);

  // Get the chunk at the end of the space.  Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  // Overriding of unused functionality (sharing not yet supported with CMS)
  void pre_adjust_pointers();
  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify(bool allow_dirty);
  void print_statistics()               PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters()  { return collector()->counters(); }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num) {
    // Delegate to the collector
    return collector()->get_data_recorder(thr_num);
  }

  // Printing
  const char* name() const;
  virtual const char* short_name() const { return "CMS"; }
  void        print() const;
  void printOccupancy(const char* s);
  bool must_be_youngest() const { return false; }
  bool must_be_oldest()   const { return true; }

  void compute_new_size();

  CollectionTypes debug_collection_type() { return _debug_collection_type; }
  void rotate_debug_collection_type();
};

class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy casted to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  virtual void compute_new_size();
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
      use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};

//
// Closures of various sorts used by CMS to accomplish its work
//

// This closure is used to check that a certain set of oops is empty.
class FalseClosure: public OopClosure {
 public:
  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
};

// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _bitMap;
  CMSBitMap*     _mut;
  CMSMarkStack*  _markStack;
  CMSMarkStack*  _revisitStack;
  bool           _yield;
  int            _skipBits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  DEBUG_ONLY(bool _verifying;)

 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack*  markStack,
                       CMSMarkStack*  revisitStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};
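
// A hypothetical driver for the closure above (the real iteration is in
// the collector's marking phase; CMSBitMap::iterate is assumed here for
// illustration): set bits at or beyond the finger trigger do_bit(), which
// pushes newly found oops on the mark stack and yields periodically.
//
//   MarkFromRootsClosure cl(collector, span, bitMap, markStack,
//                           revisitStack, true /* should_yield */);
//   bitMap->iterate(&cl);            // cl.do_bit(offset) per set bit
//   // on mark-stack overflow: cl.reset(addr) and restart from addr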

// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _whole_span;
  MemRegion      _span;
  CMSBitMap*     _bit_map;
  CMSBitMap*     _mut;
  OopTaskQueue*  _work_queue;
  CMSMarkStack*  _overflow_stack;
  CMSMarkStack*  _revisit_stack;
  bool           _yield;
  int            _skip_bits;
  HeapWord*      _finger;
  HeapWord*      _threshold;
  CMSConcMarkingTask* _task;
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                       MemRegion span,
                       CMSBitMap* bit_map,
                       OopTaskQueue* work_queue,
                       CMSMarkStack*  overflow_stack,
                       CMSMarkStack*  revisit_stack,
                       bool should_yield);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  bool get_work_from_overflow_stack();
};
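
// An illustrative outline of the parallel flavor's load balancing (not
// the actual implementation): each worker marks from its own span,
// spilling excess work to the shared overflow stack and refilling its
// work queue from that stack when it runs dry. In sketch form:
//
//   scan_oops_in_oop(addr);          // fills _work_queue
//   if (_work_queue->size() == 0) {
//     (void) get_work_from_overflow_stack();
//   }
//   do_yield_check();                // cooperate with stop-world phases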

// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public OopClosure {
  CMSCollector*    _collector;
  MemRegion        _span;
  CMSBitMap*       _verification_bm;
  CMSBitMap*       _cms_bm;
  CMSMarkStack*    _mark_stack;
 protected:
  void do_oop(oop p);
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack*  mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);
  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};

class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*  _collector;
  MemRegion      _span;
  CMSBitMap*     _verification_bm;
  CMSBitMap*     _cms_bm;
  CMSMarkStack*  _mark_stack;
  HeapWord*      _finger;
  PushAndMarkVerifyClosure _pam_verify_closure;
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack*  mark_stack);
  bool do_bit(size_t offset);
  void reset(HeapWord* addr);
};
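
// An assumed driver for the two verification closures above (sketch
// only): marking is redone from the roots into verification_bm, and each
// newly reachable object is checked against cms_bm, the bit map the
// concurrent phases produced; a miss indicates lost marking work.
//
//   MarkFromRootsVerifyClosure vcl(collector, span, verification_bm,
//                                  cms_bm, mark_stack);
//   verification_bm->iterate(&vcl);  // vcl.do_bit() re-marks transitively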


// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};
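
// Typical (hypothetical) use: assert that some range of a bit map is
// clear by iterating it with a closure that must never be called back.
//
//   FalseBitMapClosure never;
//   mod_union_table->iterate(&never);  // guarantee() fires on any 1-bit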

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It's invoked via
// MarkFromDirtyCardsClosure below. It uses either
// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
// declared in genOopClosures.hpp to accomplish some of its work.
// In the parallel case the bitMap is shared, so access to
// it needs to be suitably synchronized for updates by embedded
// closures that update it; however, this closure itself only
// reads the bit_map and because it is idempotent, is immune to
// reading stale values.
class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
  #ifdef ASSERT
    CMSCollector*          _collector;
    MemRegion              _span;
    union {
      CMSMarkStack*        _mark_stack;
      OopTaskQueue*        _work_queue;
    };
  #endif // ASSERT
  bool                       _parallel;
  CMSBitMap*                 _bit_map;
  union {
    MarkRefsIntoAndScanClosure*     _scan_closure;
    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
  };

 public:
  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                CMSMarkStack*  mark_stack,
                                CMSMarkStack*  revisit_stack,
                                MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _mark_stack(mark_stack),
    #endif // ASSERT
    _parallel(false),
    _bit_map(bit_map),
    _scan_closure(cl) { }

  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
                                MemRegion span,
                                ReferenceProcessor* rp,
                                CMSBitMap* bit_map,
                                OopTaskQueue* work_queue,
                                CMSMarkStack* revisit_stack,
                                Par_MarkRefsIntoAndScanClosure* cl):
    #ifdef ASSERT
      _collector(collector),
      _span(span),
      _work_queue(work_queue),
    #endif // ASSERT
    _parallel(true),
    _bit_map(bit_map),
    _par_scan_closure(cl) { }

  void do_object(oop obj) {
    guarantee(false, "Call do_object_bm(oop, MemRegion) instead");
  }
  bool do_object_b(oop obj) {
    guarantee(false, "Call do_object_bm(oop, MemRegion) instead");
    return false;
  }
  bool do_object_bm(oop p, MemRegion mr);
};
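
// The intended calling convention, as a sketch (assumed from the
// interface above): the caller passes both the object and the dirty
// region, so only the fields of the object that overlap the dirty cards
// need to be rescanned.
//
//   ScanMarkedObjectsAgainClosure cl(collector, span, rp, bit_map,
//                                    mark_stack, revisit_stack, scan_cl);
//   bool done = cl.do_object_bm(obj, dirty_region);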

// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*      _space;
  ScanMarkedObjectsAgainClosure  _scan_cl;
  size_t                         _num_dirty_cards;

 public:
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            CMSMarkStack* revisit_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
                 mark_stack, revisit_stack, cl) { }

  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            CMSMarkStack* revisit_stack,
                            Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, revisit_stack, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};
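
// A hypothetical rescan loop using the closure above (illustration
// only): the caller computes maximal runs of dirty cards and hands each
// resulting MemRegion to do_MemRegion(), which tallies the cards and
// delegates the object scanning to the embedded _scan_cl.
//
//   MarkFromDirtyCardsClosure dirty_cl(collector, span, space, bit_map,
//                                      mark_stack, revisit_stack, cl);
//   // for each maximal dirty run mr:  dirty_cl.do_MemRegion(mr);
//   // afterwards: dirty_cl.num_dirty_cards() gives the total rescanned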

// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    guarantee(false, "Should never be here");
  }
};

// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*                  _collector;
  MemRegion                      _span;
  bool                           _yield;
  Mutex*                         _freelistLock;
  CMSBitMap*                     _bitMap;
  CMSMarkStack*                  _markStack;
  MarkRefsIntoAndScanClosure*    _scanningClosure;

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion     span,
                                         CMSBitMap* bitMap,
                                         CMSMarkStack*  markStack,
                                         CMSMarkStack*  revisitStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl) {
  }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t      do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  size_t      do_object_careful_m(oop p, MemRegion mr);

  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();

  void do_yield_work();
};
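
// A sketch of how precleaning might use the closure above (assumed
// driver): because the space is being mutated concurrently, objects are
// scanned "carefully"; a return of 0 from do_object_careful_m() signals
// an unparsable address, telling the caller to leave the card dirty and
// retry during the final remark.
//
//   cl.setFreelistLock(space->freelistLock());
//   size_t size = cl.do_object_careful_m(p, dirty_region);
//   if (size == 0) {
//     // could not parse at p right now; defer to remark
//   }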

class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*                  _collector;
  MemRegion                      _span;
  bool                           _yield;
  CMSBitMap*                     _bit_map;
  CMSMarkStack*                  _mark_stack;
  PushAndMarkClosure*            _scanning_closure;
  unsigned int                   _before_count;

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion     span,
                               CMSBitMap*    bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int  before_count,
                               bool          should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t      do_object_careful(oop p);

  size_t      do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};

// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//   left hand chunk (LHC) - block of one or more chunks currently being
//     coalesced.  The LHC is available for coalescing with a new chunk.
//   right hand chunk (RHC) - block that is currently being swept that is
//     free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector;  // collector doing the work
  ConcurrentMarkSweepGeneration* _g;    // Generation being swept
  CompactibleFreeListSpace*      _sp;   // Space being swept
  HeapWord*                      _limit;
  Mutex*                         _freelistLock; // Free list lock (in space)
  CMSBitMap*                     _bitMap;       // Marking bit map (in
                                                // generation)
  bool                           _inFreeRange;  // Indicates if we are in the
                                                // midst of a free run
  bool                           _freeRangeInFreeLists;
                                        // Often, we have just found
                                        // a free chunk and started
                                        // a new free range; we do not
                                        // eagerly remove this chunk from
                                        // the free lists unless there is
                                        // a possibility of coalescing.
                                        // When true, this flag indicates
                                        // that the _freeFinger below
                                        // points to a potentially free chunk
                                        // that may still be in the free lists
  bool                           _lastFreeRangeCoalesced;
                                        // free range contains chunks
                                        // coalesced
  bool                           _yield;
                                        // Whether sweeping should be
                                        // done with yields. For instance
                                        // when done by the foreground
                                        // collector we shouldn't yield.
  HeapWord*                      _freeFinger;   // When _inFreeRange is set, the
                                                // pointer to the "left hand
                                                // chunk"
  size_t                         _freeRangeSize;
                                        // When _inFreeRange is set, this
                                        // indicates the accumulated size
                                        // of the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void doPostIsFreeOrGarbageChunk(FreeChunk *fc,
                                  size_t chunkSize);
  // Process a free chunk during sweeping.
  void doAlreadyFreeChunk(FreeChunk *fc);
  // Process a garbage chunk during sweeping.
  size_t doGarbageChunk(FreeChunk *fc);
  // Process a live chunk during sweeping.
  size_t doLiveChunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const          { return _freeFinger; }
  void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
  size_t freeRangeSize() const          { return _freeRangeSize; }
  void set_freeRangeSize(size_t v)      { _freeRangeSize = v; }
  bool inFreeRange()    const           { return _inFreeRange; }
  void set_inFreeRange(bool v)          { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const    { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const     { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flushCurFreeChunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure();

  size_t       do_blk_careful(HeapWord* addr);
};
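
// An illustrative outline of the sweep state machine described above
// (not the actual implementation; the FreeChunk accessor names are
// assumptions): each block encountered is classified and either opens,
// extends, or terminates the current left hand chunk.
//
//   size_t SweepClosure::do_blk_careful(HeapWord* addr) {
//     FreeChunk* fc = (FreeChunk*)addr;
//     if (fc->isFree()) {
//       doAlreadyFreeChunk(fc);        // may start or extend the LHC
//       return fc->size();
//     } else if (!_bitMap->isMarked(addr)) {
//       return doGarbageChunk(fc);     // garbage: coalesce into the LHC
//     } else {
//       return doLiveChunk(fc);        // live: flush any open LHC first
//     }
//   }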

// Closures related to weak references processing

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete the transitive
// marking of objects as live after an initial set has
// been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase, as well as during the concurrent
// precleaning of the discovered reference lists.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;
  MemRegion            _span;
  CMSMarkStack*        _mark_stack;
  CMSBitMap*           _bit_map;
  CMSKeepAliveClosure* _keep_alive;
  bool                 _concurrent_precleaning;
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      CMSKeepAliveClosure* keep_alive,
                      bool cpc):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _keep_alive(keep_alive),
    _concurrent_precleaning(cpc) {
    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
           "Mismatch");
  }

  void do_void();
};
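
// The drain loop, in sketch form (illustrative only): objects kept alive
// by reference processing are popped off the mark stack and traced until
// the transitive closure over the newly live set is complete.
//
//   void CMSDrainMarkingStackClosure::do_void() {
//     while (!_mark_stack->isEmpty()) {
//       oop obj = _mark_stack->pop();
//       // scan obj's fields with _keep_alive, marking into _bit_map
//     }
//   }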

// A parallel version of CMSDrainMarkingStackClosure above.
class CMSParDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*           _collector;
  MemRegion               _span;
  OopTaskQueue*           _work_queue;
  CMSBitMap*              _bit_map;
  CMSInnerParMarkAndPushClosure _mark_and_push;

 public:
  CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
                                 CMSMarkStack* revisit_stack,
                                 OopTaskQueue* work_queue):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
    _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }

 public:
  void trim_queue(uint max);
  void do_void();
};

// Allow yielding or short-circuiting of reference list
// precleaning work.
class CMSPrecleanRefsYieldClosure: public YieldClosure {
  CMSCollector* _collector;
  void do_yield_work();
 public:
  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
    _collector(collector) {}
  virtual bool should_return();
};


// Convenience class that locks free list locks for given CMS collector
class FreelistLocker: public StackObj {
 private:
  CMSCollector* _collector;
 public:
  FreelistLocker(CMSCollector* collector):
    _collector(collector) {
    _collector->getFreelistLocks();
  }

  ~FreelistLocker() {
    _collector->releaseFreelistLocks();
  }
};
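
// Usage follows the usual RAII pattern; for example (hypothetical
// caller):
//
//   {
//     FreelistLocker fl(collector);  // takes all free list locks
//     // ... operate on the free lists of the CMS space(s) ...
//   }                                // locks released on scope exit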

// Mark all dead objects in a given space.
class MarkDeadObjectsClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  CMSBitMap*                      _live_bit_map;
  CMSBitMap*                      _dead_bit_map;
public:
  MarkDeadObjectsClosure(const CMSCollector* collector,
                         const CompactibleFreeListSpace* sp,
                         CMSBitMap *live_bit_map,
                         CMSBitMap *dead_bit_map) :
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _dead_bit_map(dead_bit_map) {}
  size_t do_blk(HeapWord* addr);
};
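
// Hypothetical use (e.g. in support of heap inspection): walk the blocks
// of the space and record in the dead bit map every block that the live
// bit map does not cover; Space::blk_iterate is assumed as the driver.
//
//   MarkDeadObjectsClosure mdo(collector, space, live_bm, dead_bm);
//   space->blk_iterate(&mdo);        // mdo.do_blk(addr) for each block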

class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {

 public:
  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase);
  TraceCMSMemoryManagerStats();
};


#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP