/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/workgroup.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)
//         - ContigPermSpace -- an offset table contiguous space for perm gen

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// An oop closure that is circumscribed by a filtering memory region.
class SpaceMemRegionOopsIterClosure: public OopClosure {
 private:
  OopClosure* _cl;
  MemRegion   _mr;
 protected:
  template <class T> void do_oop_work(T* p) {
    if (_mr.contains(p)) {
      _cl->do_oop(p);
    }
  }
 public:
  SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr):
    _cl(cl), _mr(mr) {}
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};
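
// Illustrative usage sketch (not part of the original header; "my_closure",
// "dirty_mr" and "sp" are hypothetical names): restricting an existing
// OopClosure so that it only sees fields whose addresses fall inside one
// MemRegion.
//
//   SpaceMemRegionOopsIterClosure filtered(my_closure, dirty_mr);
//   sp->oop_iterate(&filtered);   // my_closure is applied only to fields in dirty_mr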

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space provides support for allocation, size computation, and GC.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

class Space: public CHeapObj {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const  { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing all the objects in
  // the space.
  virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks".  If the space
  // initializes its DirtyCardToOopClosure's specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  virtual MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose.  The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs.  Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const              { return used() == 0; }
  bool not_empty() const             { return used() > 0; }

  // Returns true iff the space contains the given address as part
  // of an allocated object. For certain kinds of spaces, this might
  // be a potentially expensive operation. To prevent performance
  // problems on account of its inadvertent use in product JVMs, we
  // restrict its use to assertion checks only.
  virtual bool is_in(const void* p) const = 0;

  // Returns true iff the reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }

  // Size computations.  Sizes are in bytes.
  size_t capacity()     const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each.  Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the space.  Fields in objects allocated by applications of the closure
  // are not included in the iteration.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl) = 0;

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each.  Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Iterate over all objects that intersect with mr, calling "cl->do_object"
  // on each.  There is an exception to this: if this closure has already
  // been invoked on an object, it may skip such objects in some cases.  This is
  // most likely to happen in an "upwards" (ascending address) iteration of
  // MemRegions.
  virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signalled early termination.
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p".  We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object.  If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full).  Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full).  Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;


  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // IF "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};
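
// Illustrative sketch (not part of the original header): how the block
// interface above can be used to walk a space chunk by chunk.  "sp" and
// "visit" are hypothetical; for a contiguous space the walk below covers
// [bottom(), end()) because the area past top() is one free block.
//
//   HeapWord* cur = sp->bottom();
//   while (cur < sp->end()) {
//     size_t sz = sp->block_size(cur);   // size of the chunk starting at cur
//     if (sp->block_is_obj(cur)) {
//       visit(oop(cur));                 // the chunk is an allocated object
//     }
//     cur += sz;                         // the next chunk starts at cur + sz
//   }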

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
protected:
  OopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again.  NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

public:
  DirtyCardToOopClosure(Space* sp, OopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
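
// Illustrative sketch (assumed usage, not original code): a card-table scan
// typically asks the space for the appropriate closure type and then feeds it
// the dirty regions.  "sp", "my_closure" and "dirty_mr" are hypothetical.
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(my_closure, CardTableModRefBS::ObjHeadPreciseArray);
//   dcto_cl->do_MemRegion(dirty_mr);   // applies my_closure to fields on the dirty cards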

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;
  CompactPoint(Generation* _gen, CompactibleSpace* _space,
               HeapWord* _threshold) :
    gen(_gen), space(_space), threshold(_threshold) {}
};


// A space that supports compaction operations.  This is usually, but not
// necessarily, a space that is normally contiguous.  But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
  friend class CompactingPermGenGen;
  friend class CMSPermGenGen;
private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

public:
  CompactibleSpace() :
   _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top;    }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() {}

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order.  Also is used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers.  The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting.  This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date.  Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; };

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary.  This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and containing "this" (which must
  // also equal "cp->space").  "compact_top" is where in "this" the
  // next object should be forwarded to.  If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward.  In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                    HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const = 0;

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; it returns the next
  // threshold.  (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded.  If the size allows, fill the free
  // block with an object, to prevent excessive compaction.  Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};
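
// Illustrative sketch (assumed driver logic, not original code): the order in
// which a full mark-sweep collection exercises the interface above for a
// compaction space.  "gen" and "sp" are hypothetical.
//
//   CompactPoint cp(gen, NULL, NULL);   // compaction state threaded through the spaces
//   sp->prepare_for_compaction(&cp);    // phase 2: compute and install forwarding pointers
//   sp->adjust_pointers();              // phase 3: rewrite interior pointers to new locations
//   sp->compact();                      // phase 4: copy objects to their new locations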

#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
  /* Compute the new addresses for the live objects and store them in the mark \
   * word.  Used by MarkSweep::mark_sweep_phase2().                            \
   */                                                                        \
  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
                                                                             \
  /* We're sure to be here before any objects are compacted into this        \
   * space, so this is a good time to initialize this:                       \
   */                                                                        \
  set_compaction_top(bottom());                                              \
                                                                             \
  if (cp->space == NULL) {                                                   \
    assert(cp->gen != NULL, "need a generation");                            \
    assert(cp->threshold == NULL, "just checking");                          \
    assert(cp->gen->first_compaction_space() == this, "just checking");      \
    cp->space = cp->gen->first_compaction_space();                           \
    compact_top = cp->space->bottom();                                       \
    cp->space->set_compaction_top(compact_top);                              \
    cp->threshold = cp->space->initialize_threshold();                       \
  } else {                                                                   \
    compact_top = cp->space->compaction_top();                               \
  }                                                                          \
                                                                             \
  /* We allow some amount of garbage towards the bottom of the space, so     \
   * we don't start compacting before there is a significant gain to be made.\
   * Occasionally, we want to ensure a full compaction, which is determined  \
   * by the MarkSweepAlwaysCompactCount parameter.                           \
   */                                                                        \
  int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations;\
  bool skip_dead = (MarkSweepAlwaysCompactCount < 1)                         \
    ||((invocations % MarkSweepAlwaysCompactCount) != 0);                    \
                                                                             \
  size_t allowed_deadspace = 0;                                              \
  if (skip_dead) {                                                           \
    const size_t ratio = allowed_dead_ratio();                               \
    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
  }                                                                          \
                                                                             \
  HeapWord* q = bottom();                                                    \
  HeapWord* t = scan_limit();                                                \
                                                                             \
  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
                                   live object. */                           \
  HeapWord*  first_dead = end();/* The first dead object. */                 \
  LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
                                   first header of preceding free area. */   \
  _first_dead = first_dead;                                                  \
                                                                             \
  const intx interval = PrefetchScanIntervalInBytes;                         \
                                                                             \
  while (q < t) {                                                            \
    assert(!block_is_obj(q) ||                                               \
           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
           oop(q)->mark()->has_bias_pattern(),                               \
           "these are the only valid states during a mark sweep");           \
    if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
      /* prefetch beyond q */                                                \
      Prefetch::write(q, interval);                                          \
      /* size_t size = oop(q)->size();  changing this for cms for perm gen */\
      size_t size = block_size(q);                                           \
      compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
      q += size;                                                             \
      end_of_live = q;                                                       \
    } else {                                                                 \
      /* run over all the contiguous dead objects */                         \
      HeapWord* end = q;                                                     \
      do {                                                                   \
        /* prefetch beyond end */                                            \
        Prefetch::write(end, interval);                                      \
        end += block_size(end);                                              \
      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
                                                                             \
      /* see if we might want to pretend this object is alive so that        \
       * we don't have to compact quite as often.                            \
       */                                                                    \
      if (allowed_deadspace > 0 && q == compact_top) {                       \
        size_t sz = pointer_delta(end, q);                                   \
        if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
          q = end;                                                           \
          end_of_live = end;                                                 \
          continue;                                                          \
        }                                                                    \
      }                                                                      \
                                                                             \
      /* otherwise, it really is a free region. */                           \
                                                                             \
      /* for the previous LiveRange, record the end of the live objects. */  \
      if (liveRange) {                                                       \
        liveRange->set_end(q);                                               \
      }                                                                      \
                                                                             \
      /* record the current LiveRange object.                                \
       * liveRange->start() is overlaid on the mark word.                    \
       */                                                                    \
      liveRange = (LiveRange*)q;                                             \
      liveRange->set_start(end);                                             \
      liveRange->set_end(end);                                               \
                                                                             \
      /* see if this is the first dead region. */                            \
      if (q < first_dead) {                                                  \
        first_dead = q;                                                      \
      }                                                                      \
                                                                             \
      /* move on to the next object */                                       \
      q = end;                                                               \
    }                                                                        \
  }                                                                          \
                                                                             \
  assert(q == t, "just checking");                                           \
  if (liveRange != NULL) {                                                   \
    liveRange->set_end(q);                                                   \
  }                                                                          \
  _end_of_live = end_of_live;                                                \
  if (end_of_live < first_dead) {                                            \
    first_dead = end_of_live;                                                \
  }                                                                          \
  _first_dead = first_dead;                                                  \
                                                                             \
  /* save the compaction_top of the compaction space. */                     \
  cp->space->set_compaction_top(compact_top);                                \
}
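
// Illustrative sketch (one plausible instantiation, not necessarily the exact
// code in space.cpp): a contiguous space expands the macro with its own block
// accessors to implement prepare_for_compaction().  "block_is_always_obj" and
// "obj_size" stand for per-space helpers.
//
//   void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
//   }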

#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                             \
  /* adjust all the interior pointers to point at the new locations of objects  \
   * Used by MarkSweep::mark_sweep_phase3() */                                  \
                                                                                \
  HeapWord* q = bottom();                                                       \
  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */   \
                                                                                \
  assert(_first_dead <= _end_of_live, "Stands to reason, no?");                 \
                                                                                \
  if (q < t && _first_dead > q &&                                               \
      !oop(q)->is_gc_marked()) {                                                \
    /* we have a chunk of the space which hasn't moved and we've                \
     * reinitialized the mark word during the previous pass, so we can't        \
     * use is_gc_marked for the traversal. */                                   \
    HeapWord* end = _first_dead;                                                \
                                                                                \
    while (q < end) {                                                           \
      /* I originally tried to conjoin "block_start(q) == q" to the             \
       * assertion below, but that doesn't work, because you can't              \
       * accurately traverse previous objects to get to the current one         \
       * after their pointers (including pointers into permGen) have been       \
       * updated, until the actual compaction is done.  dld, 4/00 */            \
      assert(block_is_obj(q),                                                   \
             "should be at block boundaries, and should be looking at objs");   \
                                                                                \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));     \
                                                                                \
      /* point all the oops to the new location */                              \
      size_t size = oop(q)->adjust_pointers();                                  \
      size = adjust_obj_size(size);                                             \
                                                                                \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());           \
                                                                                \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));     \
                                                                                \
      q += size;                                                                \
    }                                                                           \
                                                                                \
    if (_first_dead == t) {                                                     \
      q = t;                                                                    \
    } else {                                                                    \
      /* $$$ This is funky.  Using this to read the previously written          \
       * LiveRange.  See also use below. */                                     \
      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();                \
    }                                                                           \
  }                                                                             \
                                                                                \
  const intx interval = PrefetchScanIntervalInBytes;                            \
                                                                                \
  debug_only(HeapWord* prev_q = NULL);                                          \
  while (q < t) {                                                               \
    /* prefetch beyond q */                                                     \
    Prefetch::write(q, interval);                                               \
    if (oop(q)->is_gc_marked()) {                                               \
      /* q is alive */                                                          \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));     \
      /* point all the oops to the new location */                              \
      size_t size = oop(q)->adjust_pointers();                                  \
      size = adjust_obj_size(size);                                             \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());           \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));     \
      debug_only(prev_q = q);                                                   \
      q += size;                                                                \
    } else {                                                                    \
      /* q is not a live object, so its mark should point at the next           \
       * live object */                                                         \
      debug_only(prev_q = q);                                                   \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
      assert(q > prev_q, "we should be moving forward through memory");         \
    }                                                                           \
  }                                                                             \
                                                                                \
  assert(q == t, "just checking");                                              \
}
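
// Illustrative sketch (assumed instantiation): how a space typically expands
// the macro above to implement adjust_pointers() for mark-sweep phase 3.
// "adjust_obj_size" stands for a per-space size adjustment helper.
//
//   void CompactibleSpace::adjust_pointers() {
//     if (used() == 0) return;   // nothing live, nothing to adjust
//     SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
//   }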

#define SCAN_AND_COMPACT(obj_size) {                                            \
  /* Copy all live objects to their new location                                \
   * Used by MarkSweep::mark_sweep_phase4() */                                  \
                                                                                \
  HeapWord*       q = bottom();                                                 \
  HeapWord* const t = _end_of_live;                                             \
  debug_only(HeapWord* prev_q = NULL);                                          \
                                                                                \
  if (q < t && _first_dead > q &&                                               \
      !oop(q)->is_gc_marked()) {                                                \
    debug_only(                                                                 \
    /* we have a chunk of the space which hasn't moved and we've reinitialized  \
     * the mark word during the previous pass, so we can't use is_gc_marked for \
     * the traversal. */                                                        \
    HeapWord* const end = _first_dead;                                          \
                                                                                \
    while (q < end) {                                                           \
      size_t size = obj_size(q);                                                \
      assert(!oop(q)->is_gc_marked(),                                           \
             "should be unmarked (special dense prefix handling)");             \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));       \
      debug_only(prev_q = q);                                                   \
      q += size;                                                                \
    }                                                                           \
    )  /* debug_only */                                                         \
                                                                                \
    if (_first_dead == t) {                                                     \
      q = t;                                                                    \
    } else {                                                                    \
      /* $$$ Funky */                                                           \
      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();               \
    }                                                                           \
  }                                                                             \
                                                                                \
  const intx scan_interval = PrefetchScanIntervalInBytes;                       \
  const intx copy_interval = PrefetchCopyIntervalInBytes;                       \
  while (q < t) {                                                               \
    if (!oop(q)->is_gc_marked()) {                                              \
      /* mark is pointer to next marked oop */                                  \
      debug_only(prev_q = q);                                                   \
      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
      assert(q > prev_q, "we should be moving forward through memory");         \
    } else {                                                                    \
      /* prefetch beyond q */                                                   \
      Prefetch::read(q, scan_interval);                                         \
                                                                                \
      /* size and destination */                                                \
      size_t size = obj_size(q);                                                \
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();                \
                                                                                \
      /* prefetch beyond compaction_top */                                      \
      Prefetch::write(compaction_top, copy_interval);                           \
                                                                                \
      /* copy object and reinit its mark */                                     \
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size,            \
                                                            compaction_top));   \
      assert(q != compaction_top, "everything in this pass should be moving");  \
      Copy::aligned_conjoint_words(q, compaction_top, size);                    \
      oop(compaction_top)->init_mark();                                         \
      assert(oop(compaction_top)->klass() != NULL, "should have a class");      \
                                                                                \
      debug_only(prev_q = q);                                                   \
      q += size;                                                                \
    }                                                                           \
  }                                                                             \
                                                                                \
  /* Let's remember if we were empty before we did the compaction. */           \
  bool was_empty = used_region().is_empty();                                    \
  /* Reset space after compaction is complete */                                \
  reset_after_compaction();                                                     \
  /* We do this clear, below, since it has overloaded meanings for some */      \
  /* space subtypes.  For example, OffsetTableContigSpace's that were   */      \
  /* compacted into will have had their offset table thresholds updated */      \
  /* continuously, but those that weren't need to have their thresholds */      \
  /* re-initialized.  Also mangles unused area for debugging.           */      \
  if (used_region().is_empty()) {                                               \
    if (!was_empty) clear(SpaceDecorator::Mangle);                              \
  } else {                                                                      \
    if (ZapUnusedHeapArea) mangle_unused_area();                                \
  }                                                                             \
}
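
// Illustrative sketch (assumed instantiation): mark-sweep phase 4 in a
// contiguous space is simply the macro above expanded with the space's
// object size accessor.
//
//   void ContiguousSpace::compact() {
//     SCAN_AND_COMPACT(obj_size);
//   }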

class GenSpaceMangler;

// A space in which the free area is contiguous.  It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
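
  // Illustrative sketch (an assumption about the typical shape of
  // par_allocate_impl(), not its actual definition): lock-free
  // bump-the-pointer allocation that retries when the CAS on _top fails.
  // "word_size" and "end_value" are the helper's parameters.
  //
  //   do {
  //     HeapWord* obj = top();
  //     if (pointer_delta(end_value, obj) < word_size) return NULL;  // space full
  //     HeapWord* new_top = obj + word_size;
  //     HeapWord* result =
  //       (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
  //     if (result == obj) return obj;   // we won the race; [obj, new_top) is ours
  //   } while (true);                    // top moved under us; retry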

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const            { return _top;    }
  void set_top(HeapWord* value)    { _top = value; }

  virtual void set_saved_mark()    { _saved_mark_word = top();    }
  void reset_saved_mark()          { _saved_mark_word = bottom(); }

  WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
  WaterMark top_mark()        { return WaterMark(this, top()); }
  WaterMark saved_mark()      { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This code may be NULL depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const        { return byte_size(bottom(), end()); }
  size_t used() const            { return byte_size(bottom(), top()); }
  size_t free() const            { return byte_size(top(),    end()); }

  // Override from space.
  bool is_in(const void* p) const;

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);

  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  // Iteration
  void oop_iterate(OopClosure* cl);
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
  // iterates on objects up to the safe limit
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
           "_concurrent_iteration_safe_limit update missed");
    return _concurrent_iteration_safe_limit;
  }
  // Changes the safe limit; all objects from bottom() to the new
  // limit should be properly initialized.
  void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
    assert(new_limit <= top(), "uninitialized objects in the safe range");
    _concurrent_iteration_safe_limit = new_limit;
  }

#ifndef SERIALGC
  // In support of parallel oop_iterate.
  #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

    ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
  #undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // SERIALGC

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }
  virtual size_t minimum_free_block_size() const { return 0; }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object.  Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;

  // Used to increase collection frequency.  "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);

};


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

public:
  Filtering_DCTOC(Space* sp, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a FilteringClosure, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, OopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};


// Class EdenSpace describes eden-space in new generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation.  As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) :
   _gen(gen), _soft_end(NULL) {}

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear(bool mangle_space);

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};
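
// Illustrative sketch (not original code): what the slow path conceptually
// does once allocation hits the soft limit.  "eden" and "step" are
// hypothetical; the policy action taken before raising the limit is elided.
//
//   // ... perform the sampling / policy action that _soft_end exists for ...
//   eden->set_soft_end(MIN2(eden->soft_end() + step, eden->end()));  // never past the hard end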

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while soft-end is being modified concurrently

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};


// A ContigSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.)  This is the abstract base class for old generation
// (tenured, perm) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
1110
  void verify() const;
D
duke 已提交
1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122

  // Shared space support
  void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
};
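
// Illustrative sketch (assumed usage): the point of the offset table is that
// block_start() is cheap, so a card scan can find the object overlapping an
// arbitrary address.  "ots" and "addr" are hypothetical, and "addr" is assumed
// to point into an allocated object.
//
//   HeapWord* start = ots->block_start(addr);   // fast, via the BlockOffsetArray
//   oop obj = oop(start);                        // the object overlapping addr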


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};


// Class ContigPermSpace is used by CompactingPermGen

class ContigPermSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  ContigPermSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};

#endif // SHARE_VM_MEMORY_SPACE_HPP