Commit 7b1f4848 authored by J jcoomes

Merge

@@ -38,7 +38,7 @@
CMSPermGen::CMSPermGen(ReservedSpace rs, size_t initial_byte_size,
CardTableRS* ct,
FreeBlockDictionary::DictionaryChoice dictionaryChoice) {
FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) {
CMSPermGenGen* g =
new CMSPermGenGen(rs, initial_byte_size, -1, ct);
if (g == NULL) {
@@ -45,7 +45,7 @@ class CMSPermGen: public PermGen {
public:
CMSPermGen(ReservedSpace rs, size_t initial_byte_size,
CardTableRS* ct, FreeBlockDictionary::DictionaryChoice);
CardTableRS* ct, FreeBlockDictionary<FreeChunk>::DictionaryChoice);
HeapWord* mem_allocate(size_t size);
@@ -65,7 +65,7 @@ public:
// regarding not using adaptive free lists for a perm gen.
ConcurrentMarkSweepGeneration(rs, initial_byte_size, // MinPermHeapExpansion
level, ct, false /* use adaptive freelists */,
(FreeBlockDictionary::DictionaryChoice)CMSDictionaryChoice)
(FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice)
{}
void initialize_performance_counters();
@@ -25,10 +25,10 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
#include "gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp"
#include "gc_implementation/concurrentMarkSweep/freeList.hpp"
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/freeList.hpp"
#include "memory/space.hpp"
// Classes in support of keeping track of promotions into a non-Contiguous
@@ -129,10 +129,10 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Linear allocation blocks
LinearAllocBlock _smallLinearAllocBlock;
FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
FreeBlockDictionary* _dictionary; // ptr to dictionary for large size blocks
FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
FreeBlockDictionary<FreeChunk>* _dictionary; // ptr to dictionary for large size blocks
FreeList _indexedFreeList[IndexSetSize];
FreeList<FreeChunk> _indexedFreeList[IndexSetSize];
// indexed array for small size blocks
// allocation strategy
bool _fitStrategy; // Use best fit strategy.
@@ -169,7 +169,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// If the count of "fl" is negative, its absolute value indicates a
// number of free chunks that had been previously "borrowed" from the
// global list of size "word_sz", and must now be decremented.
void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);
void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl);
// Allocation helper functions
// Allocate using a strategy that takes from the indexed free lists
@@ -215,7 +215,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// and return it. The split off remainder is returned to
// the free lists. The old name for getFromListGreater
// was lookInListGreater.
FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
FreeChunk* getFromListGreater(FreeList<FreeChunk>* fl, size_t numWords);
// Get a chunk in the indexed free list or dictionary,
// by considering a larger chunk and splitting it.
FreeChunk* getChunkFromGreater(size_t numWords);
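The split described above can be pictured with a minimal standalone model. This is illustrative only; MiniChunk and split_remainder are hypothetical stand-ins, not part of this change:

#include <cassert>
#include <cstddef>

// Hypothetical model of the split these helpers perform: cut numWords off a
// larger free chunk, hand that piece to the caller, and return the remainder
// (in words) that would be re-inserted into the free lists.
struct MiniChunk { size_t words; };

size_t split_remainder(MiniChunk* big, size_t numWords) {
  assert(big->words > numWords);   // must leave a non-empty remainder
  size_t remainder = big->words - numWords;
  big->words = numWords;           // caller receives exactly numWords
  return remainder;                // goes back on the appropriate free list
}

int main() {
  MiniChunk c = { 100 };
  assert(split_remainder(&c, 30) == 70 && c.words == 30);
  return 0;
}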
@@ -286,10 +286,10 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Constructor...
CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
bool use_adaptive_freelists,
FreeBlockDictionary::DictionaryChoice);
FreeBlockDictionary<FreeChunk>::DictionaryChoice);
// accessors
bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
FreeBlockDictionary* dictionary() const { return _dictionary; }
FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
@@ -499,7 +499,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Verify that the given chunk is in the free lists:
// i.e. either the binary tree dictionary, the indexed free lists
// or the linear allocation block.
bool verifyChunkInFreeLists(FreeChunk* fc) const;
bool verify_chunk_in_free_list(FreeChunk* fc) const;
// Verify that the given chunk is the linear allocation block
bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
// Do some basic checks on the free lists.
@@ -608,7 +608,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
void coalDeath(size_t size);
void smallSplitBirth(size_t size);
void smallSplitDeath(size_t size);
void splitBirth(size_t size);
void split_birth(size_t size);
void splitDeath(size_t size);
void split(size_t from, size_t to1);
@@ -622,7 +622,7 @@ class CFLS_LAB : public CHeapObj {
CompactibleFreeListSpace* _cfls;
// Our local free lists.
FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
FreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
// Initialized from a command-line arg.
@@ -635,7 +635,7 @@ class CFLS_LAB : public CHeapObj {
size_t _num_blocks [CompactibleFreeListSpace::IndexSetSize];
// Internal work method
void get_from_global_pool(size_t word_sz, FreeList* fl);
void get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl);
public:
CFLS_LAB(CompactibleFreeListSpace* cfls);
@@ -188,7 +188,7 @@ class CMSParGCThreadState: public CHeapObj {
ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
ReservedSpace rs, size_t initial_byte_size, int level,
CardTableRS* ct, bool use_adaptive_freelists,
FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
CardGeneration(rs, initial_byte_size, level, ct),
_dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
_debug_collection_type(Concurrent_collection_type)
@@ -1026,7 +1026,7 @@ HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
// its mark-bit or P-bits not yet set. Such objects need
// to be safely navigable by block_start().
assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)res)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
collector()->direct_allocated(res, adjustedSize);
_direct_allocated_words += adjustedSize;
// allocation counters
@@ -1391,7 +1391,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
oop obj = oop(obj_ptr);
OrderAccess::storestore();
assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
// IMPORTANT: See note on object initialization for CMS above.
// Otherwise, copy the object. Here we must be careful to insert the
// klass pointer last, since this marks the block as an allocated object.
@@ -1400,7 +1400,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
// Restore the mark word copied above.
obj->set_mark(m);
assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
OrderAccess::storestore();
if (UseCompressedOops) {
@@ -1421,7 +1421,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
promoInfo->track((PromotedObject*)obj, old->klass());
}
assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
assert(old->is_oop(), "Will use and dereference old klass ptr below");
// Finally, install the klass pointer (this should be volatile).
@@ -2034,7 +2034,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
pointer_delta(cms_space->end(), cms_space->compaction_top())
* HeapWordSize,
"All the free space should be compacted into one chunk at top");
assert(cms_space->dictionary()->totalChunkSize(
assert(cms_space->dictionary()->total_chunk_size(
debug_only(cms_space->freelistLock())) == 0 ||
cms_space->totalSizeInIndexedFreeLists() == 0,
"All the free space should be in a single chunk");
@@ -6131,7 +6131,7 @@ void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
double nearLargestPercent = FLSLargestBlockCoalesceProximity;
HeapWord* minAddr = _cmsSpace->bottom();
HeapWord* largestAddr =
(HeapWord*) _cmsSpace->dictionary()->findLargestDict();
(HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
if (largestAddr == NULL) {
// The dictionary appears to be empty. In this case
// try to coalesce at the end of the heap.
@@ -7906,7 +7906,7 @@ SweepClosure::SweepClosure(CMSCollector* collector,
_last_fc = NULL;
_sp->initializeIndexedFreeListArrayReturnedBytes();
_sp->dictionary()->initializeDictReturnedBytes();
_sp->dictionary()->initialize_dict_returned_bytes();
)
assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
"sweep _limit out of bounds");
@@ -7954,13 +7954,13 @@ SweepClosure::~SweepClosure() {
if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
indexListReturnedBytes);
gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
dictReturnedBytes);
dict_returned_bytes);
}
}
if (CMSTraceSweeper) {
@@ -7985,9 +7985,9 @@ void SweepClosure::initialize_free_range(HeapWord* freeFinger,
if (CMSTestInFreeList) {
if (freeRangeInFreeLists) {
FreeChunk* fc = (FreeChunk*) freeFinger;
assert(fc->isFree(), "A chunk on the free list should be free.");
assert(fc->is_free(), "A chunk on the free list should be free.");
assert(fc->size() > 0, "Free range should have a size");
assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
}
}
}
@@ -8057,7 +8057,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
assert(addr < _limit, "sweep invariant");
// check if we should yield
do_yield_check(addr);
if (fc->isFree()) {
if (fc->is_free()) {
// Chunk that is already free
res = fc->size();
do_already_free_chunk(fc);
@@ -8145,7 +8145,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
// Chunks that cannot be coalesced are not in the
// free lists.
if (CMSTestInFreeList && !fc->cantCoalesce()) {
assert(_sp->verifyChunkInFreeLists(fc),
assert(_sp->verify_chunk_in_free_list(fc),
"free chunk should be in free lists");
}
// a chunk that is already free, should not have been
@@ -8171,7 +8171,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
FreeChunk* nextChunk = (FreeChunk*)(addr + size);
assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
if ((HeapWord*)nextChunk < _sp->end() && // There is another free chunk to the right ...
nextChunk->isFree() && // ... which is free...
nextChunk->is_free() && // ... which is free...
nextChunk->cantCoalesce()) { // ... but can't be coalesced
// nothing to do
} else {
@@ -8203,7 +8203,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
assert(ffc->size() == pointer_delta(addr, freeFinger()),
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
assert(_sp->verifyChunkInFreeLists(ffc),
assert(_sp->verify_chunk_in_free_list(ffc),
"free range is not in free lists");
}
_sp->removeFreeChunkFromFreeLists(ffc);
@@ -8262,7 +8262,7 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
assert(ffc->size() == pointer_delta(addr, freeFinger()),
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
assert(_sp->verifyChunkInFreeLists(ffc),
assert(_sp->verify_chunk_in_free_list(ffc),
"free range is not in free lists");
}
_sp->removeFreeChunkFromFreeLists(ffc);
@@ -8351,11 +8351,11 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
size_t chunkSize) {
// do_post_free_or_garbage_chunk() should only be called in the case
// of the adaptive free list allocator.
const bool fcInFreeLists = fc->isFree();
const bool fcInFreeLists = fc->is_free();
assert(_sp->adaptive_freelists(), "Should only be used in this case.");
assert((HeapWord*)fc <= _limit, "sweep invariant");
if (CMSTestInFreeList && fcInFreeLists) {
assert(_sp->verifyChunkInFreeLists(fc), "free chunk is not in free lists");
assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
}
if (CMSTraceSweeper) {
@@ -8410,7 +8410,7 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
assert(_sp->verifyChunkInFreeLists(ffc),
assert(_sp->verify_chunk_in_free_list(ffc),
"Chunk is not in free lists");
}
_sp->coalDeath(ffc->size());
@@ -8459,7 +8459,7 @@ void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
" when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
_limit, _sp->bottom(), _sp->end(), fc, chunk_size));
if (eob >= _limit) {
assert(eob == _limit || fc->isFree(), "Only a free chunk should allow us to cross over the limit");
assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
if (CMSTraceSweeper) {
gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
"[" PTR_FORMAT "," PTR_FORMAT ") in space "
@@ -8482,8 +8482,8 @@ void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
if (!freeRangeInFreeLists()) {
if (CMSTestInFreeList) {
FreeChunk* fc = (FreeChunk*) chunk;
fc->setSize(size);
assert(!_sp->verifyChunkInFreeLists(fc),
fc->set_size(size);
assert(!_sp->verify_chunk_in_free_list(fc),
"chunk should not be in free lists yet");
}
if (CMSTraceSweeper) {
@@ -8557,8 +8557,8 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
// This is actually very useful in a product build if it can
// be called from the debugger. Compile it into the product
// as needed.
bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
return debug_cms_space->verifyChunkInFreeLists(fc);
bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
return debug_cms_space->verify_chunk_in_free_list(fc);
}
#endif
@@ -9255,7 +9255,7 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
size_t chunk_at_end_old_size = chunk_at_end->size();
assert(chunk_at_end_old_size >= word_size_change,
"Shrink is too large");
chunk_at_end->setSize(chunk_at_end_old_size -
chunk_at_end->set_size(chunk_at_end_old_size -
word_size_change);
_cmsSpace->freed((HeapWord*) chunk_at_end->end(),
word_size_change);
@@ -25,10 +25,10 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
#include "gc_implementation/shared/gSpaceCounters.hpp"
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
@@ -1106,7 +1106,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, CardTableRS* ct,
bool use_adaptive_freelists,
FreeBlockDictionary::DictionaryChoice);
FreeBlockDictionary<FreeChunk>::DictionaryChoice);
// Accessors
CMSCollector* collector() const { return _collector; }
@@ -1328,7 +1328,7 @@ class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, CardTableRS* ct,
bool use_adaptive_freelists,
FreeBlockDictionary::DictionaryChoice
FreeBlockDictionary<FreeChunk>::DictionaryChoice
dictionaryChoice) :
ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
use_adaptive_freelists, dictionaryChoice) {}
@@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "utilities/copy.hpp"
#ifndef PRODUCT
@@ -75,20 +75,20 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
// calls. We really want the read of _mark and _prev from this pointer
// to be volatile but making the fields volatile causes all sorts of
// compilation errors.
return ((volatile FreeChunk*)addr)->isFree();
return ((volatile FreeChunk*)addr)->is_free();
}
bool isFree() const volatile {
bool is_free() const volatile {
LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
return (((intptr_t)_prev) & 0x1) == 0x1;
}
bool cantCoalesce() const {
assert(isFree(), "can't get coalesce bit on not free");
assert(is_free(), "can't get coalesce bit on not free");
return (((intptr_t)_prev) & 0x2) == 0x2;
}
void dontCoalesce() {
// the block should be free
assert(isFree(), "Should look like a free block");
assert(is_free(), "Should look like a free block");
_prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
}
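The non-compressed-oops paths above steal the two low bits of _prev for state. A standalone sketch of that tagging scheme follows; the struct and names are illustrative, but the masks match the code above:

#include <cassert>
#include <cstdint>

// Low-bit tagging as used by FreeChunk::_prev above: bit 0 marks the chunk
// free, bit 1 marks it non-coalescable; the remaining bits hold the pointer.
struct TaggedPrev {
  intptr_t bits;
  bool is_free() const       { return (bits & 0x1) == 0x1; }
  bool cant_coalesce() const { return (bits & 0x2) == 0x2; }
  void* real_prev() const    { return (void*)(bits & ~(intptr_t)0x3); }
};

int main() {
  long prev_chunk;  // stand-in for a predecessor chunk; aligned, so low bits are 0
  TaggedPrev p = { (intptr_t)&prev_chunk | 0x1 | 0x2 };  // free, don't coalesce
  assert(p.is_free() && p.cant_coalesce() && p.real_prev() == &prev_chunk);
  return 0;
}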
FreeChunk* prev() const {
@@ -103,23 +103,23 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
return _size;
}
void setSize(size_t sz) {
void set_size(size_t sz) {
LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
_size = sz;
}
FreeChunk* next() const { return _next; }
void linkAfter(FreeChunk* ptr) {
linkNext(ptr);
if (ptr != NULL) ptr->linkPrev(this);
void link_after(FreeChunk* ptr) {
link_next(ptr);
if (ptr != NULL) ptr->link_prev(this);
}
void linkNext(FreeChunk* ptr) { _next = ptr; }
void linkPrev(FreeChunk* ptr) {
void link_next(FreeChunk* ptr) { _next = ptr; }
void link_prev(FreeChunk* ptr) {
LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
_prev = (FreeChunk*)((intptr_t)ptr | 0x1);
}
void clearNext() { _next = NULL; }
void clear_next() { _next = NULL; }
void markNotFree() {
// Set _prev (klass) to null before (if) clearing the mark word below
_prev = NULL;
@@ -129,7 +129,7 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
set_mark(markOopDesc::prototype());
}
#endif
assert(!isFree(), "Error");
assert(!is_free(), "Error");
}
// Return the address past the end of this chunk
@@ -121,7 +121,7 @@ void PromotionInfo::track(PromotedObject* trackOop) {
void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
// make a copy of header as it may need to be spooled
markOop mark = oop(trackOop)->mark();
trackOop->clearNext();
trackOop->clear_next();
if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
// save non-prototypical header, and mark oop
saveDisplacedHeader(mark);
@@ -43,7 +43,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
// whose position will depend on endian-ness of the platform.
// This is so that there is no interference with the
// cms_free_bit occupying bit position 7 (lsb == 0)
// when we are using compressed oops; see FreeChunk::isFree().
// when we are using compressed oops; see FreeChunk::is_free().
// We cannot move the cms_free_bit down because currently
// biased locking code assumes that age bits are contiguous
// with the lock bits. Even if that assumption were relaxed,
@@ -65,7 +65,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
};
public:
inline PromotedObject* next() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
PromotedObject* res;
if (UseCompressedOops) {
// The next pointer is a compressed oop stored in the top 32 bits
@@ -85,27 +85,27 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
} else {
_next |= (intptr_t)x;
}
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
inline void setPromotedMark() {
_next |= promoted_mask;
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
inline bool hasPromotedMark() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
return (_next & promoted_mask) == promoted_mask;
}
inline void setDisplacedMark() {
_next |= displaced_mark;
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
inline bool hasDisplacedMark() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
return (_next & displaced_mark) != 0;
}
inline void clearNext() {
inline void clear_next() {
_next = 0;
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
debug_only(void *next_addr() { return (void *) &_next; })
};
@@ -44,11 +44,11 @@
nonstatic_field(FreeChunk, _next, FreeChunk*) \
nonstatic_field(FreeChunk, _prev, FreeChunk*) \
nonstatic_field(LinearAllocBlock, _word_size, size_t) \
nonstatic_field(FreeList, _size, size_t) \
nonstatic_field(FreeList, _count, ssize_t) \
nonstatic_field(BinaryTreeDictionary, _totalSize, size_t) \
nonstatic_field(CompactibleFreeListSpace, _dictionary, FreeBlockDictionary*) \
nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], FreeList) \
nonstatic_field(FreeList<FreeChunk>, _size, size_t) \
nonstatic_field(FreeList<FreeChunk>, _count, ssize_t) \
nonstatic_field(BinaryTreeDictionary<FreeChunk>,_total_size, size_t) \
nonstatic_field(CompactibleFreeListSpace, _dictionary, FreeBlockDictionary<FreeChunk>*) \
nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], FreeList<FreeChunk>) \
nonstatic_field(CompactibleFreeListSpace, _smallLinearAllocBlock, LinearAllocBlock)
@@ -70,13 +70,13 @@
declare_toplevel_type(CompactibleFreeListSpace*) \
declare_toplevel_type(CMSCollector*) \
declare_toplevel_type(FreeChunk*) \
declare_toplevel_type(BinaryTreeDictionary*) \
declare_toplevel_type(FreeBlockDictionary*) \
declare_toplevel_type(FreeList*) \
declare_toplevel_type(FreeList) \
declare_toplevel_type(BinaryTreeDictionary<FreeChunk>*) \
declare_toplevel_type(FreeBlockDictionary<FreeChunk>*) \
declare_toplevel_type(FreeList<FreeChunk>*) \
declare_toplevel_type(FreeList<FreeChunk>) \
declare_toplevel_type(LinearAllocBlock) \
declare_toplevel_type(FreeBlockDictionary) \
declare_type(BinaryTreeDictionary, FreeBlockDictionary)
declare_toplevel_type(FreeBlockDictionary<FreeChunk>) \
declare_type(BinaryTreeDictionary<FreeChunk>, FreeBlockDictionary<FreeChunk>)
#define VM_INT_CONSTANTS_CMS(declare_constant) \
declare_constant(Generation::ConcurrentMarkSweep) \
@@ -1183,35 +1183,31 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
g1p->record_concurrent_mark_remark_end();
}
// Used to calculate the # live objects per region
// for verification purposes
class CalcLiveObjectsClosure: public HeapRegionClosure {
CMBitMapRO* _bm;
// Base class of the closures that finalize and verify the
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
ConcurrentMark* _cm;
BitMap* _region_bm;
BitMap* _card_bm;
size_t _region_marked_bytes;
intptr_t _bottom_card_num;
void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) {
assert(start_card_num <= last_card_num, "sanity");
BitMap::idx_t start_idx = start_card_num - _bottom_card_num;
BitMap::idx_t last_idx = last_card_num - _bottom_card_num;
void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
assert(start_idx <= last_idx, "sanity");
for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
_card_bm->par_at_put(i, 1);
// Set the inclusive bit range [start_idx, last_idx].
// For small ranges (up to 8 cards) use a simple loop; otherwise
// use par_at_put_range.
if ((last_idx - start_idx) < 8) {
for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
_card_bm->par_set_bit(i);
}
} else {
assert(last_idx < _card_bm->size(), "sanity");
// Note BitMap::par_at_put_range() is exclusive.
_card_bm->par_at_put_range(start_idx, last_idx+1, true);
}
}
public:
CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
BitMap* region_bm, BitMap* card_bm) :
_bm(bm), _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
_region_marked_bytes(0), _bottom_card_num(cm->heap_bottom_card_num()) { }
// It takes a region that's not empty (i.e., it has at least one
// live object in it) and sets its corresponding bit on the region
// bitmap to 1. If the region is "starts humongous" it will also set
@@ -1234,6 +1230,24 @@ public:
}
}
public:
CMCountDataClosureBase(ConcurrentMark *cm,
BitMap* region_bm, BitMap* card_bm):
_cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }
};
// Closure that calculates the # live objects per region. Used
// for verification purposes during the cleanup pause.
class CalcLiveObjectsClosure: public CMCountDataClosureBase {
CMBitMapRO* _bm;
size_t _region_marked_bytes;
public:
CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
BitMap* region_bm, BitMap* card_bm) :
CMCountDataClosureBase(cm, region_bm, card_bm),
_bm(bm), _region_marked_bytes(0) { }
bool doHeapRegion(HeapRegion* hr) {
if (hr->continuesHumongous()) {
@@ -1260,65 +1274,31 @@ public:
size_t marked_bytes = 0;
// Below, the term "card num" means the result of shifting an address
// by the card shift -- address 0 corresponds to card number 0. One
// must subtract the card num of the bottom of the heap to obtain a
// card table index.
// The first card num of the sequence of live cards currently being
// constructed. -1 ==> no sequence.
intptr_t start_card_num = -1;
// The last card num of the sequence of live cards currently being
// constructed. -1 ==> no sequence.
intptr_t last_card_num = -1;
while (start < nextTop) {
oop obj = oop(start);
int obj_sz = obj->size();
// The card num of the start of the current object.
intptr_t obj_card_num =
intptr_t(uintptr_t(start) >> CardTableModRefBS::card_shift);
HeapWord* obj_last = start + obj_sz - 1;
intptr_t obj_last_card_num =
intptr_t(uintptr_t(obj_last) >> CardTableModRefBS::card_shift);
if (obj_card_num != last_card_num) {
if (start_card_num == -1) {
assert(last_card_num == -1, "Both or neither.");
start_card_num = obj_card_num;
} else {
assert(last_card_num != -1, "Both or neither.");
assert(obj_card_num >= last_card_num, "Inv");
if ((obj_card_num - last_card_num) > 1) {
// Mark the last run, and start a new one.
mark_card_num_range(start_card_num, last_card_num);
start_card_num = obj_card_num;
}
}
}
// In any case, we set the last card num.
last_card_num = obj_last_card_num;
BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
BitMap::idx_t last_idx = _cm->card_bitmap_index_for(obj_last);
// Set the bits in the card BM for this object (inclusive).
set_card_bitmap_range(start_idx, last_idx);
// Add the size of this object to the number of marked bytes.
marked_bytes += (size_t)obj_sz * HeapWordSize;
// Find the next marked object after this one.
start = _bm->getNextMarkedWordAddress(start + 1, nextTop);
}
// Handle the last range, if any.
if (start_card_num != -1) {
mark_card_num_range(start_card_num, last_card_num);
start = _bm->getNextMarkedWordAddress(obj_last + 1, nextTop);
}
// Mark the allocated-since-marking portion...
HeapWord* top = hr->top();
if (nextTop < top) {
start_card_num = intptr_t(uintptr_t(nextTop) >> CardTableModRefBS::card_shift);
last_card_num = intptr_t(uintptr_t(top) >> CardTableModRefBS::card_shift);
BitMap::idx_t start_idx = _cm->card_bitmap_index_for(nextTop);
BitMap::idx_t last_idx = _cm->card_bitmap_index_for(top - 1);
mark_card_num_range(start_card_num, last_card_num);
set_card_bitmap_range(start_idx, last_idx);
// This definitely means the region has live objects.
set_bit_for_region(hr);
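Per the comment at the top of this hunk, a "card num" is an address shifted right by the card shift, and a card bitmap index subtracts the card num of the heap bottom. A standalone sketch of the arithmetic card_bitmap_index_for presumably performs; the shift value of 9 (512-byte cards) is HotSpot's default and an assumption here:

#include <cassert>
#include <cstdint>
#include <cstddef>

const int card_shift = 9;  // assumption: CardTableModRefBS::card_shift == 9

// Assumed shape of ConcurrentMark::card_bitmap_index_for: the card num of
// addr minus the card num of the heap bottom, so index 0 is the bottom card.
size_t card_bitmap_index_for(uintptr_t addr, uintptr_t heap_bottom) {
  intptr_t card_num        = (intptr_t)(addr >> card_shift);
  intptr_t bottom_card_num = (intptr_t)(heap_bottom >> card_shift);
  return (size_t)(card_num - bottom_card_num);
}

int main() {
  uintptr_t bottom = 0x10000000;
  assert(card_bitmap_index_for(bottom, bottom) == 0);
  assert(card_bitmap_index_for(bottom + 512, bottom) == 1);  // next card over
  return 0;
}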
@@ -1394,17 +1374,6 @@ public:
MutexLockerEx x((_verbose ? ParGCRareEvent_lock : NULL),
Mutex::_no_safepoint_check_flag);
// Verify that _top_at_conc_count == ntams
if (hr->top_at_conc_mark_count() != hr->next_top_at_mark_start()) {
if (_verbose) {
gclog_or_tty->print_cr("Region %u: top at conc count incorrect: "
"expected " PTR_FORMAT ", actual: " PTR_FORMAT,
hr->hrs_index(), hr->next_top_at_mark_start(),
hr->top_at_conc_mark_count());
}
failures += 1;
}
// Verify the marked bytes for this region.
size_t exp_marked_bytes = _calc_cl.region_marked_bytes();
size_t act_marked_bytes = hr->next_marked_bytes();
@@ -1470,7 +1439,7 @@ public:
_failures += failures;
// We could stop iteration over the heap when we
// find the first voilating region by returning true.
// find the first violating region by returning true.
return false;
}
};
@@ -1543,62 +1512,19 @@ public:
int failures() const { return _failures; }
};
// Final update of count data (during cleanup).
// Adds [top_at_count, NTAMS) to the marked bytes for each
// region. Sets the bits in the card bitmap corresponding
// to the interval [top_at_count, top], and sets the
// liveness bit for each region containing live data
// in the region bitmap.
class FinalCountDataUpdateClosure: public HeapRegionClosure {
ConcurrentMark* _cm;
BitMap* _region_bm;
BitMap* _card_bm;
void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
assert(start_idx <= last_idx, "sanity");
// Set the inclusive bit range [start_idx, last_idx].
// For small ranges (up to 8 cards) use a simple loop; otherwise
// use par_at_put_range.
if ((last_idx - start_idx) <= 8) {
for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
_card_bm->par_set_bit(i);
}
} else {
assert(last_idx < _card_bm->size(), "sanity");
// Note BitMap::par_at_put_range() is exclusive.
_card_bm->par_at_put_range(start_idx, last_idx+1, true);
}
}
// It takes a region that's not empty (i.e., it has at least one
// live object in it) and sets its corresponding bit on the region
// bitmap to 1. If the region is "starts humongous" it will also set
// to 1 the bits on the region bitmap that correspond to its
// associated "continues humongous" regions.
void set_bit_for_region(HeapRegion* hr) {
assert(!hr->continuesHumongous(), "should have filtered those out");
BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
if (!hr->startsHumongous()) {
// Normal (non-humongous) case: just set the bit.
_region_bm->par_set_bit(index);
} else {
// Starts humongous case: calculate how many regions are part of
// this humongous region and then set the bit range.
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
_region_bm->par_at_put_range(index, end_index, true);
}
}
// Closure that finalizes the liveness counting data.
// Used during the cleanup pause.
// Sets the bits corresponding to the interval [NTAMS, top]
// (which contains the implicitly live objects) in the
// card liveness bitmap. Also sets the bit for each region,
// containing live data, in the region liveness bitmap.
class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
public:
FinalCountDataUpdateClosure(ConcurrentMark* cm,
BitMap* region_bm,
BitMap* card_bm) :
_cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }
CMCountDataClosureBase(cm, region_bm, card_bm) { }
bool doHeapRegion(HeapRegion* hr) {
@@ -1613,26 +1539,10 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
return false;
}
HeapWord* start = hr->top_at_conc_mark_count();
HeapWord* ntams = hr->next_top_at_mark_start();
HeapWord* top = hr->top();
assert(hr->bottom() <= start && start <= hr->end() &&
hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
if (start < ntams) {
// Region was changed between remark and cleanup pauses
// We need to add (ntams - start) to the marked bytes
// for this region, and set bits for the range
// [ card_idx(start), card_idx(ntams) ) in the card bitmap.
size_t live_bytes = (ntams - start) * HeapWordSize;
hr->add_to_marked_bytes(live_bytes);
// Record the new top at conc count
hr->set_top_at_conc_mark_count(ntams);
// The setting of the bits in the card bitmap takes place below
}
assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
// Mark the allocated-since-marking portion...
if (ntams < top) {
@@ -1640,8 +1550,8 @@ class FinalCountDataUpdateClosure: public HeapRegionClosure {
set_bit_for_region(hr);
}
// Now set the bits for [start, top]
BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
// Now set the bits for [ntams, top]
BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
BitMap::idx_t last_idx = _cm->card_bitmap_index_for(top);
set_card_bitmap_range(start_idx, last_idx);
@@ -3072,9 +2982,6 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {
// Update the marked bytes for this region.
hr->add_to_marked_bytes(marked_bytes);
// Now set the top at count to NTAMS.
hr->set_top_at_conc_mark_count(limit);
// Next heap region
return false;
}
@@ -368,16 +368,11 @@ void YoungList::print() {
if (curr == NULL)
gclog_or_tty->print_cr(" empty");
while (curr != NULL) {
gclog_or_tty->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
"age: %4d, y: %d, surv: %d",
curr->bottom(), curr->end(),
curr->top(),
gclog_or_tty->print_cr(" "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
HR_FORMAT_PARAMS(curr),
curr->prev_top_at_mark_start(),
curr->next_top_at_mark_start(),
curr->top_at_conc_mark_count(),
curr->age_in_surv_rate_group_cond(),
curr->is_young(),
curr->is_survivor());
curr->age_in_surv_rate_group_cond());
curr = curr->get_next_young_region();
}
}
@@ -1253,12 +1248,13 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
IsGCActiveMark x;
// Timing
bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
assert(!system_gc || explicit_gc, "invariant");
assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
G1Log::fine(), true, gclog_or_tty);
char verbose_str[128];
sprintf(verbose_str, "Full GC (%s)", GCCause::to_string(gc_cause()));
TraceTime t(verbose_str, G1Log::fine(), true, gclog_or_tty);
TraceCollectorStats tcs(g1mm()->full_collection_counters());
TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
@@ -3593,25 +3589,22 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// Inner scope for scope based logging, timers, and stats collection
{
char verbose_str[128];
sprintf(verbose_str, "GC pause ");
if (g1_policy()->gcs_are_young()) {
strcat(verbose_str, "(young)");
} else {
strcat(verbose_str, "(mixed)");
}
if (g1_policy()->during_initial_mark_pause()) {
strcat(verbose_str, " (initial-mark)");
// We are about to start a marking cycle, so we increment the
// full collection counter.
increment_total_full_collections();
}
// if the log level "finer" is on, we'll print long statistics information
// in the collector policy code, so let's not print this as the output
// is messy if we do.
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
char verbose_str[128];
sprintf(verbose_str, "GC pause (%s) (%s)%s",
GCCause::to_string(gc_cause()),
g1_policy()->gcs_are_young() ? "young" : "mixed",
g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
TraceTime t(verbose_str, G1Log::fine() && !G1Log::finer(), true, gclog_or_tty);
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
@@ -886,8 +886,9 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
size_t start_used) {
if (G1Log::finer()) {
gclog_or_tty->stamp(PrintGCTimeStamps);
gclog_or_tty->print("[GC pause");
gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
gclog_or_tty->print("[GC pause (%s) (%s)",
GCCause::to_string(_g1->gc_cause()),
gcs_are_young() ? "young" : "mixed");
}
// We only need to do this here as the policy will only be applied
@@ -2459,16 +2460,10 @@ void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream
while (csr != NULL) {
HeapRegion* next = csr->next_in_collection_set();
assert(csr->in_collection_set(), "bad CS");
st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
"age: %4d, y: %d, surv: %d",
csr->bottom(), csr->end(),
csr->top(),
csr->prev_top_at_mark_start(),
csr->next_top_at_mark_start(),
csr->top_at_conc_mark_count(),
csr->age_in_surv_rate_group_cond(),
csr->is_young(),
csr->is_survivor());
st->print_cr(" "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
HR_FORMAT_PARAMS(csr),
csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),
csr->age_in_surv_rate_group_cond());
csr = next;
}
}
@@ -510,9 +510,6 @@ HeapRegion::HeapRegion(uint hrs_index,
_rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
// In case the region is allocated during a pause, note the top.
// We haven't done any counting on a brand new region.
_top_at_conc_mark_count = bottom();
}
class NextCompactionHeapRegionClosure: public HeapRegionClosure {
@@ -585,14 +582,12 @@ void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
// we find to be self-forwarded on the next bitmap. So all
// objects need to be below NTAMS.
_next_top_at_mark_start = top();
set_top_at_conc_mark_count(bottom());
_next_marked_bytes = 0;
} else if (during_conc_mark) {
// During concurrent mark, all objects in the CSet (including
// the ones we find to be self-forwarded) are implicitly live.
// So all objects need to be above NTAMS.
_next_top_at_mark_start = bottom();
set_top_at_conc_mark_count(bottom());
_next_marked_bytes = 0;
}
}
@@ -306,9 +306,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// If a collection pause is in progress, this is the top at the start
// of that pause.
// We've counted the marked bytes of objects below here.
HeapWord* _top_at_conc_mark_count;
void init_top_at_mark_start() {
assert(_prev_marked_bytes == 0 &&
_next_marked_bytes == 0,
@@ -316,7 +313,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
HeapWord* bot = bottom();
_prev_top_at_mark_start = bot;
_next_top_at_mark_start = bot;
_top_at_conc_mark_count = bot;
}
void set_young_type(YoungType new_type) {
@@ -625,19 +621,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// last mark phase ended.
bool is_marked() { return _prev_top_at_mark_start != bottom(); }
void init_top_at_conc_mark_count() {
_top_at_conc_mark_count = bottom();
}
void set_top_at_conc_mark_count(HeapWord *cur) {
assert(bottom() <= cur && cur <= end(), "Sanity.");
_top_at_conc_mark_count = cur;
}
HeapWord* top_at_conc_mark_count() {
return _top_at_conc_mark_count;
}
void reset_during_compaction() {
guarantee( isHumongous() && startsHumongous(),
"should only be called for humongous regions");
@@ -733,7 +716,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
_evacuation_failed = b;
if (b) {
init_top_at_conc_mark_count();
_next_marked_bytes = 0;
}
}
@@ -56,7 +56,6 @@ G1OffsetTableContigSpace::block_start_const(const void* p) const {
}
inline void HeapRegion::note_start_of_marking() {
init_top_at_conc_mark_count();
_next_marked_bytes = 0;
_next_top_at_mark_start = top();
}
@@ -39,7 +39,7 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
// We measure the demand between the end of the previous sweep and
// beginning of this sweep:
// Count(end_last_sweep) - Count(start_this_sweep)
// + splitBirths(between) - splitDeaths(between)
// + split_births(between) - split_deaths(between)
// The above number divided by the time since the end of the
// previous sweep gives us a time rate of demand for blocks
// of this size. We compute a padded average of this rate as
@@ -51,34 +51,34 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
AdaptivePaddedAverage _demand_rate_estimate;
ssize_t _desired; // Demand estimate computed as described above
ssize_t _coalDesired; // desired +/- small-percent for tuning coalescing
ssize_t _coal_desired; // desired +/- small-percent for tuning coalescing
ssize_t _surplus; // count - (desired +/- small-percent),
// used to tune splitting in best fit
ssize_t _bfrSurp; // surplus at start of current sweep
ssize_t _prevSweep; // count from end of previous sweep
ssize_t _beforeSweep; // count from before current sweep
ssize_t _coalBirths; // additional chunks from coalescing
ssize_t _coalDeaths; // loss from coalescing
ssize_t _splitBirths; // additional chunks from splitting
ssize_t _splitDeaths; // loss from splitting
size_t _returnedBytes; // number of bytes returned to list.
ssize_t _bfr_surp; // surplus at start of current sweep
ssize_t _prev_sweep; // count from end of previous sweep
ssize_t _before_sweep; // count from before current sweep
ssize_t _coal_births; // additional chunks from coalescing
ssize_t _coal_deaths; // loss from coalescing
ssize_t _split_births; // additional chunks from splitting
ssize_t _split_deaths; // loss from splitting
size_t _returned_bytes; // number of bytes returned to list.
public:
void initialize(bool split_birth = false) {
AdaptivePaddedAverage* dummy =
new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
CMS_FLSPadding);
_desired = 0;
_coalDesired = 0;
_coal_desired = 0;
_surplus = 0;
_bfrSurp = 0;
_prevSweep = 0;
_beforeSweep = 0;
_coalBirths = 0;
_coalDeaths = 0;
_splitBirths = (split_birth ? 1 : 0);
_splitDeaths = 0;
_returnedBytes = 0;
_bfr_surp = 0;
_prev_sweep = 0;
_before_sweep = 0;
_coal_births = 0;
_coal_deaths = 0;
_split_births = (split_birth ? 1 : 0);
_split_deaths = 0;
_returned_bytes = 0;
}
AllocationStats() {
@@ -99,12 +99,12 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
// vulnerable to noisy glitches. In such cases, we
// ignore the current sample and use currently available
// historical estimates.
assert(prevSweep() + splitBirths() + coalBirths() // "Total Production Stock"
>= splitDeaths() + coalDeaths() + (ssize_t)count, // "Current stock + depletion"
assert(prev_sweep() + split_births() + coal_births() // "Total Production Stock"
>= split_deaths() + coal_deaths() + (ssize_t)count, // "Current stock + depletion"
"Conservation Principle");
if (inter_sweep_current > _threshold) {
ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() + coalBirths()
- splitDeaths() - coalDeaths();
ssize_t demand = prev_sweep() - (ssize_t)count + split_births() + coal_births()
- split_deaths() - coal_deaths();
assert(demand >= 0,
err_msg("Demand (" SSIZE_FORMAT ") should be non-negative for "
PTR_FORMAT " (size=" SIZE_FORMAT ")",
@@ -130,40 +130,40 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
ssize_t desired() const { return _desired; }
void set_desired(ssize_t v) { _desired = v; }
ssize_t coalDesired() const { return _coalDesired; }
void set_coalDesired(ssize_t v) { _coalDesired = v; }
ssize_t coal_desired() const { return _coal_desired; }
void set_coal_desired(ssize_t v) { _coal_desired = v; }
ssize_t surplus() const { return _surplus; }
void set_surplus(ssize_t v) { _surplus = v; }
void increment_surplus() { _surplus++; }
void decrement_surplus() { _surplus--; }
ssize_t bfrSurp() const { return _bfrSurp; }
void set_bfrSurp(ssize_t v) { _bfrSurp = v; }
ssize_t prevSweep() const { return _prevSweep; }
void set_prevSweep(ssize_t v) { _prevSweep = v; }
ssize_t beforeSweep() const { return _beforeSweep; }
void set_beforeSweep(ssize_t v) { _beforeSweep = v; }
ssize_t bfr_surp() const { return _bfr_surp; }
void set_bfr_surp(ssize_t v) { _bfr_surp = v; }
ssize_t prev_sweep() const { return _prev_sweep; }
void set_prev_sweep(ssize_t v) { _prev_sweep = v; }
ssize_t before_sweep() const { return _before_sweep; }
void set_before_sweep(ssize_t v) { _before_sweep = v; }
ssize_t coalBirths() const { return _coalBirths; }
void set_coalBirths(ssize_t v) { _coalBirths = v; }
void increment_coalBirths() { _coalBirths++; }
ssize_t coal_births() const { return _coal_births; }
void set_coal_births(ssize_t v) { _coal_births = v; }
void increment_coal_births() { _coal_births++; }
ssize_t coalDeaths() const { return _coalDeaths; }
void set_coalDeaths(ssize_t v) { _coalDeaths = v; }
void increment_coalDeaths() { _coalDeaths++; }
ssize_t coal_deaths() const { return _coal_deaths; }
void set_coal_deaths(ssize_t v) { _coal_deaths = v; }
void increment_coal_deaths() { _coal_deaths++; }
ssize_t splitBirths() const { return _splitBirths; }
void set_splitBirths(ssize_t v) { _splitBirths = v; }
void increment_splitBirths() { _splitBirths++; }
ssize_t split_births() const { return _split_births; }
void set_split_births(ssize_t v) { _split_births = v; }
void increment_split_births() { _split_births++; }
ssize_t splitDeaths() const { return _splitDeaths; }
void set_splitDeaths(ssize_t v) { _splitDeaths = v; }
void increment_splitDeaths() { _splitDeaths++; }
ssize_t split_deaths() const { return _split_deaths; }
void set_split_deaths(ssize_t v) { _split_deaths = v; }
void increment_split_deaths() { _split_deaths++; }
NOT_PRODUCT(
size_t returnedBytes() const { return _returnedBytes; }
void set_returnedBytes(size_t v) { _returnedBytes = v; }
size_t returned_bytes() const { return _returned_bytes; }
void set_returned_bytes(size_t v) { _returned_bytes = v; }
)
};
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,87 +22,101 @@
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_BINARYTREEDICTIONARY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_BINARYTREEDICTIONARY_HPP
#ifndef SHARE_VM_MEMORY_BINARYTREEDICTIONARY_HPP
#define SHARE_VM_MEMORY_BINARYTREEDICTIONARY_HPP
#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
#include "gc_implementation/concurrentMarkSweep/freeList.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/freeList.hpp"
/*
* A binary tree based search structure for free blocks.
* This is currently used in the Concurrent Mark&Sweep implementation.
* This is currently used in the Concurrent Mark&Sweep implementation, but
* will be used for free block management for metadata.
*/
// A TreeList is a FreeList which can be used to maintain a
// binary tree of free lists.
class TreeChunk;
class BinaryTreeDictionary;
class AscendTreeCensusClosure;
class DescendTreeCensusClosure;
class DescendTreeSearchClosure;
template <class Chunk> class TreeChunk;
template <class Chunk> class BinaryTreeDictionary;
template <class Chunk> class AscendTreeCensusClosure;
template <class Chunk> class DescendTreeCensusClosure;
template <class Chunk> class DescendTreeSearchClosure;
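The forward declarations above show the shape of the change: each dictionary class gains a Chunk type parameter, and the CMS code instantiates them with FreeChunk (as the vmStructs hunk earlier makes explicit). A standalone miniature of the pattern; MiniChunk and the class names here are illustrative, not part of the change:

#include <cstddef>

// Dictionary code written once over a Chunk type...
template <class Chunk> class FreeBlockDictionaryT {
 public:
  virtual Chunk* get_chunk(size_t size) = 0;
  virtual ~FreeBlockDictionaryT() {}
};

struct MiniChunk { size_t words; };  // stand-in for FreeChunk

// ...with a concrete implementation per chunk type.
template <class Chunk>
class MiniTreeDictionary : public FreeBlockDictionaryT<Chunk> {
  Chunk _only;
 public:
  MiniTreeDictionary() { _only.words = 64; }
  Chunk* get_chunk(size_t size) { return size <= _only.words ? &_only : 0; }
};

int main() {
  MiniTreeDictionary<MiniChunk> d;
  FreeBlockDictionaryT<MiniChunk>* dict = &d;  // used through the base interface
  return dict->get_chunk(16) != 0 ? 0 : 1;
}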
class TreeList: public FreeList {
friend class TreeChunk;
friend class BinaryTreeDictionary;
friend class AscendTreeCensusClosure;
friend class DescendTreeCensusClosure;
friend class DescendTreeSearchClosure;
template <class Chunk>
class TreeList: public FreeList<Chunk> {
friend class TreeChunk<Chunk>;
friend class BinaryTreeDictionary<Chunk>;
friend class AscendTreeCensusClosure<Chunk>;
friend class DescendTreeCensusClosure<Chunk>;
friend class DescendTreeSearchClosure<Chunk>;
TreeList<Chunk>* _parent;
TreeList<Chunk>* _left;
TreeList<Chunk>* _right;
protected:
TreeList* parent() const { return _parent; }
TreeList* left() const { return _left; }
TreeList* right() const { return _right; }
TreeList<Chunk>* parent() const { return _parent; }
TreeList<Chunk>* left() const { return _left; }
TreeList<Chunk>* right() const { return _right; }
// Wrapper on call to base class, to get the template to compile.
Chunk* head() const { return FreeList<Chunk>::head(); }
Chunk* tail() const { return FreeList<Chunk>::tail(); }
void set_head(Chunk* head) { FreeList<Chunk>::set_head(head); }
void set_tail(Chunk* tail) { FreeList<Chunk>::set_tail(tail); }
size_t size() const { return FreeList<Chunk>::size(); }
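The "wrapper on call to base class" comment above refers to C++ two-phase name lookup: inside a class template, unqualified names are not searched in a dependent base class, so the calls must be qualified (or made via this->). A standalone illustration:

// Why the qualified wrappers are needed: 'head' from the dependent base is
// invisible to unqualified lookup inside the derived class template.
template <class T> struct Base {
  T* head() const { return 0; }
};

template <class T> struct Derived : Base<T> {
  T* first() const { return Base<T>::head(); }  // OK: qualified dependent name
  // T* broken() const { return head(); }       // error: 'head' not found here
};

int main() {
  Derived<int> d;
  return d.first() == 0 ? 0 : 1;
}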
// Accessors for links in tree.
void setLeft(TreeList* tl) {
void set_left(TreeList<Chunk>* tl) {
_left = tl;
if (tl != NULL)
tl->setParent(this);
tl->set_parent(this);
}
void setRight(TreeList* tl) {
void set_right(TreeList<Chunk>* tl) {
_right = tl;
if (tl != NULL)
tl->setParent(this);
tl->set_parent(this);
}
void setParent(TreeList* tl) { _parent = tl; }
void set_parent(TreeList<Chunk>* tl) { _parent = tl; }
void clearLeft() { _left = NULL; }
void clearRight() { _right = NULL; }
void clearParent() { _parent = NULL; }
void initialize() { clearLeft(); clearRight(), clearParent(); }
void clear_right() { _right = NULL; }
void clear_parent() { _parent = NULL; }
void initialize() { clearLeft(); clear_right(), clear_parent(); }
// For constructing a TreeList from a Tree chunk or
// address and size.
static TreeList* as_TreeList(TreeChunk* tc);
static TreeList* as_TreeList(HeapWord* addr, size_t size);
static TreeList<Chunk>* as_TreeList(TreeChunk<Chunk>* tc);
static TreeList<Chunk>* as_TreeList(HeapWord* addr, size_t size);
// Returns the head of the free list as a pointer to a TreeChunk.
TreeChunk* head_as_TreeChunk();
TreeChunk<Chunk>* head_as_TreeChunk();
// Returns the first available chunk in the free list as a pointer
// to a TreeChunk.
TreeChunk* first_available();
TreeChunk<Chunk>* first_available();
// Returns the block with the largest heap address amongst
// those in the list for this size; potentially slow and expensive,
// use with caution!
TreeChunk* largest_address();
TreeChunk<Chunk>* largest_address();
// removeChunkReplaceIfNeeded() removes the given "tc" from the TreeList.
// remove_chunk_replace_if_needed() removes the given "tc" from the TreeList.
// If "tc" is the first chunk in the list, it is also the
// TreeList that is the node in the tree. removeChunkReplaceIfNeeded()
// TreeList that is the node in the tree. remove_chunk_replace_if_needed()
// returns the possibly replaced TreeList* for the node in
// the tree. It also updates the parent of the original
// node to point to the new node.
TreeList* removeChunkReplaceIfNeeded(TreeChunk* tc);
TreeList<Chunk>* remove_chunk_replace_if_needed(TreeChunk<Chunk>* tc);
// See FreeList.
void returnChunkAtHead(TreeChunk* tc);
void returnChunkAtTail(TreeChunk* tc);
void return_chunk_at_head(TreeChunk<Chunk>* tc);
void return_chunk_at_tail(TreeChunk<Chunk>* tc);
};
// A TreeChunk is a subclass of a FreeChunk that additionally
// A TreeChunk is a subclass of a Chunk that additionally
// maintains a pointer to the free list on which it is currently
// linked.
// A TreeChunk is also used as a node in the binary tree. This
@@ -115,92 +115,129 @@ class TreeList: public FreeList {
// on the free list for a node in the tree and is only removed if
// it is the last chunk on the free list.
class TreeChunk : public FreeChunk {
friend class TreeList;
TreeList* _list;
TreeList _embedded_list; // if non-null, this chunk is on _list
template <class Chunk>
class TreeChunk : public Chunk {
friend class TreeList<Chunk>;
TreeList<Chunk>* _list;
TreeList<Chunk> _embedded_list; // if non-null, this chunk is on _list
protected:
TreeList* embedded_list() const { return (TreeList*) &_embedded_list; }
void set_embedded_list(TreeList* v) { _embedded_list = *v; }
TreeList<Chunk>* embedded_list() const { return (TreeList<Chunk>*) &_embedded_list; }
void set_embedded_list(TreeList<Chunk>* v) { _embedded_list = *v; }
public:
TreeList* list() { return _list; }
void set_list(TreeList* v) { _list = v; }
static TreeChunk* as_TreeChunk(FreeChunk* fc);
TreeList<Chunk>* list() { return _list; }
void set_list(TreeList<Chunk>* v) { _list = v; }
static TreeChunk<Chunk>* as_TreeChunk(Chunk* fc);
// Initialize fields in a TreeChunk that should be
// initialized when the TreeChunk is being added to
// a free list in the tree.
void initialize() { embedded_list()->initialize(); }
Chunk* next() const { return Chunk::next(); }
Chunk* prev() const { return Chunk::prev(); }
size_t size() const volatile { return Chunk::size(); }
// debugging
void verifyTreeChunkList() const;
void verify_tree_chunk_list() const;
};
const size_t MIN_TREE_CHUNK_SIZE = sizeof(TreeChunk)/HeapWordSize;
class BinaryTreeDictionary: public FreeBlockDictionary {
template <class Chunk>
class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
friend class VMStructs;
bool _splay;
size_t _totalSize;
size_t _totalFreeBlocks;
TreeList* _root;
size_t _total_size;
size_t _total_free_blocks;
TreeList<Chunk>* _root;
bool _adaptive_freelists;
// private accessors
bool splay() const { return _splay; }
void set_splay(bool v) { _splay = v; }
size_t totalSize() const { return _totalSize; }
void set_totalSize(size_t v) { _totalSize = v; }
virtual void inc_totalSize(size_t v);
virtual void dec_totalSize(size_t v);
size_t totalFreeBlocks() const { return _totalFreeBlocks; }
void set_totalFreeBlocks(size_t v) { _totalFreeBlocks = v; }
TreeList* root() const { return _root; }
void set_root(TreeList* v) { _root = v; }
void set_total_size(size_t v) { _total_size = v; }
virtual void inc_total_size(size_t v);
virtual void dec_total_size(size_t v);
size_t total_free_blocks() const { return _total_free_blocks; }
void set_total_free_blocks(size_t v) { _total_free_blocks = v; }
TreeList<Chunk>* root() const { return _root; }
void set_root(TreeList<Chunk>* v) { _root = v; }
bool adaptive_freelists() { return _adaptive_freelists; }
// This field is added and can be set to point to
// the Mutex used to synchronize access to the
// dictionary so that assertion checking can be done.
// For example it is set to point to _parDictionaryAllocLock.
NOT_PRODUCT(Mutex* _lock;)
// Remove a chunk of size "size" or larger from the tree and
// return it. If the chunk
// is the last chunk of that size, remove the node for that size
// from the tree.
TreeChunk* getChunkFromTree(size_t size, Dither dither, bool splay);
TreeChunk<Chunk>* get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither, bool splay);
// Return a list of the specified size or NULL from the tree.
// The list is not removed from the tree.
TreeList* findList (size_t size) const;
TreeList<Chunk>* find_list (size_t size) const;
// Remove this chunk from the tree. If the removal results
// in an empty list in the tree, remove the empty list.
TreeChunk* removeChunkFromTree(TreeChunk* tc);
TreeChunk<Chunk>* remove_chunk_from_tree(TreeChunk<Chunk>* tc);
// Remove the node in the trees starting at tl that has the
// minimum value and return it. Repair the tree as needed.
TreeList* removeTreeMinimum(TreeList* tl);
void semiSplayStep(TreeList* tl);
TreeList<Chunk>* remove_tree_minimum(TreeList<Chunk>* tl);
void semi_splay_step(TreeList<Chunk>* tl);
// Add this free chunk to the tree.
void insertChunkInTree(FreeChunk* freeChunk);
void insert_chunk_in_tree(Chunk* freeChunk);
public:
void verifyTree() const;
static const size_t min_tree_chunk_size = sizeof(TreeChunk<Chunk>)/HeapWordSize;
void verify_tree() const;
// verify that the given chunk is in the tree.
bool verifyChunkInFreeLists(FreeChunk* tc) const;
bool verify_chunk_in_free_list(Chunk* tc) const;
private:
void verifyTreeHelper(TreeList* tl) const;
static size_t verifyPrevFreePtrs(TreeList* tl);
void verify_tree_helper(TreeList<Chunk>* tl) const;
static size_t verify_prev_free_ptrs(TreeList<Chunk>* tl);
// Returns the total number of chunks in the list.
size_t totalListLength(TreeList* tl) const;
size_t total_list_length(TreeList<Chunk>* tl) const;
// Returns the total number of words in the chunks in the tree
// starting at "tl".
size_t totalSizeInTree(TreeList* tl) const;
size_t total_size_in_tree(TreeList<Chunk>* tl) const;
// Returns the sum of the square of the size of each block
// in the tree starting at "tl".
double sum_of_squared_block_sizes(TreeList* const tl) const;
double sum_of_squared_block_sizes(TreeList<Chunk>* const tl) const;
// Returns the total number of free blocks in the tree starting
// at "tl".
size_t totalFreeBlocksInTree(TreeList* tl) const;
size_t numFreeBlocks() const;
size_t total_free_blocks_in_tree(TreeList<Chunk>* tl) const;
size_t num_free_blocks() const;
size_t treeHeight() const;
size_t treeHeightHelper(TreeList* tl) const;
size_t totalNodesInTree(TreeList* tl) const;
size_t totalNodesHelper(TreeList* tl) const;
size_t tree_height_helper(TreeList<Chunk>* tl) const;
size_t total_nodes_in_tree(TreeList<Chunk>* tl) const;
size_t total_nodes_helper(TreeList<Chunk>* tl) const;
public:
// Constructor
BinaryTreeDictionary(MemRegion mr, bool splay = false);
BinaryTreeDictionary(bool adaptive_freelists, bool splay = false);
BinaryTreeDictionary(MemRegion mr, bool adaptive_freelists, bool splay = false);
// Public accessors
size_t total_size() const { return _total_size; }
// Reset the dictionary to the initial conditions with
// a single free chunk.
......@@ -212,85 +245,85 @@ class BinaryTreeDictionary: public FreeBlockDictionary {
// Return a chunk of size "size" or greater from
// the tree.
// want a better dynamic splay strategy for the future.
FreeChunk* getChunk(size_t size, Dither dither) {
verify_par_locked();
FreeChunk* res = getChunkFromTree(size, dither, splay());
assert(res == NULL || res->isFree(),
Chunk* get_chunk(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither) {
FreeBlockDictionary<Chunk>::verify_par_locked();
Chunk* res = get_chunk_from_tree(size, dither, splay());
assert(res == NULL || res->is_free(),
"Should be returning a free chunk");
return res;
}
void returnChunk(FreeChunk* chunk) {
verify_par_locked();
insertChunkInTree(chunk);
void return_chunk(Chunk* chunk) {
FreeBlockDictionary<Chunk>::verify_par_locked();
insert_chunk_in_tree(chunk);
}
void removeChunk(FreeChunk* chunk) {
verify_par_locked();
removeChunkFromTree((TreeChunk*)chunk);
assert(chunk->isFree(), "Should still be a free chunk");
void remove_chunk(Chunk* chunk) {
FreeBlockDictionary<Chunk>::verify_par_locked();
remove_chunk_from_tree((TreeChunk<Chunk>*)chunk);
assert(chunk->is_free(), "Should still be a free chunk");
}
size_t maxChunkSize() const;
size_t totalChunkSize(debug_only(const Mutex* lock)) const {
size_t max_chunk_size() const;
size_t total_chunk_size(debug_only(const Mutex* lock)) const {
debug_only(
if (lock != NULL && lock->owned_by_self()) {
assert(totalSizeInTree(root()) == totalSize(),
"_totalSize inconsistency");
assert(total_size_in_tree(root()) == total_size(),
"_total_size inconsistency");
}
)
return totalSize();
return total_size();
}
size_t minSize() const {
return MIN_TREE_CHUNK_SIZE;
size_t min_size() const {
return min_tree_chunk_size;
}
double sum_of_squared_block_sizes() const {
return sum_of_squared_block_sizes(root());
}
FreeChunk* find_chunk_ends_at(HeapWord* target) const;
Chunk* find_chunk_ends_at(HeapWord* target) const;
// Find the list with size "size" in the binary tree and update
// the statistics in the list according to "split" (chunk was
// split or coalesce) and "birth" (chunk was added or removed).
void dictCensusUpdate(size_t size, bool split, bool birth);
void dict_census_udpate(size_t size, bool split, bool birth);
// Return true if the dictionary is overpopulated (more chunks of
// this size than desired) for size "size".
bool coalDictOverPopulated(size_t size);
bool coal_dict_over_populated(size_t size);
// Methods called at the beginning of a sweep to prepare the
// statistics for the sweep.
void beginSweepDictCensus(double coalSurplusPercent,
void begin_sweep_dict_census(double coalSurplusPercent,
float inter_sweep_current,
float inter_sweep_estimate,
float intra_sweep_estimate);
// Methods called after the end of a sweep to modify the
// statistics for the sweep.
void endSweepDictCensus(double splitSurplusPercent);
void end_sweep_dict_census(double splitSurplusPercent);
// Return the largest free chunk in the tree.
FreeChunk* findLargestDict() const;
Chunk* find_largest_dict() const;
// Accessors for statistics
void setTreeSurplus(double splitSurplusPercent);
void setTreeHints(void);
void set_tree_surplus(double splitSurplusPercent);
void set_tree_hints(void);
// Reset statistics for all the lists in the tree.
void clearTreeCensus(void);
void clear_tree_census(void);
// Print the statistics for all the lists in the tree. Also may
// print out summaries.
void printDictCensus(void) const;
void print_dict_census(void) const;
void print_free_lists(outputStream* st) const;
// For debugging. Returns the sum of the _returnedBytes for
// For debugging. Returns the sum of the _returned_bytes for
// all lists in the tree.
size_t sumDictReturnedBytes() PRODUCT_RETURN0;
// Sets the _returnedBytes for all the lists in the tree to zero.
void initializeDictReturnedBytes() PRODUCT_RETURN;
size_t sum_dict_returned_bytes() PRODUCT_RETURN0;
// Sets the _returned_bytes for all the lists in the tree to zero.
void initialize_dict_returned_bytes() PRODUCT_RETURN;
// For debugging. Return the total number of chunks in the dictionary.
size_t totalCount() PRODUCT_RETURN0;
size_t total_count() PRODUCT_RETURN0;
void reportStatistics() const;
void report_statistics() const;
void verify() const;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_BINARYTREEDICTIONARY_HPP
#endif // SHARE_VM_MEMORY_BINARYTREEDICTIONARY_HPP
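Taken together, the declaration above is a size-ordered tree whose nodes are per-size free lists, with _total_size and _total_free_blocks maintained incrementally. The following self-contained sketch is not part of this commit; DemoChunk, DemoDictionary, and the use of std::map as the ordered structure are assumptions for illustration. It shows the get_chunk "size or larger" lookup and the running-total bookkeeping in miniature:

// Illustrative stand-in for BinaryTreeDictionary<Chunk>: an ordered
// map from chunk size to a singly-linked list of free chunks.
#include <cstddef>
#include <cstdio>
#include <map>

struct DemoChunk {                  // hypothetical stand-in for a Chunk type
  size_t size;
  DemoChunk* next;
};

class DemoDictionary {
  std::map<size_t, DemoChunk*> _lists;  // keyed by chunk size, like the tree
  size_t _total_size;

 public:
  DemoDictionary() : _total_size(0) {}

  // Analogous to return_chunk/insert_chunk_in_tree: push onto the
  // list for this size and grow the running total.
  void return_chunk(DemoChunk* c) {
    c->next = _lists[c->size];
    _lists[c->size] = c;
    _total_size += c->size;
  }

  // Analogous to get_chunk(size, atLeast): take the first list whose
  // key is >= size and unlink its head chunk; an emptied list is
  // erased, mirroring the removal of an empty TreeList node.
  DemoChunk* get_chunk(size_t size) {
    std::map<size_t, DemoChunk*>::iterator it = _lists.lower_bound(size);
    if (it == _lists.end()) return NULL;
    DemoChunk* c = it->second;
    if ((it->second = c->next) == NULL) _lists.erase(it);
    _total_size -= c->size;
    return c;
  }

  size_t total_size() const { return _total_size; }
};

int main() {
  DemoChunk a = {4, NULL}, b = {16, NULL};
  DemoDictionary d;
  d.return_chunk(&a);
  d.return_chunk(&b);
  DemoChunk* got = d.get_chunk(8);           // returns the 16-word chunk
  std::printf("got %zu, remaining %zu\n", got->size, d.total_size());
  return 0;
}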
......@@ -23,7 +23,10 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#endif // SERIALGC
#include "memory/freeBlockDictionary.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
......@@ -38,19 +41,19 @@
#endif
#ifndef PRODUCT
Mutex* FreeBlockDictionary::par_lock() const {
template <class Chunk> Mutex* FreeBlockDictionary<Chunk>::par_lock() const {
return _lock;
}
void FreeBlockDictionary::set_par_lock(Mutex* lock) {
template <class Chunk> void FreeBlockDictionary<Chunk>::set_par_lock(Mutex* lock) {
_lock = lock;
}
void FreeBlockDictionary::verify_par_locked() const {
template <class Chunk> void FreeBlockDictionary<Chunk>::verify_par_locked() const {
#ifdef ASSERT
if (ParallelGCThreads > 0) {
Thread* myThread = Thread::current();
if (myThread->is_GC_task_thread()) {
Thread* my_thread = Thread::current();
if (my_thread->is_GC_task_thread()) {
assert(par_lock() != NULL, "Should be using locking?");
assert_lock_strong(par_lock());
}
......@@ -58,3 +61,8 @@ void FreeBlockDictionary::verify_par_locked() const {
#endif // ASSERT
}
#endif
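The verify_par_locked() definition above only checks the lock when a GC task thread is involved, and the lock pointer exists purely so that assertions can consult it. A minimal sketch of that debug-only pattern follows; ToyLock and ToyDictionary are assumed stand-ins, while real HotSpot uses Mutex and assert_lock_strong:

#include <cassert>
#include <cstddef>

struct ToyLock {
  bool held;
  bool owned_by_self() const { return held; }
};

class ToyDictionary {
  ToyLock* _lock;   // kept only so assertions can consult it
 public:
  ToyDictionary() : _lock(NULL) {}
  void set_par_lock(ToyLock* lock) { _lock = lock; }
  void verify_par_locked() const {
    assert(_lock == NULL || _lock->owned_by_self());
  }
};

int main() {
  ToyLock l = { true };
  ToyDictionary d;
  d.set_par_lock(&l);
  d.verify_par_locked();   // passes while the lock is "held"
  return 0;
}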
#ifndef SERIALGC
// Explicitly instantiate for FreeChunk
template class FreeBlockDictionary<FreeChunk>;
#endif // SERIALGC
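The explicit instantiation just above is what allows FreeBlockDictionary's template member definitions to live in this .cpp file rather than in the header. A compilable sketch of the pattern, with hypothetical Dict/FreeChunkStandIn names in place of the HotSpot types:

// In the "header": the template is declared, members only prototyped.
template <class Chunk>
class Dict {
 public:
  void set_lock(int* lock);   // defined out of line, in the ".cpp"
 private:
  int* _lock;
};

// In the ".cpp": the out-of-line template member definition.
template <class Chunk>
void Dict<Chunk>::set_lock(int* lock) { _lock = lock; }

struct FreeChunkStandIn {};              // assumed concrete chunk type

// One explicit instantiation per chunk type emits all members here,
// so other translation units can link without seeing the definitions.
template class Dict<FreeChunkStandIn>;

int main() {
  int lock_word = 0;
  Dict<FreeChunkStandIn> d;
  d.set_lock(&lock_word);
  return 0;
}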
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -22,12 +22,10 @@
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREEBLOCKDICTIONARY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREEBLOCKDICTIONARY_HPP
#ifndef SHARE_VM_MEMORY_FREEBLOCKDICTIONARY_HPP
#define SHARE_VM_MEMORY_FREEBLOCKDICTIONARY_HPP
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "runtime/mutex.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
......@@ -35,6 +33,7 @@
// A FreeBlockDictionary is an abstract superclass that will allow
// a number of alternative implementations in the future.
template <class Chunk>
class FreeBlockDictionary: public CHeapObj {
public:
enum Dither {
......@@ -52,45 +51,45 @@ class FreeBlockDictionary: public CHeapObj {
NOT_PRODUCT(Mutex* _lock;)
public:
virtual void removeChunk(FreeChunk* fc) = 0;
virtual FreeChunk* getChunk(size_t size, Dither dither = atLeast) = 0;
virtual void returnChunk(FreeChunk* chunk) = 0;
virtual size_t totalChunkSize(debug_only(const Mutex* lock)) const = 0;
virtual size_t maxChunkSize() const = 0;
virtual size_t minSize() const = 0;
virtual void remove_chunk(Chunk* fc) = 0;
virtual Chunk* get_chunk(size_t size, Dither dither = atLeast) = 0;
virtual void return_chunk(Chunk* chunk) = 0;
virtual size_t total_chunk_size(debug_only(const Mutex* lock)) const = 0;
virtual size_t max_chunk_size() const = 0;
virtual size_t min_size() const = 0;
// Reset the dictionary to the initial conditions for a single
// block.
virtual void reset(HeapWord* addr, size_t size) = 0;
virtual void reset() = 0;
virtual void dictCensusUpdate(size_t size, bool split, bool birth) = 0;
virtual bool coalDictOverPopulated(size_t size) = 0;
virtual void beginSweepDictCensus(double coalSurplusPercent,
virtual void dict_census_udpate(size_t size, bool split, bool birth) = 0;
virtual bool coal_dict_over_populated(size_t size) = 0;
virtual void begin_sweep_dict_census(double coalSurplusPercent,
float inter_sweep_current, float inter_sweep_estimate,
float intra_sweep_current) = 0;
virtual void endSweepDictCensus(double splitSurplusPercent) = 0;
virtual FreeChunk* findLargestDict() const = 0;
virtual void end_sweep_dict_census(double splitSurplusPercent) = 0;
virtual Chunk* find_largest_dict() const = 0;
// verify that the given chunk is in the dictionary.
virtual bool verifyChunkInFreeLists(FreeChunk* tc) const = 0;
virtual bool verify_chunk_in_free_list(Chunk* tc) const = 0;
// Sigma_{all_free_blocks} (block_size^2)
virtual double sum_of_squared_block_sizes() const = 0;
virtual FreeChunk* find_chunk_ends_at(HeapWord* target) const = 0;
virtual void inc_totalSize(size_t v) = 0;
virtual void dec_totalSize(size_t v) = 0;
virtual Chunk* find_chunk_ends_at(HeapWord* target) const = 0;
virtual void inc_total_size(size_t v) = 0;
virtual void dec_total_size(size_t v) = 0;
NOT_PRODUCT (
virtual size_t sumDictReturnedBytes() = 0;
virtual void initializeDictReturnedBytes() = 0;
virtual size_t totalCount() = 0;
virtual size_t sum_dict_returned_bytes() = 0;
virtual void initialize_dict_returned_bytes() = 0;
virtual size_t total_count() = 0;
)
virtual void reportStatistics() const {
virtual void report_statistics() const {
gclog_or_tty->print("No statistics available");
}
virtual void printDictCensus() const = 0;
virtual void print_dict_census() const = 0;
virtual void print_free_lists(outputStream* st) const = 0;
virtual void verify() const = 0;
......@@ -100,4 +99,4 @@ class FreeBlockDictionary: public CHeapObj {
void verify_par_locked() const PRODUCT_RETURN;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREEBLOCKDICTIONARY_HPP
#endif // SHARE_VM_MEMORY_FREEBLOCKDICTIONARY_HPP
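The header never states what the Chunk parameter must provide; judging from the calls made throughout this diff (size(), is_free(), next()/prev(), and the link_* helpers), a usable chunk type would need roughly the shape below. ToyChunk is an assumed illustration, not a HotSpot class:

#include <cstddef>

class ToyChunk {
  size_t    _size;
  ToyChunk* _next;
  ToyChunk* _prev;
 public:
  explicit ToyChunk(size_t sz) : _size(sz), _next(NULL), _prev(NULL) {}

  size_t    size() const      { return _size; }
  bool      is_free() const   { return true; }   // always free in this toy
  ToyChunk* next() const      { return _next; }
  ToyChunk* prev() const      { return _prev; }
  void link_next(ToyChunk* c) { _next = c; }
  void link_prev(ToyChunk* c) { _prev = c; }
  void clear_next()           { _next = NULL; }
  // Doubly-link this chunk in front of "c", the way return_chunk_at_head
  // uses link_after in this diff.
  void link_after(ToyChunk* c) {
    link_next(c);
    if (c != NULL) c->link_prev(this);
  }
};

int main() {
  ToyChunk a(8), b(8);
  a.link_after(&b);              // a now precedes b
  return (a.next() == &b && b.prev() == &a) ? 0 : 1;
}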
......@@ -23,20 +23,25 @@
*/
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
#include "gc_implementation/concurrentMarkSweep/freeList.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/vmThread.hpp"
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#endif // SERIALGC
// Free list. A FreeList is used to access a linked list of chunks
// of space in the heap. The head and tail are maintained so that
// items can be (as in the current implementation) added at the
// tail of the list and removed from the head of the list to
// maintain a FIFO queue.
FreeList::FreeList() :
template <class Chunk>
FreeList<Chunk>::FreeList() :
_head(NULL), _tail(NULL)
#ifdef ASSERT
, _protecting_lock(NULL)
......@@ -48,7 +53,8 @@ FreeList::FreeList() :
init_statistics();
}
FreeList::FreeList(FreeChunk* fc) :
template <class Chunk>
FreeList<Chunk>::FreeList(Chunk* fc) :
_head(fc), _tail(fc)
#ifdef ASSERT
, _protecting_lock(NULL)
......@@ -59,48 +65,35 @@ FreeList::FreeList(FreeChunk* fc) :
_hint = 0;
init_statistics();
#ifndef PRODUCT
_allocation_stats.set_returnedBytes(size() * HeapWordSize);
#endif
}
FreeList::FreeList(HeapWord* addr, size_t size) :
_head((FreeChunk*) addr), _tail((FreeChunk*) addr)
#ifdef ASSERT
, _protecting_lock(NULL)
#endif
{
assert(size > sizeof(FreeChunk), "size is too small");
head()->setSize(size);
_size = size;
_count = 1;
init_statistics();
#ifndef PRODUCT
_allocation_stats.set_returnedBytes(_size * HeapWordSize);
_allocation_stats.set_returned_bytes(size() * HeapWordSize);
#endif
}
void FreeList::reset(size_t hint) {
template <class Chunk>
void FreeList<Chunk>::reset(size_t hint) {
set_count(0);
set_head(NULL);
set_tail(NULL);
set_hint(hint);
}
void FreeList::init_statistics(bool split_birth) {
template <class Chunk>
void FreeList<Chunk>::init_statistics(bool split_birth) {
_allocation_stats.initialize(split_birth);
}
FreeChunk* FreeList::getChunkAtHead() {
template <class Chunk>
Chunk* FreeList<Chunk>::get_chunk_at_head() {
assert_proper_lock_protection();
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
FreeChunk* fc = head();
Chunk* fc = head();
if (fc != NULL) {
FreeChunk* nextFC = fc->next();
Chunk* nextFC = fc->next();
if (nextFC != NULL) {
// The chunk fc being removed has a "next". Set the "next" to the
// "prev" of fc.
nextFC->linkPrev(NULL);
nextFC->link_prev(NULL);
} else { // removed tail of list
link_tail(NULL);
}
......@@ -113,29 +106,30 @@ FreeChunk* FreeList::getChunkAtHead() {
}
void FreeList::getFirstNChunksFromList(size_t n, FreeList* fl) {
template <class Chunk>
void FreeList<Chunk>::getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl) {
assert_proper_lock_protection();
assert(fl->count() == 0, "Precondition");
if (count() > 0) {
int k = 1;
fl->set_head(head()); n--;
FreeChunk* tl = head();
Chunk* tl = head();
while (tl->next() != NULL && n > 0) {
tl = tl->next(); n--; k++;
}
assert(tl != NULL, "Loop Inv.");
// First, fix up the list we took from.
FreeChunk* new_head = tl->next();
Chunk* new_head = tl->next();
set_head(new_head);
set_count(count() - k);
if (new_head == NULL) {
set_tail(NULL);
} else {
new_head->linkPrev(NULL);
new_head->link_prev(NULL);
}
// Now we can fix up the tail.
tl->linkNext(NULL);
tl->link_next(NULL);
// And return the result.
fl->set_tail(tl);
fl->set_count(k);
......@@ -143,7 +137,8 @@ void FreeList::getFirstNChunksFromList(size_t n, FreeList* fl) {
}
// Remove this chunk from the list
void FreeList::removeChunk(FreeChunk*fc) {
template <class Chunk>
void FreeList<Chunk>::remove_chunk(Chunk*fc) {
assert_proper_lock_protection();
assert(head() != NULL, "Remove from empty list");
assert(fc != NULL, "Remove a NULL chunk");
......@@ -151,12 +146,12 @@ void FreeList::removeChunk(FreeChunk*fc) {
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
FreeChunk* prevFC = fc->prev();
FreeChunk* nextFC = fc->next();
Chunk* prevFC = fc->prev();
Chunk* nextFC = fc->next();
if (nextFC != NULL) {
// The chunk fc being removed has a "next". Set the "next" to the
// "prev" of fc.
nextFC->linkPrev(prevFC);
nextFC->link_prev(prevFC);
} else { // removed tail of list
link_tail(prevFC);
}
......@@ -165,7 +160,7 @@ void FreeList::removeChunk(FreeChunk*fc) {
assert(nextFC == NULL || nextFC->prev() == NULL,
"Prev of head should be NULL");
} else {
prevFC->linkNext(nextFC);
prevFC->link_next(nextFC);
assert(tail() != prevFC || prevFC->next() == NULL,
"Next of tail should be NULL");
}
......@@ -174,10 +169,10 @@ void FreeList::removeChunk(FreeChunk*fc) {
"H/T/C Inconsistency");
// clear next and prev fields of fc, debug only
NOT_PRODUCT(
fc->linkPrev(NULL);
fc->linkNext(NULL);
fc->link_prev(NULL);
fc->link_next(NULL);
)
assert(fc->isFree(), "Should still be a free chunk");
assert(fc->is_free(), "Should still be a free chunk");
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
assert(head() == NULL || head()->size() == size(), "wrong item on list");
......@@ -185,16 +180,17 @@ void FreeList::removeChunk(FreeChunk*fc) {
}
// Add this chunk at the head of the list.
void FreeList::returnChunkAtHead(FreeChunk* chunk, bool record_return) {
template <class Chunk>
void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
assert_proper_lock_protection();
assert(chunk != NULL, "insert a NULL chunk");
assert(size() == chunk->size(), "Wrong size");
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
FreeChunk* oldHead = head();
Chunk* oldHead = head();
assert(chunk != oldHead, "double insertion");
chunk->linkAfter(oldHead);
chunk->link_after(oldHead);
link_head(chunk);
if (oldHead == NULL) { // only chunk in list
assert(tail() == NULL, "inconsistent FreeList");
......@@ -203,7 +199,7 @@ void FreeList::returnChunkAtHead(FreeChunk* chunk, bool record_return) {
increment_count(); // of # of chunks in list
DEBUG_ONLY(
if (record_return) {
increment_returnedBytes_by(size()*HeapWordSize);
increment_returned_bytes_by(size()*HeapWordSize);
}
)
assert(head() == NULL || head()->prev() == NULL, "list invariant");
......@@ -212,23 +208,25 @@ void FreeList::returnChunkAtHead(FreeChunk* chunk, bool record_return) {
assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
}
void FreeList::returnChunkAtHead(FreeChunk* chunk) {
template <class Chunk>
void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
assert_proper_lock_protection();
returnChunkAtHead(chunk, true);
return_chunk_at_head(chunk, true);
}
// Add this chunk at the tail of the list.
void FreeList::returnChunkAtTail(FreeChunk* chunk, bool record_return) {
template <class Chunk>
void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
assert_proper_lock_protection();
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
assert(chunk != NULL, "insert a NULL chunk");
assert(size() == chunk->size(), "wrong size");
FreeChunk* oldTail = tail();
Chunk* oldTail = tail();
assert(chunk != oldTail, "double insertion");
if (oldTail != NULL) {
oldTail->linkAfter(chunk);
oldTail->link_after(chunk);
} else { // only chunk in list
assert(head() == NULL, "inconsistent FreeList");
link_head(chunk);
......@@ -237,7 +235,7 @@ void FreeList::returnChunkAtTail(FreeChunk* chunk, bool record_return) {
increment_count(); // of # of chunks in list
DEBUG_ONLY(
if (record_return) {
increment_returnedBytes_by(size()*HeapWordSize);
increment_returned_bytes_by(size()*HeapWordSize);
}
)
assert(head() == NULL || head()->prev() == NULL, "list invariant");
......@@ -246,11 +244,13 @@ void FreeList::returnChunkAtTail(FreeChunk* chunk, bool record_return) {
assert(tail() == NULL || tail()->size() == size(), "wrong item on list");
}
void FreeList::returnChunkAtTail(FreeChunk* chunk) {
returnChunkAtTail(chunk, true);
template <class Chunk>
void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
return_chunk_at_tail(chunk, true);
}
void FreeList::prepend(FreeList* fl) {
template <class Chunk>
void FreeList<Chunk>::prepend(FreeList<Chunk>* fl) {
assert_proper_lock_protection();
if (fl->count() > 0) {
if (count() == 0) {
......@@ -259,11 +259,11 @@ void FreeList::prepend(FreeList* fl) {
set_count(fl->count());
} else {
// Both are non-empty.
FreeChunk* fl_tail = fl->tail();
FreeChunk* this_head = head();
Chunk* fl_tail = fl->tail();
Chunk* this_head = head();
assert(fl_tail->next() == NULL, "Well-formedness of fl");
fl_tail->linkNext(this_head);
this_head->linkPrev(fl_tail);
fl_tail->link_next(this_head);
this_head->link_prev(fl_tail);
set_head(fl->head());
set_count(count() + fl->count());
}
......@@ -273,13 +273,14 @@ void FreeList::prepend(FreeList* fl) {
}
}
// verifyChunkInFreeLists() is used to verify that an item is in this free list.
// verify_chunk_in_free_list() is used to verify that an item is in this free list.
// It is used as a debugging aid.
bool FreeList::verifyChunkInFreeLists(FreeChunk* fc) const {
template <class Chunk>
bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const {
// This is an internal consistency check, not part of the check that the
// chunk is in the free lists.
guarantee(fc->size() == size(), "Wrong list is being searched");
FreeChunk* curFC = head();
Chunk* curFC = head();
while (curFC) {
// This is an internal consistency check.
guarantee(size() == curFC->size(), "Chunk is in wrong list.");
......@@ -292,7 +293,8 @@ bool FreeList::verifyChunkInFreeLists(FreeChunk* fc) const {
}
#ifndef PRODUCT
void FreeList::verify_stats() const {
template <class Chunk>
void FreeList<Chunk>::verify_stats() const {
// The +1 of the LH comparand is to allow some "looseness" in
// checking: we usually call this interface when adding a block
// and we'll subsequently update the stats; we cannot update the
......@@ -300,24 +302,25 @@ void FreeList::verify_stats() const {
// dictionary for example, this might be the first block and
// in that case there would be no place that we could record
// the stats (which are kept in the block itself).
assert((_allocation_stats.prevSweep() + _allocation_stats.splitBirths()
+ _allocation_stats.coalBirths() + 1) // Total Production Stock + 1
>= (_allocation_stats.splitDeaths() + _allocation_stats.coalDeaths()
assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
+ _allocation_stats.coal_births() + 1) // Total Production Stock + 1
>= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
+ (ssize_t)count()), // Total Current Stock + depletion
err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
" violates Conservation Principle: "
"prevSweep(" SIZE_FORMAT ")"
" + splitBirths(" SIZE_FORMAT ")"
" + coalBirths(" SIZE_FORMAT ") + 1 >= "
" splitDeaths(" SIZE_FORMAT ")"
" coalDeaths(" SIZE_FORMAT ")"
"prev_sweep(" SIZE_FORMAT ")"
" + split_births(" SIZE_FORMAT ")"
" + coal_births(" SIZE_FORMAT ") + 1 >= "
" split_deaths(" SIZE_FORMAT ")"
" coal_deaths(" SIZE_FORMAT ")"
" + count(" SSIZE_FORMAT ")",
this, _size, _allocation_stats.prevSweep(), _allocation_stats.splitBirths(),
_allocation_stats.splitBirths(), _allocation_stats.splitDeaths(),
_allocation_stats.coalDeaths(), count()));
this, _size, _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
_allocation_stats.coal_births(), _allocation_stats.split_deaths(),
_allocation_stats.coal_deaths(), count()));
}
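For a concrete feel of the invariant verify_stats() enforces, here is a tiny numeric check of the Conservation Principle with invented values: production (prev_sweep + split_births + coal_births, plus 1 for the checking looseness described above) must cover deaths plus the chunks currently on the list:

#include <cassert>

int main() {
  long prev_sweep = 10, split_births = 4, coal_births = 2;   // production
  long split_deaths = 3, coal_deaths = 5, count = 7;         // consumption
  // Invented numbers only: 10 + 4 + 2 + 1 = 17 >= 3 + 5 + 7 = 15.
  assert(prev_sweep + split_births + coal_births + 1 >=
         split_deaths + coal_deaths + count);
  return 0;
}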
void FreeList::assert_proper_lock_protection_work() const {
template <class Chunk>
void FreeList<Chunk>::assert_proper_lock_protection_work() const {
assert(_protecting_lock != NULL, "Don't call this directly");
assert(ParallelGCThreads > 0, "Don't call this directly");
Thread* thr = Thread::current();
......@@ -334,7 +337,8 @@ void FreeList::assert_proper_lock_protection_work() const {
#endif
// Print the "label line" for free list stats.
void FreeList::print_labels_on(outputStream* st, const char* c) {
template <class Chunk>
void FreeList<Chunk>::print_labels_on(outputStream* st, const char* c) {
st->print("%16s\t", c);
st->print("%14s\t" "%14s\t" "%14s\t" "%14s\t" "%14s\t"
"%14s\t" "%14s\t" "%14s\t" "%14s\t" "%14s\t" "\n",
......@@ -346,7 +350,8 @@ void FreeList::print_labels_on(outputStream* st, const char* c) {
// to the call is a non-null string, it is printed in the first column;
// otherwise, if the argument is null (the default), then the size of the
// (free list) block is printed in the first column.
void FreeList::print_on(outputStream* st, const char* c) const {
template <class Chunk>
void FreeList<Chunk>::print_on(outputStream* st, const char* c) const {
if (c != NULL) {
st->print("%16s", c);
} else {
......@@ -355,6 +360,11 @@ void FreeList::print_on(outputStream* st, const char* c) const {
st->print("\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
bfrSurp(), surplus(), desired(), prevSweep(), beforeSweep(),
count(), coalBirths(), coalDeaths(), splitBirths(), splitDeaths());
bfr_surp(), surplus(), desired(), prev_sweep(), before_sweep(),
count(), coal_births(), coal_deaths(), split_births(), split_deaths());
}
#ifndef SERIALGC
// Needs to be after the definitions have been seen.
template class FreeList<FreeChunk>;
#endif // SERIALGC
......@@ -22,39 +22,36 @@
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP
#ifndef SHARE_VM_MEMORY_FREELIST_HPP
#define SHARE_VM_MEMORY_FREELIST_HPP
#include "gc_implementation/shared/allocationStats.hpp"
class CompactibleFreeListSpace;
// A class for maintaining a free list of FreeChunk's. The FreeList
// A class for maintaining a free list of Chunk's. The FreeList
// maintains the structure of the list (head, tail, etc.) plus
// statistics for allocations from the list. The links between items
// are not part of FreeList. The statistics are
// used to make decisions about coalescing FreeChunk's when they
// used to make decisions about coalescing Chunk's when they
// are swept during collection.
//
// See the corresponding .cpp file for a description of the specifics
// for that implementation.
class Mutex;
class TreeList;
template <class Chunk> class TreeList;
template <class Chunk> class PrintTreeCensusClosure;
template <class Chunk>
class FreeList VALUE_OBJ_CLASS_SPEC {
friend class CompactibleFreeListSpace;
friend class VMStructs;
friend class PrintTreeCensusClosure;
protected:
TreeList* _parent;
TreeList* _left;
TreeList* _right;
friend class PrintTreeCensusClosure<Chunk>;
private:
FreeChunk* _head; // Head of list of free chunks
FreeChunk* _tail; // Tail of list of free chunks
Chunk* _head; // Head of list of free chunks
Chunk* _tail; // Tail of list of free chunks
size_t _size; // Size in Heap words of each chunk
ssize_t _count; // Number of entries in list
size_t _hint; // next larger size list with a positive surplus
......@@ -92,10 +89,7 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
// Construct a list without any entries.
FreeList();
// Construct a list with "fc" as the first (and lone) entry in the list.
FreeList(FreeChunk* fc);
// Construct a list which will have a FreeChunk at address "addr" and
// of size "size" as the first (and lone) entry in the list.
FreeList(HeapWord* addr, size_t size);
FreeList(Chunk* fc);
// Reset the head, tail, hint, and count of a free list.
void reset(size_t hint);
......@@ -108,43 +102,43 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
#endif
// Accessors.
FreeChunk* head() const {
Chunk* head() const {
assert_proper_lock_protection();
return _head;
}
void set_head(FreeChunk* v) {
void set_head(Chunk* v) {
assert_proper_lock_protection();
_head = v;
assert(!_head || _head->size() == _size, "bad chunk size");
}
// Set the head of the list and set the prev field of non-null
// values to NULL.
void link_head(FreeChunk* v) {
void link_head(Chunk* v) {
assert_proper_lock_protection();
set_head(v);
// If this method is not used (just set the head instead),
// this check can be avoided.
if (v != NULL) {
v->linkPrev(NULL);
v->link_prev(NULL);
}
}
FreeChunk* tail() const {
Chunk* tail() const {
assert_proper_lock_protection();
return _tail;
}
void set_tail(FreeChunk* v) {
void set_tail(Chunk* v) {
assert_proper_lock_protection();
_tail = v;
assert(!_tail || _tail->size() == _size, "bad chunk size");
}
// Set the tail of the list and set the next field of non-null
// values to NULL.
void link_tail(FreeChunk* v) {
void link_tail(Chunk* v) {
assert_proper_lock_protection();
set_tail(v);
if (v != NULL) {
v->clearNext();
v->clear_next();
}
}
......@@ -191,12 +185,12 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
inter_sweep_estimate,
intra_sweep_estimate);
}
ssize_t coalDesired() const {
return _allocation_stats.coalDesired();
ssize_t coal_desired() const {
return _allocation_stats.coal_desired();
}
void set_coalDesired(ssize_t v) {
void set_coal_desired(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coalDesired(v);
_allocation_stats.set_coal_desired(v);
}
ssize_t surplus() const {
......@@ -215,114 +209,114 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
_allocation_stats.decrement_surplus();
}
ssize_t bfrSurp() const {
return _allocation_stats.bfrSurp();
ssize_t bfr_surp() const {
return _allocation_stats.bfr_surp();
}
void set_bfrSurp(ssize_t v) {
void set_bfr_surp(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_bfrSurp(v);
_allocation_stats.set_bfr_surp(v);
}
ssize_t prevSweep() const {
return _allocation_stats.prevSweep();
ssize_t prev_sweep() const {
return _allocation_stats.prev_sweep();
}
void set_prevSweep(ssize_t v) {
void set_prev_sweep(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_prevSweep(v);
_allocation_stats.set_prev_sweep(v);
}
ssize_t beforeSweep() const {
return _allocation_stats.beforeSweep();
ssize_t before_sweep() const {
return _allocation_stats.before_sweep();
}
void set_beforeSweep(ssize_t v) {
void set_before_sweep(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_beforeSweep(v);
_allocation_stats.set_before_sweep(v);
}
ssize_t coalBirths() const {
return _allocation_stats.coalBirths();
ssize_t coal_births() const {
return _allocation_stats.coal_births();
}
void set_coalBirths(ssize_t v) {
void set_coal_births(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coalBirths(v);
_allocation_stats.set_coal_births(v);
}
void increment_coalBirths() {
void increment_coal_births() {
assert_proper_lock_protection();
_allocation_stats.increment_coalBirths();
_allocation_stats.increment_coal_births();
}
ssize_t coalDeaths() const {
return _allocation_stats.coalDeaths();
ssize_t coal_deaths() const {
return _allocation_stats.coal_deaths();
}
void set_coalDeaths(ssize_t v) {
void set_coal_deaths(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coalDeaths(v);
_allocation_stats.set_coal_deaths(v);
}
void increment_coalDeaths() {
void increment_coal_deaths() {
assert_proper_lock_protection();
_allocation_stats.increment_coalDeaths();
_allocation_stats.increment_coal_deaths();
}
ssize_t splitBirths() const {
return _allocation_stats.splitBirths();
ssize_t split_births() const {
return _allocation_stats.split_births();
}
void set_splitBirths(ssize_t v) {
void set_split_births(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_splitBirths(v);
_allocation_stats.set_split_births(v);
}
void increment_splitBirths() {
void increment_split_births() {
assert_proper_lock_protection();
_allocation_stats.increment_splitBirths();
_allocation_stats.increment_split_births();
}
ssize_t splitDeaths() const {
return _allocation_stats.splitDeaths();
ssize_t split_deaths() const {
return _allocation_stats.split_deaths();
}
void set_splitDeaths(ssize_t v) {
void set_split_deaths(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_splitDeaths(v);
_allocation_stats.set_split_deaths(v);
}
void increment_splitDeaths() {
void increment_split_deaths() {
assert_proper_lock_protection();
_allocation_stats.increment_splitDeaths();
_allocation_stats.increment_split_deaths();
}
NOT_PRODUCT(
// For debugging. The "_returnedBytes" in all the lists are summed
// For debugging. The "_returned_bytes" in all the lists are summed
// and compared with the total number of bytes swept during a
// collection.
size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
void increment_returnedBytes_by(size_t v) {
_allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
void increment_returned_bytes_by(size_t v) {
_allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
}
)
// Unlink head of list and return it. Returns NULL if
// the list is empty.
FreeChunk* getChunkAtHead();
Chunk* get_chunk_at_head();
// Remove the first "n" or "count", whichever is smaller, chunks from the
// list, setting "fl", which is required to be empty, to point to them.
void getFirstNChunksFromList(size_t n, FreeList* fl);
void getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl);
// Unlink this chunk from its free list
void removeChunk(FreeChunk* fc);
void remove_chunk(Chunk* fc);
// Add this chunk to this free list.
void returnChunkAtHead(FreeChunk* fc);
void returnChunkAtTail(FreeChunk* fc);
void return_chunk_at_head(Chunk* fc);
void return_chunk_at_tail(Chunk* fc);
// Similar to return_chunk_at_* but also records some diagnostic
// information.
void returnChunkAtHead(FreeChunk* fc, bool record_return);
void returnChunkAtTail(FreeChunk* fc, bool record_return);
void return_chunk_at_head(Chunk* fc, bool record_return);
void return_chunk_at_tail(Chunk* fc, bool record_return);
// Prepend "fl" (whose size is required to be the same as that of "this")
// to the front of "this" list.
void prepend(FreeList* fl);
void prepend(FreeList<Chunk>* fl);
// Verify that the chunk is in the list; return false if "fc" is not found.
bool verifyChunkInFreeLists(FreeChunk* fc) const;
bool verify_chunk_in_free_list(Chunk* fc) const;
// Stats verification
void verify_stats() const PRODUCT_RETURN;
......@@ -332,4 +326,4 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
void print_on(outputStream* st, const char* c = NULL) const;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_FREELIST_HPP
#endif // SHARE_VM_MEMORY_FREELIST_HPP
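As a usage illustration of the FIFO discipline the comments describe (chunks added at the tail, removed from the head), here is a self-contained miniature; MiniChunk and MiniFreeList are stand-ins, not the templatized HotSpot classes:

#include <cassert>
#include <cstddef>

struct MiniChunk {
  size_t size;
  MiniChunk* next;
  MiniChunk* prev;
};

struct MiniFreeList {
  MiniChunk* head;
  MiniChunk* tail;
  long count;

  MiniFreeList() : head(NULL), tail(NULL), count(0) {}

  // Append at the tail, as return_chunk_at_tail does.
  void return_chunk_at_tail(MiniChunk* c) {
    c->next = NULL;
    c->prev = tail;
    if (tail != NULL) tail->next = c; else head = c;
    tail = c;
    ++count;
  }

  // Unlink and return the head, as get_chunk_at_head does.
  MiniChunk* get_chunk_at_head() {
    MiniChunk* c = head;
    if (c != NULL) {
      head = c->next;
      if (head != NULL) head->prev = NULL; else tail = NULL;
      --count;
    }
    return c;
  }
};

int main() {
  MiniChunk a = {8, NULL, NULL}, b = {8, NULL, NULL};
  MiniFreeList fl;
  fl.return_chunk_at_tail(&a);
  fl.return_chunk_at_tail(&b);
  assert(fl.get_chunk_at_head() == &a);  // FIFO: first returned, first taken
  assert(fl.get_chunk_at_head() == &b);
  assert(fl.count == 0);
  return 0;
}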
......@@ -68,7 +68,7 @@ Generation* GenerationSpec::init(ReservedSpace rs, int level,
ConcurrentMarkSweepGeneration* g = NULL;
g = new ConcurrentMarkSweepGeneration(rs,
init_size(), level, ctrs, UseCMSAdaptiveFreeLists,
(FreeBlockDictionary::DictionaryChoice)CMSDictionaryChoice);
(FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
g->initialize_performance_counters();
......@@ -88,7 +88,7 @@ Generation* GenerationSpec::init(ReservedSpace rs, int level,
ASConcurrentMarkSweepGeneration* g = NULL;
g = new ASConcurrentMarkSweepGeneration(rs,
init_size(), level, ctrs, UseCMSAdaptiveFreeLists,
(FreeBlockDictionary::DictionaryChoice)CMSDictionaryChoice);
(FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
g->initialize_performance_counters();
......@@ -175,7 +175,7 @@ PermGen* PermanentGenerationSpec::init(ReservedSpace rs,
}
// XXXPERM
return new CMSPermGen(perm_rs, init_size, ctrs,
(FreeBlockDictionary::DictionaryChoice)CMSDictionaryChoice);
(FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
}
#endif // SERIALGC
default:
......
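The casts above turn a plain integer VM flag (CMSDictionaryChoice) into the dictionary's nested DictionaryChoice enum. A minimal sketch of that pattern, with assumed stand-in names and enum values rather than the HotSpot ones:

#include <cstdio>

template <class Chunk>
class DictStandIn {
 public:
  enum DictionaryChoice { dictionaryBinaryTree = 0, dictionarySplayTree = 1 };
};

struct ChunkStandIn {};

int main() {
  int flag_value = 1;  // as if read from a -XX: style option
  DictStandIn<ChunkStandIn>::DictionaryChoice choice =
      (DictStandIn<ChunkStandIn>::DictionaryChoice)flag_value;
  std::printf("choice = %d\n", (int)choice);
  return 0;
}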
......@@ -293,13 +293,10 @@
# include "c1/c1_globals.hpp"
#endif // COMPILER1
#ifndef SERIALGC
# include "gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp"
# include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
# include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
# include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
# include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
# include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
# include "gc_implementation/concurrentMarkSweep/freeList.hpp"
# include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
# include "gc_implementation/g1/dirtyCardQueue.hpp"
# include "gc_implementation/g1/g1BlockOffsetTable.hpp"
......
......@@ -44,7 +44,6 @@
#include "code/vmreg.hpp"
#include "compiler/oopMap.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp"
#include "gc_implementation/shared/immutableSpace.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
......@@ -55,6 +54,7 @@
#include "memory/cardTableRS.hpp"
#include "memory/compactPermGen.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.hpp"
#include "memory/generationSpec.hpp"
......