Commit 2c61150c authored by jmasa

Merge

......@@ -499,7 +499,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// Verify that the given chunk is in the free lists:
// i.e. either the binary tree dictionary, the indexed free lists
// or the linear allocation block.
bool verifyChunkInFreeLists(FreeChunk* fc) const;
bool verify_chunk_in_free_list(FreeChunk* fc) const;
// Verify that the given chunk is the linear allocation block
bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
// Do some basic checks on the free lists.
......@@ -608,7 +608,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
void coalDeath(size_t size);
void smallSplitBirth(size_t size);
void smallSplitDeath(size_t size);
void splitBirth(size_t size);
void split_birth(size_t size);
void splitDeath(size_t size);
void split(size_t from, size_t to1);
......
......@@ -1026,7 +1026,7 @@ HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
// its mark-bit or P-bits not yet set. Such objects need
// to be safely navigable by block_start().
assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)res)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
collector()->direct_allocated(res, adjustedSize);
_direct_allocated_words += adjustedSize;
// allocation counters
......@@ -1391,7 +1391,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
oop obj = oop(obj_ptr);
OrderAccess::storestore();
assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
// IMPORTANT: See note on object initialization for CMS above.
// Otherwise, copy the object. Here we must be careful to insert the
// klass pointer last, since this marks the block as an allocated object.
......@@ -1400,7 +1400,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
// Restore the mark word copied above.
obj->set_mark(m);
assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
OrderAccess::storestore();
if (UseCompressedOops) {
......@@ -1421,7 +1421,7 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
promoInfo->track((PromotedObject*)obj, old->klass());
}
assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
assert(old->is_oop(), "Will use and dereference old klass ptr below");
// Finally, install the klass pointer (this should be volatile).
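The repeated assertions above enforce an initialization protocol: the object body (and mark word) are written first, a store-store barrier is issued, and only then is the klass pointer installed, so a concurrent reader that sees a non-NULL klass also sees a fully initialized object. A minimal C++11 sketch of that publish protocol, using std::atomic release/acquire in place of OrderAccess::storestore (an assumption of the sketch, not HotSpot code):

```cpp
#include <atomic>
#include <cassert>
#include <thread>

// Sketch of the "install the klass pointer last" protocol: initialize the
// object body, then publish it with a release store, so a reader that
// observes the pointer (acquire load) also observes the initialized body.
struct Obj { int field = 0; };

std::atomic<Obj*> published{nullptr};

int main() {
  std::thread writer([] {
    static Obj obj;
    obj.field = 42;                                     // initialize body first
    published.store(&obj, std::memory_order_release);   // publish last
  });
  std::thread reader([] {
    Obj* o;
    while ((o = published.load(std::memory_order_acquire)) == nullptr) {}
    assert(o->field == 42);                             // body visible after acquire
  });
  writer.join();
  reader.join();
  return 0;
}
```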
......@@ -2034,7 +2034,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
pointer_delta(cms_space->end(), cms_space->compaction_top())
* HeapWordSize,
"All the free space should be compacted into one chunk at top");
assert(cms_space->dictionary()->totalChunkSize(
assert(cms_space->dictionary()->total_chunk_size(
debug_only(cms_space->freelistLock())) == 0 ||
cms_space->totalSizeInIndexedFreeLists() == 0,
"All the free space should be in a single chunk");
......@@ -6131,7 +6131,7 @@ void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
double nearLargestPercent = FLSLargestBlockCoalesceProximity;
HeapWord* minAddr = _cmsSpace->bottom();
HeapWord* largestAddr =
(HeapWord*) _cmsSpace->dictionary()->findLargestDict();
(HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
if (largestAddr == NULL) {
// The dictionary appears to be empty. In this case
// try to coalesce at the end of the heap.
......@@ -7906,7 +7906,7 @@ SweepClosure::SweepClosure(CMSCollector* collector,
_last_fc = NULL;
_sp->initializeIndexedFreeListArrayReturnedBytes();
_sp->dictionary()->initializeDictReturnedBytes();
_sp->dictionary()->initialize_dict_returned_bytes();
)
assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
"sweep _limit out of bounds");
......@@ -7954,13 +7954,13 @@ SweepClosure::~SweepClosure() {
if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
indexListReturnedBytes);
gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
dictReturnedBytes);
dict_returned_bytes);
}
}
if (CMSTraceSweeper) {
......@@ -7985,9 +7985,9 @@ void SweepClosure::initialize_free_range(HeapWord* freeFinger,
if (CMSTestInFreeList) {
if (freeRangeInFreeLists) {
FreeChunk* fc = (FreeChunk*) freeFinger;
assert(fc->isFree(), "A chunk on the free list should be free.");
assert(fc->is_free(), "A chunk on the free list should be free.");
assert(fc->size() > 0, "Free range should have a size");
assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists");
assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
}
}
}
......@@ -8057,7 +8057,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
assert(addr < _limit, "sweep invariant");
// check if we should yield
do_yield_check(addr);
if (fc->isFree()) {
if (fc->is_free()) {
// Chunk that is already free
res = fc->size();
do_already_free_chunk(fc);
......@@ -8145,7 +8145,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
// Chunks that cannot be coalesced are not in the
// free lists.
if (CMSTestInFreeList && !fc->cantCoalesce()) {
assert(_sp->verifyChunkInFreeLists(fc),
assert(_sp->verify_chunk_in_free_list(fc),
"free chunk should be in free lists");
}
// a chunk that is already free, should not have been
......@@ -8171,7 +8171,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
FreeChunk* nextChunk = (FreeChunk*)(addr + size);
assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
if ((HeapWord*)nextChunk < _sp->end() && // There is another free chunk to the right ...
nextChunk->isFree() && // ... which is free...
nextChunk->is_free() && // ... which is free...
nextChunk->cantCoalesce()) { // ... but can't be coalesced
// nothing to do
} else {
......@@ -8203,7 +8203,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
assert(ffc->size() == pointer_delta(addr, freeFinger()),
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
assert(_sp->verifyChunkInFreeLists(ffc),
assert(_sp->verify_chunk_in_free_list(ffc),
"free range is not in free lists");
}
_sp->removeFreeChunkFromFreeLists(ffc);
......@@ -8262,7 +8262,7 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
assert(ffc->size() == pointer_delta(addr, freeFinger()),
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
assert(_sp->verifyChunkInFreeLists(ffc),
assert(_sp->verify_chunk_in_free_list(ffc),
"free range is not in free lists");
}
_sp->removeFreeChunkFromFreeLists(ffc);
......@@ -8351,11 +8351,11 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
size_t chunkSize) {
// do_post_free_or_garbage_chunk() should only be called in the case
// of the adaptive free list allocator.
const bool fcInFreeLists = fc->isFree();
const bool fcInFreeLists = fc->is_free();
assert(_sp->adaptive_freelists(), "Should only be used in this case.");
assert((HeapWord*)fc <= _limit, "sweep invariant");
if (CMSTestInFreeList && fcInFreeLists) {
assert(_sp->verifyChunkInFreeLists(fc), "free chunk is not in free lists");
assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
}
if (CMSTraceSweeper) {
......@@ -8410,7 +8410,7 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
"Size of free range is inconsistent with chunk size.");
if (CMSTestInFreeList) {
assert(_sp->verifyChunkInFreeLists(ffc),
assert(_sp->verify_chunk_in_free_list(ffc),
"Chunk is not in free lists");
}
_sp->coalDeath(ffc->size());
......@@ -8459,7 +8459,7 @@ void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
" when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
_limit, _sp->bottom(), _sp->end(), fc, chunk_size));
if (eob >= _limit) {
assert(eob == _limit || fc->isFree(), "Only a free chunk should allow us to cross over the limit");
assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
if (CMSTraceSweeper) {
gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
"[" PTR_FORMAT "," PTR_FORMAT ") in space "
......@@ -8482,8 +8482,8 @@ void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
if (!freeRangeInFreeLists()) {
if (CMSTestInFreeList) {
FreeChunk* fc = (FreeChunk*) chunk;
fc->setSize(size);
assert(!_sp->verifyChunkInFreeLists(fc),
fc->set_size(size);
assert(!_sp->verify_chunk_in_free_list(fc),
"chunk should not be in free lists yet");
}
if (CMSTraceSweeper) {
......@@ -8557,8 +8557,8 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
// This is actually very useful in a product build if it can
// be called from the debugger. Compile it into the product
// as needed.
bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
return debug_cms_space->verifyChunkInFreeLists(fc);
bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
return debug_cms_space->verify_chunk_in_free_list(fc);
}
#endif
......@@ -9255,7 +9255,7 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
size_t chunk_at_end_old_size = chunk_at_end->size();
assert(chunk_at_end_old_size >= word_size_change,
"Shrink is too large");
chunk_at_end->setSize(chunk_at_end_old_size -
chunk_at_end->set_size(chunk_at_end_old_size -
word_size_change);
_cmsSpace->freed((HeapWord*) chunk_at_end->end(),
word_size_change);
......
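The SweepClosure hunks above revolve around one idea: as the sweep walks the space, already-free and garbage blocks are merged into a growing free range (tracked by freeFinger) that is flushed to the free lists as a single chunk once a live block or the limit is reached. A minimal standalone sketch of that coalescing discipline, with hypothetical names and none of HotSpot's locking or census bookkeeping:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

// Walk blocks left to right, extend the current free range across
// consecutive dead/free blocks, and flush it as one chunk when a live
// block (or the end of the space) is reached.
struct Block { size_t size; bool live; };

static void flush_range(size_t start, size_t len) {
  if (len > 0) std::printf("return chunk [%zu, %zu) to free lists\n", start, start + len);
}

int main() {
  std::vector<Block> space = {{8, true}, {4, false}, {6, false}, {10, true}, {2, false}};
  size_t addr = 0, range_start = 0, range_len = 0;
  for (const Block& b : space) {
    if (!b.live) {
      if (range_len == 0) range_start = addr;   // open a new free range
      range_len += b.size;                      // coalesce with the neighbour
    } else {
      flush_range(range_start, range_len);      // a live block ends the range
      range_len = 0;
    }
    addr += b.size;
  }
  flush_range(range_start, range_len);          // flush any range at end of space
  return 0;
}
```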
......@@ -75,20 +75,20 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
// calls. We really want the read of _mark and _prev from this pointer
// to be volatile but making the fields volatile causes all sorts of
// compilation errors.
return ((volatile FreeChunk*)addr)->isFree();
return ((volatile FreeChunk*)addr)->is_free();
}
bool isFree() const volatile {
bool is_free() const volatile {
LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
return (((intptr_t)_prev) & 0x1) == 0x1;
}
bool cantCoalesce() const {
assert(isFree(), "can't get coalesce bit on not free");
assert(is_free(), "can't get coalesce bit on not free");
return (((intptr_t)_prev) & 0x2) == 0x2;
}
void dontCoalesce() {
// the block should be free
assert(isFree(), "Should look like a free block");
assert(is_free(), "Should look like a free block");
_prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
}
FreeChunk* prev() const {
......@@ -103,23 +103,23 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
return _size;
}
void setSize(size_t sz) {
void set_size(size_t sz) {
LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
_size = sz;
}
FreeChunk* next() const { return _next; }
void linkAfter(FreeChunk* ptr) {
linkNext(ptr);
if (ptr != NULL) ptr->linkPrev(this);
void link_after(FreeChunk* ptr) {
link_next(ptr);
if (ptr != NULL) ptr->link_prev(this);
}
void linkNext(FreeChunk* ptr) { _next = ptr; }
void linkPrev(FreeChunk* ptr) {
void link_next(FreeChunk* ptr) { _next = ptr; }
void link_prev(FreeChunk* ptr) {
LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
_prev = (FreeChunk*)((intptr_t)ptr | 0x1);
}
void clearNext() { _next = NULL; }
void clear_next() { _next = NULL; }
void markNotFree() {
// Set _prev (klass) to null before (if) clearing the mark word below
_prev = NULL;
......@@ -129,7 +129,7 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
set_mark(markOopDesc::prototype());
}
#endif
assert(!isFree(), "Error");
assert(!is_free(), "Error");
}
// Return the address past the end of this chunk
......
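FreeChunk::is_free() and cantCoalesce() above work because chunks are word-aligned, so the two low bits of the _prev pointer are always zero and can carry flags (0x1 = free, 0x2 = don't coalesce); on 64-bit with compressed oops the free bit moves into the mark word instead. A self-contained sketch of the same pointer-tagging trick (not the HotSpot type itself):

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the pointer-tagging trick FreeChunk uses: chunks are
// word-aligned, so the two low bits of the _prev pointer are always
// zero and can carry flags (0x1 = free, 0x2 = don't coalesce).
struct TaggedPrev {
  std::intptr_t bits = 0;

  void set_prev_free(void* p) { bits = reinterpret_cast<std::intptr_t>(p) | 0x1; }
  bool is_free() const        { return (bits & 0x1) == 0x1; }
  void dont_coalesce()        { assert(is_free()); bits |= 0x2; }
  bool cant_coalesce() const  { assert(is_free()); return (bits & 0x2) == 0x2; }
  void* prev() const          { return reinterpret_cast<void*>(bits & ~std::intptr_t(0x3)); }
};

int main() {
  static alignas(8) int neighbour = 0;  // stand-in for an adjacent chunk
  TaggedPrev t;
  t.set_prev_free(&neighbour);
  assert(t.is_free() && !t.cant_coalesce());
  t.dont_coalesce();                    // like FreeChunk::dontCoalesce()
  assert(t.cant_coalesce() && t.prev() == &neighbour);
  return 0;
}
```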
......@@ -121,7 +121,7 @@ void PromotionInfo::track(PromotedObject* trackOop) {
void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
// make a copy of header as it may need to be spooled
markOop mark = oop(trackOop)->mark();
trackOop->clearNext();
trackOop->clear_next();
if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
// save non-prototypical header, and mark oop
saveDisplacedHeader(mark);
......
......@@ -43,7 +43,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
// whose position will depend on endian-ness of the platform.
// This is so that there is no interference with the
// cms_free_bit occupying bit position 7 (lsb == 0)
// when we are using compressed oops; see FreeChunk::isFree().
// when we are using compressed oops; see FreeChunk::is_free().
// We cannot move the cms_free_bit down because currently
// biased locking code assumes that age bits are contiguous
// with the lock bits. Even if that assumption were relaxed,
......@@ -65,7 +65,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
};
public:
inline PromotedObject* next() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
PromotedObject* res;
if (UseCompressedOops) {
// The next pointer is a compressed oop stored in the top 32 bits
......@@ -85,27 +85,27 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
} else {
_next |= (intptr_t)x;
}
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
inline void setPromotedMark() {
_next |= promoted_mask;
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
inline bool hasPromotedMark() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
return (_next & promoted_mask) == promoted_mask;
}
inline void setDisplacedMark() {
_next |= displaced_mark;
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
inline bool hasDisplacedMark() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
return (_next & displaced_mark) != 0;
}
inline void clearNext() {
inline void clear_next() {
_next = 0;
assert(!((FreeChunk*)this)->isFree(), "Error");
assert(!((FreeChunk*)this)->is_free(), "Error");
}
debug_only(void *next_addr() { return (void *) &_next; })
};
......
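PromotedObject overlays its _next field on the object header: low bits hold the promoted/displaced flags, and under compressed oops the 32-bit next pointer sits in the top half of the word, clear of the cms_free_bit at bit position 7. An illustrative packing of a 32-bit link plus two status bits into one 64-bit word; the bit positions here are assumptions of the sketch, not HotSpot's exact layout:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative packing of a 32-bit "next" link plus two status bits into
// one 64-bit word, in the spirit of PromotedObject::_next.
class PackedNext {
  std::uint64_t _next = 0;
  static constexpr std::uint64_t promoted_mask  = 0x1;
  static constexpr std::uint64_t displaced_mark = 0x2;
 public:
  void set_next(std::uint32_t compressed) {
    _next = (_next & 0x3) | (std::uint64_t(compressed) << 32);  // link in top 32 bits
  }
  std::uint32_t next() const { return std::uint32_t(_next >> 32); }
  void set_promoted_mark()   { _next |= promoted_mask; }
  bool has_promoted() const  { return (_next & promoted_mask) != 0; }
  void set_displaced_mark()  { _next |= displaced_mark; }
  bool has_displaced() const { return (_next & displaced_mark) != 0; }
};

int main() {
  PackedNext p;
  p.set_next(0xCAFEBABE);   // compressed oop of the next promoted object
  p.set_promoted_mark();
  assert(p.next() == 0xCAFEBABE && p.has_promoted() && !p.has_displaced());
  return 0;
}
```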
......@@ -46,7 +46,7 @@
nonstatic_field(LinearAllocBlock, _word_size, size_t) \
nonstatic_field(FreeList<FreeChunk>, _size, size_t) \
nonstatic_field(FreeList<FreeChunk>, _count, ssize_t) \
nonstatic_field(BinaryTreeDictionary<FreeChunk>,_totalSize, size_t) \
nonstatic_field(BinaryTreeDictionary<FreeChunk>,_total_size, size_t) \
nonstatic_field(CompactibleFreeListSpace, _dictionary, FreeBlockDictionary<FreeChunk>*) \
nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], FreeList<FreeChunk>) \
nonstatic_field(CompactibleFreeListSpace, _smallLinearAllocBlock, LinearAllocBlock)
......
......@@ -39,7 +39,7 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
// We measure the demand between the end of the previous sweep and
// beginning of this sweep:
// Count(end_last_sweep) - Count(start_this_sweep)
// + splitBirths(between) - splitDeaths(between)
// + split_births(between) - split_deaths(between)
// The above number divided by the time since the end of the
// previous sweep gives us a time rate of demand for blocks
// of this size. We compute a padded average of this rate as
......@@ -51,34 +51,34 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
AdaptivePaddedAverage _demand_rate_estimate;
ssize_t _desired; // Demand estimate computed as described above
ssize_t _coalDesired; // desired +/- small-percent for tuning coalescing
ssize_t _coal_desired; // desired +/- small-percent for tuning coalescing
ssize_t _surplus; // count - (desired +/- small-percent),
// used to tune splitting in best fit
ssize_t _bfrSurp; // surplus at start of current sweep
ssize_t _prevSweep; // count from end of previous sweep
ssize_t _beforeSweep; // count from before current sweep
ssize_t _coalBirths; // additional chunks from coalescing
ssize_t _coalDeaths; // loss from coalescing
ssize_t _splitBirths; // additional chunks from splitting
ssize_t _splitDeaths; // loss from splitting
size_t _returnedBytes; // number of bytes returned to list.
ssize_t _bfr_surp; // surplus at start of current sweep
ssize_t _prev_sweep; // count from end of previous sweep
ssize_t _before_sweep; // count from before current sweep
ssize_t _coal_births; // additional chunks from coalescing
ssize_t _coal_deaths; // loss from coalescing
ssize_t _split_births; // additional chunks from splitting
ssize_t _split_deaths; // loss from splitting
size_t _returned_bytes; // number of bytes returned to list.
public:
void initialize(bool split_birth = false) {
AdaptivePaddedAverage* dummy =
new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
CMS_FLSPadding);
_desired = 0;
_coalDesired = 0;
_coal_desired = 0;
_surplus = 0;
_bfrSurp = 0;
_prevSweep = 0;
_beforeSweep = 0;
_coalBirths = 0;
_coalDeaths = 0;
_splitBirths = (split_birth ? 1 : 0);
_splitDeaths = 0;
_returnedBytes = 0;
_bfr_surp = 0;
_prev_sweep = 0;
_before_sweep = 0;
_coal_births = 0;
_coal_deaths = 0;
_split_births = (split_birth ? 1 : 0);
_split_deaths = 0;
_returned_bytes = 0;
}
AllocationStats() {
......@@ -99,12 +99,12 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
// vulnerable to noisy glitches. In such cases, we
// ignore the current sample and use currently available
// historical estimates.
assert(prevSweep() + splitBirths() + coalBirths() // "Total Production Stock"
>= splitDeaths() + coalDeaths() + (ssize_t)count, // "Current stock + depletion"
assert(prev_sweep() + split_births() + coal_births() // "Total Production Stock"
>= split_deaths() + coal_deaths() + (ssize_t)count, // "Current stock + depletion"
"Conservation Principle");
if (inter_sweep_current > _threshold) {
ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() + coalBirths()
- splitDeaths() - coalDeaths();
ssize_t demand = prev_sweep() - (ssize_t)count + split_births() + coal_births()
- split_deaths() - coal_deaths();
assert(demand >= 0,
err_msg("Demand (" SSIZE_FORMAT ") should be non-negative for "
PTR_FORMAT " (size=" SIZE_FORMAT ")",
......@@ -130,40 +130,40 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
ssize_t desired() const { return _desired; }
void set_desired(ssize_t v) { _desired = v; }
ssize_t coalDesired() const { return _coalDesired; }
void set_coalDesired(ssize_t v) { _coalDesired = v; }
ssize_t coal_desired() const { return _coal_desired; }
void set_coal_desired(ssize_t v) { _coal_desired = v; }
ssize_t surplus() const { return _surplus; }
void set_surplus(ssize_t v) { _surplus = v; }
void increment_surplus() { _surplus++; }
void decrement_surplus() { _surplus--; }
ssize_t bfrSurp() const { return _bfrSurp; }
void set_bfrSurp(ssize_t v) { _bfrSurp = v; }
ssize_t prevSweep() const { return _prevSweep; }
void set_prevSweep(ssize_t v) { _prevSweep = v; }
ssize_t beforeSweep() const { return _beforeSweep; }
void set_beforeSweep(ssize_t v) { _beforeSweep = v; }
ssize_t bfr_surp() const { return _bfr_surp; }
void set_bfr_surp(ssize_t v) { _bfr_surp = v; }
ssize_t prev_sweep() const { return _prev_sweep; }
void set_prev_sweep(ssize_t v) { _prev_sweep = v; }
ssize_t before_sweep() const { return _before_sweep; }
void set_before_sweep(ssize_t v) { _before_sweep = v; }
ssize_t coalBirths() const { return _coalBirths; }
void set_coalBirths(ssize_t v) { _coalBirths = v; }
void increment_coalBirths() { _coalBirths++; }
ssize_t coal_births() const { return _coal_births; }
void set_coal_births(ssize_t v) { _coal_births = v; }
void increment_coal_births() { _coal_births++; }
ssize_t coalDeaths() const { return _coalDeaths; }
void set_coalDeaths(ssize_t v) { _coalDeaths = v; }
void increment_coalDeaths() { _coalDeaths++; }
ssize_t coal_deaths() const { return _coal_deaths; }
void set_coal_deaths(ssize_t v) { _coal_deaths = v; }
void increment_coal_deaths() { _coal_deaths++; }
ssize_t splitBirths() const { return _splitBirths; }
void set_splitBirths(ssize_t v) { _splitBirths = v; }
void increment_splitBirths() { _splitBirths++; }
ssize_t split_births() const { return _split_births; }
void set_split_births(ssize_t v) { _split_births = v; }
void increment_split_births() { _split_births++; }
ssize_t splitDeaths() const { return _splitDeaths; }
void set_splitDeaths(ssize_t v) { _splitDeaths = v; }
void increment_splitDeaths() { _splitDeaths++; }
ssize_t split_deaths() const { return _split_deaths; }
void set_split_deaths(ssize_t v) { _split_deaths = v; }
void increment_split_deaths() { _split_deaths++; }
NOT_PRODUCT(
size_t returnedBytes() const { return _returnedBytes; }
void set_returnedBytes(size_t v) { _returnedBytes = v; }
size_t returned_bytes() const { return _returned_bytes; }
void set_returned_bytes(size_t v) { _returned_bytes = v; }
)
};
......
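The comments above define inter-sweep demand as prev_sweep - count + split_births + coal_births - split_deaths - coal_deaths, guarded by the "Conservation Principle" that total production must cover current stock plus depletion. A small worked example with made-up numbers:

```cpp
#include <cassert>
#include <cstdio>

// Worked example of the demand formula from the comments above:
//   demand = prev_sweep - count + split_births + coal_births
//            - split_deaths - coal_deaths
// The "Conservation Principle" assert checks that total production
// (prev_sweep + births) covers current stock plus depletion (deaths + count).
int main() {
  long prev_sweep = 40, count = 25;
  long split_births = 10, coal_births = 5;
  long split_deaths = 8,  coal_deaths = 2;

  assert(prev_sweep + split_births + coal_births >=       // production...
         split_deaths + coal_deaths + count);             // ...covers stock + depletion

  long demand = prev_sweep - count + split_births + coal_births
              - split_deaths - coal_deaths;               // = 20 blocks consumed
  std::printf("inter-sweep demand = %ld blocks\n", demand);
  return 0;
}
```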
......@@ -70,22 +70,22 @@ class TreeList: public FreeList<Chunk> {
// Accessors for links in tree.
void setLeft(TreeList<Chunk>* tl) {
void set_left(TreeList<Chunk>* tl) {
_left = tl;
if (tl != NULL)
tl->setParent(this);
tl->set_parent(this);
}
void setRight(TreeList<Chunk>* tl) {
void set_right(TreeList<Chunk>* tl) {
_right = tl;
if (tl != NULL)
tl->setParent(this);
tl->set_parent(this);
}
void setParent(TreeList<Chunk>* tl) { _parent = tl; }
void set_parent(TreeList<Chunk>* tl) { _parent = tl; }
void clearLeft() { _left = NULL; }
void clearRight() { _right = NULL; }
void clearParent() { _parent = NULL; }
void initialize() { clearLeft(); clearRight(), clearParent(); }
void clear_right() { _right = NULL; }
void clear_parent() { _parent = NULL; }
void initialize() { clearLeft(); clear_right(), clear_parent(); }
// For constructing a TreeList from a Tree chunk or
// address and size.
......@@ -104,16 +104,16 @@ class TreeList: public FreeList<Chunk> {
// use with caution!
TreeChunk<Chunk>* largest_address();
// removeChunkReplaceIfNeeded() removes the given "tc" from the TreeList.
// remove_chunk_replace_if_needed() removes the given "tc" from the TreeList.
// If "tc" is the first chunk in the list, it is also the
// TreeList that is the node in the tree. removeChunkReplaceIfNeeded()
// TreeList that is the node in the tree. remove_chunk_replace_if_needed()
// returns the possibly replaced TreeList* for the node in
// the tree. It also updates the parent of the original
// node to point to the new node.
TreeList<Chunk>* removeChunkReplaceIfNeeded(TreeChunk<Chunk>* tc);
TreeList<Chunk>* remove_chunk_replace_if_needed(TreeChunk<Chunk>* tc);
// See FreeList.
void returnChunkAtHead(TreeChunk<Chunk>* tc);
void returnChunkAtTail(TreeChunk<Chunk>* tc);
void return_chunk_at_head(TreeChunk<Chunk>* tc);
void return_chunk_at_tail(TreeChunk<Chunk>* tc);
};
// A TreeChunk is a subclass of a Chunk that additionally
......@@ -151,7 +151,7 @@ class TreeChunk : public Chunk {
size_t size() const volatile { return Chunk::size(); }
// debugging
void verifyTreeChunkList() const;
void verify_tree_chunk_list() const;
};
......@@ -159,19 +159,19 @@ template <class Chunk>
class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
friend class VMStructs;
bool _splay;
size_t _totalSize;
size_t _totalFreeBlocks;
size_t _total_size;
size_t _total_free_blocks;
TreeList<Chunk>* _root;
bool _adaptive_freelists;
// private accessors
bool splay() const { return _splay; }
void set_splay(bool v) { _splay = v; }
void set_totalSize(size_t v) { _totalSize = v; }
virtual void inc_totalSize(size_t v);
virtual void dec_totalSize(size_t v);
size_t totalFreeBlocks() const { return _totalFreeBlocks; }
void set_totalFreeBlocks(size_t v) { _totalFreeBlocks = v; }
void set_total_size(size_t v) { _total_size = v; }
virtual void inc_total_size(size_t v);
virtual void dec_total_size(size_t v);
size_t total_free_blocks() const { return _total_free_blocks; }
void set_total_free_blocks(size_t v) { _total_free_blocks = v; }
TreeList<Chunk>* root() const { return _root; }
void set_root(TreeList<Chunk>* v) { _root = v; }
bool adaptive_freelists() { return _adaptive_freelists; }
......@@ -186,46 +186,46 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
// return it. If the chunk
// is the last chunk of that size, remove the node for that size
// from the tree.
TreeChunk<Chunk>* getChunkFromTree(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither, bool splay);
TreeChunk<Chunk>* get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither, bool splay);
// Return a list of the specified size or NULL from the tree.
// The list is not removed from the tree.
TreeList<Chunk>* findList (size_t size) const;
TreeList<Chunk>* find_list (size_t size) const;
// Remove this chunk from the tree. If the removal results
// in an empty list in the tree, remove the empty list.
TreeChunk<Chunk>* removeChunkFromTree(TreeChunk<Chunk>* tc);
TreeChunk<Chunk>* remove_chunk_from_tree(TreeChunk<Chunk>* tc);
// Remove the node in the trees starting at tl that has the
// minimum value and return it. Repair the tree as needed.
TreeList<Chunk>* removeTreeMinimum(TreeList<Chunk>* tl);
void semiSplayStep(TreeList<Chunk>* tl);
TreeList<Chunk>* remove_tree_minimum(TreeList<Chunk>* tl);
void semi_splay_step(TreeList<Chunk>* tl);
// Add this free chunk to the tree.
void insertChunkInTree(Chunk* freeChunk);
void insert_chunk_in_tree(Chunk* freeChunk);
public:
static const size_t min_tree_chunk_size = sizeof(TreeChunk<Chunk>)/HeapWordSize;
void verifyTree() const;
void verify_tree() const;
// verify that the given chunk is in the tree.
bool verifyChunkInFreeLists(Chunk* tc) const;
bool verify_chunk_in_free_list(Chunk* tc) const;
private:
void verifyTreeHelper(TreeList<Chunk>* tl) const;
static size_t verifyPrevFreePtrs(TreeList<Chunk>* tl);
void verify_tree_helper(TreeList<Chunk>* tl) const;
static size_t verify_prev_free_ptrs(TreeList<Chunk>* tl);
// Returns the total number of chunks in the list.
size_t totalListLength(TreeList<Chunk>* tl) const;
size_t total_list_length(TreeList<Chunk>* tl) const;
// Returns the total number of words in the chunks in the tree
// starting at "tl".
size_t totalSizeInTree(TreeList<Chunk>* tl) const;
size_t total_size_in_tree(TreeList<Chunk>* tl) const;
// Returns the sum of the square of the size of each block
// in the tree starting at "tl".
double sum_of_squared_block_sizes(TreeList<Chunk>* const tl) const;
// Returns the total number of free blocks in the tree starting
// at "tl".
size_t totalFreeBlocksInTree(TreeList<Chunk>* tl) const;
size_t numFreeBlocks() const;
size_t total_free_blocks_in_tree(TreeList<Chunk>* tl) const;
size_t num_free_blocks() const;
size_t treeHeight() const;
size_t treeHeightHelper(TreeList<Chunk>* tl) const;
size_t totalNodesInTree(TreeList<Chunk>* tl) const;
size_t totalNodesHelper(TreeList<Chunk>* tl) const;
size_t tree_height_helper(TreeList<Chunk>* tl) const;
size_t total_nodes_in_tree(TreeList<Chunk>* tl) const;
size_t total_nodes_helper(TreeList<Chunk>* tl) const;
public:
// Constructor
......@@ -233,7 +233,7 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
BinaryTreeDictionary(MemRegion mr, bool adaptive_freelists, bool splay = false);
// Public accessors
size_t totalSize() const { return _totalSize; }
size_t total_size() const { return _total_size; }
// Reset the dictionary to the initial conditions with
// a single free chunk.
......@@ -245,37 +245,37 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
// Return a chunk of size "size" or greater from
// the tree.
// want a better dynamic splay strategy for the future.
Chunk* getChunk(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither) {
Chunk* get_chunk(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither) {
FreeBlockDictionary<Chunk>::verify_par_locked();
Chunk* res = getChunkFromTree(size, dither, splay());
assert(res == NULL || res->isFree(),
Chunk* res = get_chunk_from_tree(size, dither, splay());
assert(res == NULL || res->is_free(),
"Should be returning a free chunk");
return res;
}
void returnChunk(Chunk* chunk) {
void return_chunk(Chunk* chunk) {
FreeBlockDictionary<Chunk>::verify_par_locked();
insertChunkInTree(chunk);
insert_chunk_in_tree(chunk);
}
void removeChunk(Chunk* chunk) {
void remove_chunk(Chunk* chunk) {
FreeBlockDictionary<Chunk>::verify_par_locked();
removeChunkFromTree((TreeChunk<Chunk>*)chunk);
assert(chunk->isFree(), "Should still be a free chunk");
remove_chunk_from_tree((TreeChunk<Chunk>*)chunk);
assert(chunk->is_free(), "Should still be a free chunk");
}
size_t maxChunkSize() const;
size_t totalChunkSize(debug_only(const Mutex* lock)) const {
size_t max_chunk_size() const;
size_t total_chunk_size(debug_only(const Mutex* lock)) const {
debug_only(
if (lock != NULL && lock->owned_by_self()) {
assert(totalSizeInTree(root()) == totalSize(),
"_totalSize inconsistency");
assert(total_size_in_tree(root()) == total_size(),
"_total_size inconsistency");
}
)
return totalSize();
return total_size();
}
size_t minSize() const {
size_t min_size() const {
return min_tree_chunk_size;
}
......@@ -288,40 +288,40 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
// Find the list with size "size" in the binary tree and update
// the statistics in the list according to "split" (chunk was
// split or coalesce) and "birth" (chunk was added or removed).
void dictCensusUpdate(size_t size, bool split, bool birth);
void dict_census_udpate(size_t size, bool split, bool birth);
// Return true if the dictionary is overpopulated (more chunks of
// this size than desired) for size "size".
bool coalDictOverPopulated(size_t size);
bool coal_dict_over_populated(size_t size);
// Methods called at the beginning of a sweep to prepare the
// statistics for the sweep.
void beginSweepDictCensus(double coalSurplusPercent,
void begin_sweep_dict_census(double coalSurplusPercent,
float inter_sweep_current,
float inter_sweep_estimate,
float intra_sweep_estimate);
// Methods called after the end of a sweep to modify the
// statistics for the sweep.
void endSweepDictCensus(double splitSurplusPercent);
void end_sweep_dict_census(double splitSurplusPercent);
// Return the largest free chunk in the tree.
Chunk* findLargestDict() const;
Chunk* find_largest_dict() const;
// Accessors for statistics
void setTreeSurplus(double splitSurplusPercent);
void setTreeHints(void);
void set_tree_surplus(double splitSurplusPercent);
void set_tree_hints(void);
// Reset statistics for all the lists in the tree.
void clearTreeCensus(void);
void clear_tree_census(void);
// Print the statistics for all the lists in the tree. Also may
// print out summaries.
void printDictCensus(void) const;
void print_dict_census(void) const;
void print_free_lists(outputStream* st) const;
// For debugging. Returns the sum of the _returnedBytes for
// For debugging. Returns the sum of the _returned_bytes for
// all lists in the tree.
size_t sumDictReturnedBytes() PRODUCT_RETURN0;
// Sets the _returnedBytes for all the lists in the tree to zero.
void initializeDictReturnedBytes() PRODUCT_RETURN;
size_t sum_dict_returned_bytes() PRODUCT_RETURN0;
// Sets the _returned_bytes for all the lists in the tree to zero.
void initialize_dict_returned_bytes() PRODUCT_RETURN;
// For debugging. Return the total number of chunks in the dictionary.
size_t totalCount() PRODUCT_RETURN0;
size_t total_count() PRODUCT_RETURN0;
void reportStatistics() const;
void report_statistics() const;
void verify() const;
};
......
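BinaryTreeDictionary keeps one tree node per distinct chunk size, each node heading a list (TreeList) of same-size chunks; get_chunk() hands back a chunk of at least the requested size and drops the node when its list empties, keeping _total_size consistent. A minimal sketch of that discipline using std::map as a stand-in for the tree (no splaying, census statistics, or locking):

```cpp
#include <cassert>
#include <cstddef>
#include <list>
#include <map>

// size -> list of same-size free chunks; std::map stands in for the tree.
struct Chunk { std::size_t size; };

class SimpleDictionary {
  std::map<std::size_t, std::list<Chunk*>> _tree;
  std::size_t _total_size = 0;
 public:
  void return_chunk(Chunk* c) {
    _tree[c->size].push_back(c);
    _total_size += c->size;
  }
  Chunk* get_chunk(std::size_t size) {
    auto it = _tree.lower_bound(size);            // smallest size >= request ("atLeast")
    if (it == _tree.end()) return nullptr;
    Chunk* c = it->second.front();
    it->second.pop_front();
    if (it->second.empty()) _tree.erase(it);      // last chunk of that size: drop node
    _total_size -= c->size;
    return c;
  }
  std::size_t total_size() const { return _total_size; }
};

int main() {
  Chunk a{16}, b{64};
  SimpleDictionary d;
  d.return_chunk(&a);
  d.return_chunk(&b);
  Chunk* got = d.get_chunk(32);                   // best fit >= 32 is the 64-word chunk
  assert(got == &b && d.total_size() == 16);
  return 0;
}
```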
......@@ -52,8 +52,8 @@ template <class Chunk> void FreeBlockDictionary<Chunk>::set_par_lock(Mutex* lock
template <class Chunk> void FreeBlockDictionary<Chunk>::verify_par_locked() const {
#ifdef ASSERT
if (ParallelGCThreads > 0) {
Thread* myThread = Thread::current();
if (myThread->is_GC_task_thread()) {
Thread* my_thread = Thread::current();
if (my_thread->is_GC_task_thread()) {
assert(par_lock() != NULL, "Should be using locking?");
assert_lock_strong(par_lock());
}
......
......@@ -51,45 +51,45 @@ class FreeBlockDictionary: public CHeapObj {
NOT_PRODUCT(Mutex* _lock;)
public:
virtual void removeChunk(Chunk* fc) = 0;
virtual Chunk* getChunk(size_t size, Dither dither = atLeast) = 0;
virtual void returnChunk(Chunk* chunk) = 0;
virtual size_t totalChunkSize(debug_only(const Mutex* lock)) const = 0;
virtual size_t maxChunkSize() const = 0;
virtual size_t minSize() const = 0;
virtual void remove_chunk(Chunk* fc) = 0;
virtual Chunk* get_chunk(size_t size, Dither dither = atLeast) = 0;
virtual void return_chunk(Chunk* chunk) = 0;
virtual size_t total_chunk_size(debug_only(const Mutex* lock)) const = 0;
virtual size_t max_chunk_size() const = 0;
virtual size_t min_size() const = 0;
// Reset the dictionary to the initial conditions for a single
// block.
virtual void reset(HeapWord* addr, size_t size) = 0;
virtual void reset() = 0;
virtual void dictCensusUpdate(size_t size, bool split, bool birth) = 0;
virtual bool coalDictOverPopulated(size_t size) = 0;
virtual void beginSweepDictCensus(double coalSurplusPercent,
virtual void dict_census_udpate(size_t size, bool split, bool birth) = 0;
virtual bool coal_dict_over_populated(size_t size) = 0;
virtual void begin_sweep_dict_census(double coalSurplusPercent,
float inter_sweep_current, float inter_sweep_estimate,
float intra__sweep_current) = 0;
virtual void endSweepDictCensus(double splitSurplusPercent) = 0;
virtual Chunk* findLargestDict() const = 0;
virtual void end_sweep_dict_census(double splitSurplusPercent) = 0;
virtual Chunk* find_largest_dict() const = 0;
// verify that the given chunk is in the dictionary.
virtual bool verifyChunkInFreeLists(Chunk* tc) const = 0;
virtual bool verify_chunk_in_free_list(Chunk* tc) const = 0;
// Sigma_{all_free_blocks} (block_size^2)
virtual double sum_of_squared_block_sizes() const = 0;
virtual Chunk* find_chunk_ends_at(HeapWord* target) const = 0;
virtual void inc_totalSize(size_t v) = 0;
virtual void dec_totalSize(size_t v) = 0;
virtual void inc_total_size(size_t v) = 0;
virtual void dec_total_size(size_t v) = 0;
NOT_PRODUCT (
virtual size_t sumDictReturnedBytes() = 0;
virtual void initializeDictReturnedBytes() = 0;
virtual size_t totalCount() = 0;
virtual size_t sum_dict_returned_bytes() = 0;
virtual void initialize_dict_returned_bytes() = 0;
virtual size_t total_count() = 0;
)
virtual void reportStatistics() const {
virtual void report_statistics() const {
gclog_or_tty->print("No statistics available");
}
virtual void printDictCensus() const = 0;
virtual void print_dict_census() const = 0;
virtual void print_free_lists(outputStream* st) const = 0;
virtual void verify() const = 0;
......
......@@ -65,7 +65,7 @@ FreeList<Chunk>::FreeList(Chunk* fc) :
_hint = 0;
init_statistics();
#ifndef PRODUCT
_allocation_stats.set_returnedBytes(size() * HeapWordSize);
_allocation_stats.set_returned_bytes(size() * HeapWordSize);
#endif
}
......@@ -83,7 +83,7 @@ void FreeList<Chunk>::init_statistics(bool split_birth) {
}
template <class Chunk>
Chunk* FreeList<Chunk>::getChunkAtHead() {
Chunk* FreeList<Chunk>::get_chunk_at_head() {
assert_proper_lock_protection();
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
......@@ -93,7 +93,7 @@ Chunk* FreeList<Chunk>::getChunkAtHead() {
if (nextFC != NULL) {
// The chunk fc being removed has a "next". Set the "next" to the
// "prev" of fc.
nextFC->linkPrev(NULL);
nextFC->link_prev(NULL);
} else { // removed tail of list
link_tail(NULL);
}
......@@ -126,10 +126,10 @@ void FreeList<Chunk>::getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl) {
if (new_head == NULL) {
set_tail(NULL);
} else {
new_head->linkPrev(NULL);
new_head->link_prev(NULL);
}
// Now we can fix up the tail.
tl->linkNext(NULL);
tl->link_next(NULL);
// And return the result.
fl->set_tail(tl);
fl->set_count(k);
......@@ -138,7 +138,7 @@ void FreeList<Chunk>::getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl) {
// Remove this chunk from the list
template <class Chunk>
void FreeList<Chunk>::removeChunk(Chunk*fc) {
void FreeList<Chunk>::remove_chunk(Chunk*fc) {
assert_proper_lock_protection();
assert(head() != NULL, "Remove from empty list");
assert(fc != NULL, "Remove a NULL chunk");
......@@ -151,7 +151,7 @@ void FreeList<Chunk>::removeChunk(Chunk*fc) {
if (nextFC != NULL) {
// The chunk fc being removed has a "next". Set the "next" to the
// "prev" of fc.
nextFC->linkPrev(prevFC);
nextFC->link_prev(prevFC);
} else { // removed tail of list
link_tail(prevFC);
}
......@@ -160,7 +160,7 @@ void FreeList<Chunk>::removeChunk(Chunk*fc) {
assert(nextFC == NULL || nextFC->prev() == NULL,
"Prev of head should be NULL");
} else {
prevFC->linkNext(nextFC);
prevFC->link_next(nextFC);
assert(tail() != prevFC || prevFC->next() == NULL,
"Next of tail should be NULL");
}
......@@ -169,10 +169,10 @@ void FreeList<Chunk>::removeChunk(Chunk*fc) {
"H/T/C Inconsistency");
// clear next and prev fields of fc, debug only
NOT_PRODUCT(
fc->linkPrev(NULL);
fc->linkNext(NULL);
fc->link_prev(NULL);
fc->link_next(NULL);
)
assert(fc->isFree(), "Should still be a free chunk");
assert(fc->is_free(), "Should still be a free chunk");
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
assert(head() == NULL || head()->size() == size(), "wrong item on list");
......@@ -181,7 +181,7 @@ void FreeList<Chunk>::removeChunk(Chunk*fc) {
// Add this chunk at the head of the list.
template <class Chunk>
void FreeList<Chunk>::returnChunkAtHead(Chunk* chunk, bool record_return) {
void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
assert_proper_lock_protection();
assert(chunk != NULL, "insert a NULL chunk");
assert(size() == chunk->size(), "Wrong size");
......@@ -190,7 +190,7 @@ void FreeList<Chunk>::returnChunkAtHead(Chunk* chunk, bool record_return) {
Chunk* oldHead = head();
assert(chunk != oldHead, "double insertion");
chunk->linkAfter(oldHead);
chunk->link_after(oldHead);
link_head(chunk);
if (oldHead == NULL) { // only chunk in list
assert(tail() == NULL, "inconsistent FreeList");
......@@ -199,7 +199,7 @@ void FreeList<Chunk>::returnChunkAtHead(Chunk* chunk, bool record_return) {
increment_count(); // of # of chunks in list
DEBUG_ONLY(
if (record_return) {
increment_returnedBytes_by(size()*HeapWordSize);
increment_returned_bytes_by(size()*HeapWordSize);
}
)
assert(head() == NULL || head()->prev() == NULL, "list invariant");
......@@ -209,14 +209,14 @@ void FreeList<Chunk>::returnChunkAtHead(Chunk* chunk, bool record_return) {
}
template <class Chunk>
void FreeList<Chunk>::returnChunkAtHead(Chunk* chunk) {
void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
assert_proper_lock_protection();
returnChunkAtHead(chunk, true);
return_chunk_at_head(chunk, true);
}
// Add this chunk at the tail of the list.
template <class Chunk>
void FreeList<Chunk>::returnChunkAtTail(Chunk* chunk, bool record_return) {
void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
assert_proper_lock_protection();
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
......@@ -226,7 +226,7 @@ void FreeList<Chunk>::returnChunkAtTail(Chunk* chunk, bool record_return) {
Chunk* oldTail = tail();
assert(chunk != oldTail, "double insertion");
if (oldTail != NULL) {
oldTail->linkAfter(chunk);
oldTail->link_after(chunk);
} else { // only chunk in list
assert(head() == NULL, "inconsistent FreeList");
link_head(chunk);
......@@ -235,7 +235,7 @@ void FreeList<Chunk>::returnChunkAtTail(Chunk* chunk, bool record_return) {
increment_count(); // of # of chunks in list
DEBUG_ONLY(
if (record_return) {
increment_returnedBytes_by(size()*HeapWordSize);
increment_returned_bytes_by(size()*HeapWordSize);
}
)
assert(head() == NULL || head()->prev() == NULL, "list invariant");
......@@ -245,8 +245,8 @@ void FreeList<Chunk>::returnChunkAtTail(Chunk* chunk, bool record_return) {
}
template <class Chunk>
void FreeList<Chunk>::returnChunkAtTail(Chunk* chunk) {
returnChunkAtTail(chunk, true);
void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
return_chunk_at_tail(chunk, true);
}
template <class Chunk>
......@@ -262,8 +262,8 @@ void FreeList<Chunk>::prepend(FreeList<Chunk>* fl) {
Chunk* fl_tail = fl->tail();
Chunk* this_head = head();
assert(fl_tail->next() == NULL, "Well-formedness of fl");
fl_tail->linkNext(this_head);
this_head->linkPrev(fl_tail);
fl_tail->link_next(this_head);
this_head->link_prev(fl_tail);
set_head(fl->head());
set_count(count() + fl->count());
}
......@@ -273,10 +273,10 @@ void FreeList<Chunk>::prepend(FreeList<Chunk>* fl) {
}
}
// verifyChunkInFreeLists() is used to verify that an item is in this free list.
// verify_chunk_in_free_list() is used to verify that an item is in this free list.
// It is used as a debugging aid.
template <class Chunk>
bool FreeList<Chunk>::verifyChunkInFreeLists(Chunk* fc) const {
bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const {
// This is an internal consistency check, not part of the check that the
// chunk is in the free lists.
guarantee(fc->size() == size(), "Wrong list is being searched");
......@@ -302,21 +302,21 @@ void FreeList<Chunk>::verify_stats() const {
// dictionary for example, this might be the first block and
// in that case there would be no place that we could record
// the stats (which are kept in the block itself).
assert((_allocation_stats.prevSweep() + _allocation_stats.splitBirths()
+ _allocation_stats.coalBirths() + 1) // Total Production Stock + 1
>= (_allocation_stats.splitDeaths() + _allocation_stats.coalDeaths()
assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
+ _allocation_stats.coal_births() + 1) // Total Production Stock + 1
>= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
+ (ssize_t)count()), // Total Current Stock + depletion
err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
" violates Conservation Principle: "
"prevSweep(" SIZE_FORMAT ")"
" + splitBirths(" SIZE_FORMAT ")"
" + coalBirths(" SIZE_FORMAT ") + 1 >= "
" splitDeaths(" SIZE_FORMAT ")"
" coalDeaths(" SIZE_FORMAT ")"
"prev_sweep(" SIZE_FORMAT ")"
" + split_births(" SIZE_FORMAT ")"
" + coal_births(" SIZE_FORMAT ") + 1 >= "
" split_deaths(" SIZE_FORMAT ")"
" coal_deaths(" SIZE_FORMAT ")"
" + count(" SSIZE_FORMAT ")",
this, _size, _allocation_stats.prevSweep(), _allocation_stats.splitBirths(),
_allocation_stats.coalBirths(), _allocation_stats.splitDeaths(),
_allocation_stats.coalDeaths(), count()));
this, _size, _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
_allocation_stats.coal_births(), _allocation_stats.split_deaths(),
_allocation_stats.coal_deaths(), count()));
}
template <class Chunk>
......@@ -360,8 +360,8 @@ void FreeList<Chunk>::print_on(outputStream* st, const char* c) const {
st->print("\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
bfrSurp(), surplus(), desired(), prevSweep(), beforeSweep(),
count(), coalBirths(), coalDeaths(), splitBirths(), splitDeaths());
bfr_surp(), surplus(), desired(), prev_sweep(), before_sweep(),
count(), coal_births(), coal_deaths(), split_births(), split_deaths());
}
#ifndef SERIALGC
......
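return_chunk_at_head() and remove_chunk() above are standard doubly-linked-list operations, complicated only by the head/tail invariants asserted throughout. A compact sketch of the same linking, minus FreeChunk's tagged _prev encoding and the statistics updates:

```cpp
#include <cassert>
#include <cstddef>

// A doubly-linked list of same-size chunks with explicit head/tail, where
// removing a chunk must repair whichever of head/tail/neighbours pointed at it.
struct Node { Node* prev = nullptr; Node* next = nullptr; };

struct List {
  Node* head = nullptr;
  Node* tail = nullptr;
  std::size_t count = 0;

  void return_at_head(Node* n) {           // like return_chunk_at_head()
    n->prev = nullptr;
    n->next = head;
    if (head != nullptr) head->prev = n; else tail = n;
    head = n;
    ++count;
  }
  void remove(Node* n) {                   // like remove_chunk()
    if (n->prev != nullptr) n->prev->next = n->next; else head = n->next;
    if (n->next != nullptr) n->next->prev = n->prev; else tail = n->prev;
    n->prev = n->next = nullptr;           // debug-only scrub in HotSpot
    --count;
  }
};

int main() {
  Node a, b, c;
  List l;
  l.return_at_head(&c); l.return_at_head(&b); l.return_at_head(&a);  // a, b, c
  l.remove(&b);
  assert(l.head == &a && l.tail == &c && a.next == &c && c.prev == &a && l.count == 2);
  return 0;
}
```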
......@@ -119,7 +119,7 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
// If this method is not used (just set the head instead),
// this check can be avoided.
if (v != NULL) {
v->linkPrev(NULL);
v->link_prev(NULL);
}
}
......@@ -138,7 +138,7 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
assert_proper_lock_protection();
set_tail(v);
if (v != NULL) {
v->clearNext();
v->clear_next();
}
}
......@@ -185,12 +185,12 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
inter_sweep_estimate,
intra_sweep_estimate);
}
ssize_t coalDesired() const {
return _allocation_stats.coalDesired();
ssize_t coal_desired() const {
return _allocation_stats.coal_desired();
}
void set_coalDesired(ssize_t v) {
void set_coal_desired(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coalDesired(v);
_allocation_stats.set_coal_desired(v);
}
ssize_t surplus() const {
......@@ -209,106 +209,106 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
_allocation_stats.decrement_surplus();
}
ssize_t bfrSurp() const {
return _allocation_stats.bfrSurp();
ssize_t bfr_surp() const {
return _allocation_stats.bfr_surp();
}
void set_bfrSurp(ssize_t v) {
void set_bfr_surp(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_bfrSurp(v);
_allocation_stats.set_bfr_surp(v);
}
ssize_t prevSweep() const {
return _allocation_stats.prevSweep();
ssize_t prev_sweep() const {
return _allocation_stats.prev_sweep();
}
void set_prevSweep(ssize_t v) {
void set_prev_sweep(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_prevSweep(v);
_allocation_stats.set_prev_sweep(v);
}
ssize_t beforeSweep() const {
return _allocation_stats.beforeSweep();
ssize_t before_sweep() const {
return _allocation_stats.before_sweep();
}
void set_beforeSweep(ssize_t v) {
void set_before_sweep(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_beforeSweep(v);
_allocation_stats.set_before_sweep(v);
}
ssize_t coalBirths() const {
return _allocation_stats.coalBirths();
ssize_t coal_births() const {
return _allocation_stats.coal_births();
}
void set_coalBirths(ssize_t v) {
void set_coal_births(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coalBirths(v);
_allocation_stats.set_coal_births(v);
}
void increment_coalBirths() {
void increment_coal_births() {
assert_proper_lock_protection();
_allocation_stats.increment_coalBirths();
_allocation_stats.increment_coal_births();
}
ssize_t coalDeaths() const {
return _allocation_stats.coalDeaths();
ssize_t coal_deaths() const {
return _allocation_stats.coal_deaths();
}
void set_coalDeaths(ssize_t v) {
void set_coal_deaths(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coalDeaths(v);
_allocation_stats.set_coal_deaths(v);
}
void increment_coalDeaths() {
void increment_coal_deaths() {
assert_proper_lock_protection();
_allocation_stats.increment_coalDeaths();
_allocation_stats.increment_coal_deaths();
}
ssize_t splitBirths() const {
return _allocation_stats.splitBirths();
ssize_t split_births() const {
return _allocation_stats.split_births();
}
void set_splitBirths(ssize_t v) {
void set_split_births(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_splitBirths(v);
_allocation_stats.set_split_births(v);
}
void increment_splitBirths() {
void increment_split_births() {
assert_proper_lock_protection();
_allocation_stats.increment_splitBirths();
_allocation_stats.increment_split_births();
}
ssize_t splitDeaths() const {
return _allocation_stats.splitDeaths();
ssize_t split_deaths() const {
return _allocation_stats.split_deaths();
}
void set_splitDeaths(ssize_t v) {
void set_split_deaths(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_splitDeaths(v);
_allocation_stats.set_split_deaths(v);
}
void increment_splitDeaths() {
void increment_split_deaths() {
assert_proper_lock_protection();
_allocation_stats.increment_splitDeaths();
_allocation_stats.increment_split_deaths();
}
NOT_PRODUCT(
// For debugging. The "_returnedBytes" in all the lists are summed
// For debugging. The "_returned_bytes" in all the lists are summed
// and compared with the total number of bytes swept during a
// collection.
size_t returnedBytes() const { return _allocation_stats.returnedBytes(); }
void set_returnedBytes(size_t v) { _allocation_stats.set_returnedBytes(v); }
void increment_returnedBytes_by(size_t v) {
_allocation_stats.set_returnedBytes(_allocation_stats.returnedBytes() + v);
size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
void increment_returned_bytes_by(size_t v) {
_allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
}
)
// Unlink head of list and return it. Returns NULL if
// the list is empty.
Chunk* getChunkAtHead();
Chunk* get_chunk_at_head();
// Remove the first "n" or "count", whichever is smaller, chunks from the
// list, setting "fl", which is required to be empty, to point to them.
void getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl);
// Unlink this chunk from it's free list
void removeChunk(Chunk* fc);
void remove_chunk(Chunk* fc);
// Add this chunk to this free list.
void returnChunkAtHead(Chunk* fc);
void returnChunkAtTail(Chunk* fc);
void return_chunk_at_head(Chunk* fc);
void return_chunk_at_tail(Chunk* fc);
// Similar to returnChunk* but also records some diagnostic
// information.
void returnChunkAtHead(Chunk* fc, bool record_return);
void returnChunkAtTail(Chunk* fc, bool record_return);
void return_chunk_at_head(Chunk* fc, bool record_return);
void return_chunk_at_tail(Chunk* fc, bool record_return);
// Prepend "fl" (whose size is required to be the same as that of "this")
// to the front of "this" list.
......@@ -316,7 +316,7 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
// Verify that the chunk is in the list.
// found. Return NULL if "fc" is not found.
bool verifyChunkInFreeLists(Chunk* fc) const;
bool verify_chunk_in_free_list(Chunk* fc) const;
// Stats verification
void verify_stats() const PRODUCT_RETURN;
......