Commit ccabe237 authored by: J jmasa

7045397: NPG: Add freelists to class loader arenas.

Reviewed-by: coleenp, stefank, jprovino, ohair
Parent: 2e80078c
@@ -79,10 +79,10 @@ ifeq ($(INCLUDE_ALTERNATE_GCS), false)
   CXXFLAGS += -DSERIALGC
   CFLAGS += -DSERIALGC
   Src_Files_EXCLUDE += \
-	binaryTreeDictionary.cpp cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
+	cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
 	cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp cmsPermGen.cpp compactibleFreeListSpace.cpp \
-	concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp freeBlockDictionary.cpp \
-	freeChunk.cpp freeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
+	concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \
+	freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
 	concurrentG1Refine.cpp concurrentG1RefineThread.cpp concurrentMark.cpp concurrentMarkThread.cpp \
 	dirtyCardQueue.cpp g1AllocRegion.cpp g1BlockOffsetTable.cpp g1CollectedHeap.cpp g1GCPhaseTimes.cpp \
 	g1CollectorPolicy.cpp g1ErgoVerbose.cpp g1_globals.cpp g1HRPrinter.cpp g1MarkSweep.cpp \
......
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/vmThread.hpp"
template <>
void AdaptiveFreeList<FreeChunk>::print_on(outputStream* st, const char* c) const {
if (c != NULL) {
st->print("%16s", c);
} else {
st->print(SIZE_FORMAT_W(16), size());
}
st->print("\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
bfr_surp(), surplus(), desired(), prev_sweep(), before_sweep(),
count(), coal_births(), coal_deaths(), split_births(), split_deaths());
}
template <class Chunk>
AdaptiveFreeList<Chunk>::AdaptiveFreeList() : FreeList<Chunk>(), _hint(0) {
init_statistics();
}
template <class Chunk>
AdaptiveFreeList<Chunk>::AdaptiveFreeList(Chunk* fc) : FreeList<Chunk>(fc), _hint(0) {
init_statistics();
#ifndef PRODUCT
_allocation_stats.set_returned_bytes(size() * HeapWordSize);
#endif
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::initialize() {
FreeList<Chunk>::initialize();
set_hint(0);
init_statistics(true /* split_birth */);
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::reset(size_t hint) {
FreeList<Chunk>::reset();
set_hint(hint);
}
#ifndef PRODUCT
template <class Chunk>
void AdaptiveFreeList<Chunk>::assert_proper_lock_protection_work() const {
assert(protecting_lock() != NULL, "Don't call this directly");
assert(ParallelGCThreads > 0, "Don't call this directly");
Thread* thr = Thread::current();
if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
// assert that we are holding the freelist lock
} else if (thr->is_GC_task_thread()) {
assert(protecting_lock()->owned_by_self(), "FreeList RACE DETECTED");
} else if (thr->is_Java_thread()) {
assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
} else {
ShouldNotReachHere(); // unaccounted thread type?
}
}
#endif
template <class Chunk>
void AdaptiveFreeList<Chunk>::init_statistics(bool split_birth) {
_allocation_stats.initialize(split_birth);
}
template <class Chunk>
size_t AdaptiveFreeList<Chunk>::get_better_size() {
  // A candidate chunk has been found.  If it is already under
  // populated and there is a hint, return the hint().  Else
  // return the size of this chunk.
if (surplus() <= 0) {
if (hint() != 0) {
return hint();
} else {
return size();
}
} else {
// This list has a surplus so use it.
return size();
}
}
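// Worked example (editorial illustration, invented numbers; not shipped
// code): if this is the 8-word list with surplus() == -3 and
// hint() == 16, the list is under populated, so get_better_size()
// returns 16 and the caller carves the request out of a 16-word chunk
// instead of draining this list further.  With a positive surplus, or
// with no hint recorded, it returns size() and the request is satisfied
// from this list directly.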
template <class Chunk>
void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
assert_proper_lock_protection();
return_chunk_at_head(chunk, true);
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
FreeList<Chunk>::return_chunk_at_head(chunk, record_return);
#ifdef ASSERT
if (record_return) {
increment_returned_bytes_by(size()*HeapWordSize);
}
#endif
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
return_chunk_at_tail(chunk, true);
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
FreeList<Chunk>::return_chunk_at_tail(chunk, record_return);
#ifdef ASSERT
if (record_return) {
increment_returned_bytes_by(size()*HeapWordSize);
}
#endif
}
#ifndef PRODUCT
template <class Chunk>
void AdaptiveFreeList<Chunk>::verify_stats() const {
// The +1 of the LH comparand is to allow some "looseness" in
// checking: we usually call this interface when adding a block
// and we'll subsequently update the stats; we cannot update the
// stats beforehand because in the case of the large-block BT
// dictionary for example, this might be the first block and
// in that case there would be no place that we could record
// the stats (which are kept in the block itself).
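  // Illustrative instance (editorial, invented numbers): prev_sweep() == 4,
  // split_births() == 2 and coal_births() == 1 give a left-hand side of
  // 4 + 2 + 1 + 1 == 8, which must bound split_deaths() + coal_deaths()
  // + count(), e.g. 1 + 1 + 5 == 7: the chunks now on the list can never
  // exceed everything ever produced for it, plus the allowed slack of one.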
  assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
          + _allocation_stats.coal_births() + 1)   // Total Production Stock + 1
         >= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
             + (ssize_t)count()),                  // Total Current Stock + depletion
         err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
                 " violates Conservation Principle: "
                 "prev_sweep(" SIZE_FORMAT ")"
                 " + split_births(" SIZE_FORMAT ")"
                 " + coal_births(" SIZE_FORMAT ") + 1 >= "
                 " split_deaths(" SIZE_FORMAT ")"
                 " + coal_deaths(" SIZE_FORMAT ")"
                 " + count(" SSIZE_FORMAT ")",
                 this, size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
                 _allocation_stats.coal_births(), _allocation_stats.split_deaths(),
                 _allocation_stats.coal_deaths(), count()));
}
#endif
// Needs to be after the definitions have been seen.
template class AdaptiveFreeList<FreeChunk>;
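
The explicit instantiation above works together with the template template parameter this changeset threads through TreeList and BinaryTreeDictionary. A standalone sketch of that pattern follows (editorial illustration: simplified names and bodies, not code from the patch); it shows why parameterizing the dictionary over the list template, rather than a concrete list type, lets one dictionary body embed either list flavor per size class:

  #include <cstddef>

  template <class Chunk> class PlainList {        // stand-in for FreeList<Chunk>
   protected:
    size_t _size;                                 // word size of chunks on this list
   public:
    PlainList() : _size(0) {}
    void set_size(size_t v) { _size = v; }
    size_t size() const { return _size; }
  };

  template <class Chunk> class AdaptiveList : public PlainList<Chunk> {
    size_t _hint;                                 // next larger size with a surplus
   public:
    AdaptiveList() : _hint(0) {}
    void set_hint(size_t v) { _hint = v; }
    size_t hint() const { return _hint; }
  };

  // Generic over the list template, so FreeList_t<Chunk_t> can be
  // instantiated inside the dictionary itself.
  template <class Chunk_t, template <class> class FreeList_t>
  class DictionarySketch {
    FreeList_t<Chunk_t> _list;                    // e.g. an embedded per-size list
   public:
    FreeList_t<Chunk_t>* list() { return &_list; }
  };

  struct ChunkSketch {};
  // Analogous to the AFLBinaryTreeDictionary typedef added in vmStructs_cms.hpp:
  typedef DictionarySketch<ChunkSketch, AdaptiveList> AFLDictionarySketch;
  // And to the explicit instantiation above:
  template class AdaptiveList<ChunkSketch>;

  int main() {
    AFLDictionarySketch d;
    d.list()->set_size(128);
    d.list()->set_hint(256);
    return d.list()->hint() == 256 ? 0 : 1;
  }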
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
#define SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
#include "memory/freeList.hpp"
#include "gc_implementation/shared/allocationStats.hpp"
class CompactibleFreeListSpace;
// A class for maintaining a free list of Chunk's.  The FreeList
// maintains the structure of the list (head, tail, etc.) plus
// statistics for allocations from the list.  The links between items
// are not part of FreeList.  The statistics are
// used to make decisions about coalescing Chunk's when they
// are swept during collection.
//
// See the corresponding .cpp file for a description of the specifics
// for that implementation.
class Mutex;
template <class Chunk>
class AdaptiveFreeList : public FreeList<Chunk> {
friend class CompactibleFreeListSpace;
friend class VMStructs;
// friend class PrintTreeCensusClosure<Chunk, FreeList_t>;
size_t _hint; // next larger size list with a positive surplus
AllocationStats _allocation_stats; // allocation-related statistics
public:
AdaptiveFreeList();
AdaptiveFreeList(Chunk* fc);
using FreeList<Chunk>::assert_proper_lock_protection;
#ifdef ASSERT
using FreeList<Chunk>::protecting_lock;
#endif
using FreeList<Chunk>::count;
using FreeList<Chunk>::size;
using FreeList<Chunk>::verify_chunk_in_free_list;
using FreeList<Chunk>::getFirstNChunksFromList;
using FreeList<Chunk>::print_on;
void return_chunk_at_head(Chunk* fc, bool record_return);
void return_chunk_at_head(Chunk* fc);
void return_chunk_at_tail(Chunk* fc, bool record_return);
void return_chunk_at_tail(Chunk* fc);
using FreeList<Chunk>::return_chunk_at_tail;
using FreeList<Chunk>::remove_chunk;
using FreeList<Chunk>::prepend;
using FreeList<Chunk>::print_labels_on;
using FreeList<Chunk>::get_chunk_at_head;
// Initialize.
void initialize();
// Reset the head, tail, hint, and count of a free list.
void reset(size_t hint);
void assert_proper_lock_protection_work() const PRODUCT_RETURN;
void print_on(outputStream* st, const char* c = NULL) const;
size_t hint() const {
return _hint;
}
void set_hint(size_t v) {
assert_proper_lock_protection();
assert(v == 0 || size() < v, "Bad hint");
_hint = v;
}
size_t get_better_size();
// Accessors for statistics
void init_statistics(bool split_birth = false);
AllocationStats* allocation_stats() {
assert_proper_lock_protection();
return &_allocation_stats;
}
ssize_t desired() const {
return _allocation_stats.desired();
}
void set_desired(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_desired(v);
}
void compute_desired(float inter_sweep_current,
float inter_sweep_estimate,
float intra_sweep_estimate) {
assert_proper_lock_protection();
_allocation_stats.compute_desired(count(),
inter_sweep_current,
inter_sweep_estimate,
intra_sweep_estimate);
}
ssize_t coal_desired() const {
return _allocation_stats.coal_desired();
}
void set_coal_desired(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coal_desired(v);
}
ssize_t surplus() const {
return _allocation_stats.surplus();
}
void set_surplus(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_surplus(v);
}
void increment_surplus() {
assert_proper_lock_protection();
_allocation_stats.increment_surplus();
}
void decrement_surplus() {
assert_proper_lock_protection();
_allocation_stats.decrement_surplus();
}
ssize_t bfr_surp() const {
return _allocation_stats.bfr_surp();
}
void set_bfr_surp(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_bfr_surp(v);
}
ssize_t prev_sweep() const {
return _allocation_stats.prev_sweep();
}
void set_prev_sweep(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_prev_sweep(v);
}
ssize_t before_sweep() const {
return _allocation_stats.before_sweep();
}
void set_before_sweep(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_before_sweep(v);
}
ssize_t coal_births() const {
return _allocation_stats.coal_births();
}
void set_coal_births(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coal_births(v);
}
void increment_coal_births() {
assert_proper_lock_protection();
_allocation_stats.increment_coal_births();
}
ssize_t coal_deaths() const {
return _allocation_stats.coal_deaths();
}
void set_coal_deaths(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coal_deaths(v);
}
void increment_coal_deaths() {
assert_proper_lock_protection();
_allocation_stats.increment_coal_deaths();
}
ssize_t split_births() const {
return _allocation_stats.split_births();
}
void set_split_births(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_split_births(v);
}
void increment_split_births() {
assert_proper_lock_protection();
_allocation_stats.increment_split_births();
}
ssize_t split_deaths() const {
return _allocation_stats.split_deaths();
}
void set_split_deaths(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_split_deaths(v);
}
void increment_split_deaths() {
assert_proper_lock_protection();
_allocation_stats.increment_split_deaths();
}
#ifndef PRODUCT
// For debugging. The "_returned_bytes" in all the lists are summed
// and compared with the total number of bytes swept during a
// collection.
size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
void increment_returned_bytes_by(size_t v) {
_allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
}
// Stats verification
void verify_stats() const;
#endif // NOT PRODUCT
};
#endif // SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
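
Before the diffs continue, a hedged sketch of how the census fields declared above are typically consumed (modeled on CompactibleFreeListSpace::setFLSurplus() in the hunks that follow; the demand numbers are invented for illustration):

  #include <cstdio>

  struct CensusSketch {
    long count;    // chunks currently on this size's list
    long desired;  // demand estimate carried over from previous sweeps
  };

  // A list is over-populated when it holds more chunks than a tunable
  // fraction of its desired level; the excess is the surplus that
  // bestFitSmall() and get_better_size() are willing to split.
  long surplus_of(const CensusSketch& c, double split_surplus_percent) {
    return c.count - (long)((double)c.desired * split_surplus_percent);
  }

  int main() {
    CensusSketch size40 = { 120, 100 };
    // With a CMSSmallSplitSurplusPercent-style factor of 1.10: 120 - 110 = 10
    std::printf("surplus = %ld\n", surplus_of(size40, 1.10));
    return 0;
  }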
@@ -91,7 +91,7 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
   _collector(NULL)
 {
   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
          "FreeChunk is larger than expected");
   _bt.set_space(this);
   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
   // We have all of "mr", all of which we place in the dictionary
@@ -101,14 +101,14 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
   // implementation, namely, the simple binary tree (splaying
   // temporarily disabled).
   switch (dictionaryChoice) {
+    case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
+      _dictionary = new BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>(mr);
+      break;
     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
     default:
       warning("dictionaryChoice: selected option not understood; using"
               " default BinaryTreeDictionary implementation instead.");
-    case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
-      _dictionary = new BinaryTreeDictionary<FreeChunk>(mr, use_adaptive_freelists);
-      break;
   }
   assert(_dictionary != NULL, "CMS dictionary initialization");
   // The indexed free lists are initially all empty and are lazily
@@ -453,7 +453,7 @@ const {
   reportIndexedFreeListStatistics();
   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   gclog_or_tty->print_cr("---------------------------");
-  FreeList<FreeChunk>::print_labels_on(st, "size");
+  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
     _indexedFreeList[i].print_on(gclog_or_tty);
     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
@@ -1319,7 +1319,7 @@ FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
   size_t currSize = numWords + MinChunkSize;
   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
-    FreeList<FreeChunk>* fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
     if (fl->head()) {
       ret = getFromListGreater(fl, numWords);
       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
@@ -1702,7 +1702,9 @@ CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
   _dictionary->return_chunk(chunk);
 #ifndef PRODUCT
   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
-    TreeChunk<FreeChunk>::as_TreeChunk(chunk)->list()->verify_stats();
+    TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
+    TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
+    tl->verify_stats();
   }
 #endif // PRODUCT
 }
@@ -1745,7 +1747,7 @@ CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
   {
     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
     ec = dictionary()->find_largest_dict();  // get largest block
-    if (ec != NULL && ec->end() == chunk) {
+    if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
       // It's a coterminal block - we can coalesce.
       size_t old_size = ec->size();
       coalDeath(old_size);
@@ -1850,11 +1852,11 @@ FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
      the excess is >= MIN_CHUNK. */
   size_t start = align_object_size(numWords + MinChunkSize);
   if (start < IndexSetSize) {
-    FreeList<FreeChunk>* it = _indexedFreeList;
+    AdaptiveFreeList<FreeChunk>* it = _indexedFreeList;
     size_t hint = _indexedFreeList[start].hint();
     while (hint < IndexSetSize) {
       assert(hint % MinObjAlignment == 0, "hint should be aligned");
-      FreeList<FreeChunk> *fl = &_indexedFreeList[hint];
+      AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
       if (fl->surplus() > 0 && fl->head() != NULL) {
         // Found a list with surplus, reset original hint
         // and split out a free chunk which is returned.
@@ -1873,7 +1875,7 @@ FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
 }

 /* Requires fl->size >= numWords + MinChunkSize */
-FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList<FreeChunk>* fl,
+FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
                                                         size_t numWords) {
   FreeChunk *curr = fl->head();
   size_t oldNumWords = curr->size();
@@ -2155,7 +2157,7 @@ void CompactibleFreeListSpace::beginSweepFLCensus(
   assert_locked();
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList<FreeChunk>* fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
     if (PrintFLSStatistics > 1) {
       gclog_or_tty->print("size[%d] : ", i);
     }
@@ -2174,7 +2176,7 @@ void CompactibleFreeListSpace::setFLSurplus() {
   assert_locked();
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
     fl->set_surplus(fl->count() -
                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
   }
@@ -2185,7 +2187,7 @@ void CompactibleFreeListSpace::setFLHints() {
   size_t i;
   size_t h = IndexSetSize;
   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
-    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
     fl->set_hint(h);
     if (fl->surplus() > 0) {
       h = i;
@@ -2197,7 +2199,7 @@ void CompactibleFreeListSpace::clearFLCensus() {
   assert_locked();
   size_t i;
   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    FreeList<FreeChunk> *fl = &_indexedFreeList[i];
+    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
     fl->set_prev_sweep(fl->count());
     fl->set_coal_births(0);
     fl->set_coal_deaths(0);
@@ -2224,7 +2226,7 @@ void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
   if (size < SmallForDictionary) {
-    FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
     return (fl->coal_desired() < 0) ||
            ((int)fl->count() > fl->coal_desired());
   } else {
@@ -2234,14 +2236,14 @@ bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   fl->increment_coal_births();
   fl->increment_surplus();
 }

 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   fl->increment_coal_deaths();
   fl->decrement_surplus();
 }
@@ -2250,7 +2252,7 @@ void CompactibleFreeListSpace::coalBirth(size_t size) {
   if (size < SmallForDictionary) {
     smallCoalBirth(size);
   } else {
-    dictionary()->dict_census_udpate(size,
+    dictionary()->dict_census_update(size,
                                      false /* split */,
                                      true /* birth */);
   }
@@ -2260,7 +2262,7 @@ void CompactibleFreeListSpace::coalDeath(size_t size) {
   if(size < SmallForDictionary) {
     smallCoalDeath(size);
   } else {
-    dictionary()->dict_census_udpate(size,
+    dictionary()->dict_census_update(size,
                                      false /* split */,
                                      false /* birth */);
   }
@@ -2268,14 +2270,14 @@ void CompactibleFreeListSpace::coalDeath(size_t size) {
 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   fl->increment_split_births();
   fl->increment_surplus();
 }

 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
   assert(size < SmallForDictionary, "Size too large for indexed list");
-  FreeList<FreeChunk> *fl = &_indexedFreeList[size];
+  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
   fl->increment_split_deaths();
   fl->decrement_surplus();
 }
@@ -2284,7 +2286,7 @@ void CompactibleFreeListSpace::split_birth(size_t size) {
   if (size < SmallForDictionary) {
     smallSplitBirth(size);
   } else {
-    dictionary()->dict_census_udpate(size,
+    dictionary()->dict_census_update(size,
                                      true /* split */,
                                      true /* birth */);
   }
@@ -2294,7 +2296,7 @@ void CompactibleFreeListSpace::splitDeath(size_t size) {
   if (size < SmallForDictionary) {
     smallSplitDeath(size);
   } else {
-    dictionary()->dict_census_udpate(size,
+    dictionary()->dict_census_update(size,
                                      true /* split */,
                                      false /* birth */);
   }
@@ -2517,10 +2519,10 @@ void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
 #ifndef PRODUCT
 void CompactibleFreeListSpace::check_free_list_consistency() const {
-  assert(_dictionary->min_size() <= IndexSetSize,
+  assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
          "Some sizes can't be allocated without recourse to"
          " linear allocation buffers");
-  assert(BinaryTreeDictionary<FreeChunk>::min_tree_chunk_size*HeapWordSize == sizeof(TreeChunk<FreeChunk>),
+  assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
          "else MIN_TREE_CHUNK_SIZE is wrong");
   assert(IndexSetStart != 0, "IndexSetStart not initialized");
   assert(IndexSetStride != 0, "IndexSetStride not initialized");
@@ -2529,15 +2531,15 @@ void CompactibleFreeListSpace::check_free_list_consistency() const {
 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
   assert_lock_strong(&_freelistLock);
-  FreeList<FreeChunk> total;
+  AdaptiveFreeList<FreeChunk> total;
   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
-  FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
+  AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
   size_t total_free = 0;
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    const FreeList<FreeChunk> *fl = &_indexedFreeList[i];
+    const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
     total_free += fl->count() * fl->size();
     if (i % (40*IndexSetStride) == 0) {
-      FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
+      AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
     }
     fl->print_on(gclog_or_tty);
     total.set_bfr_surp(total.bfr_surp() + fl->bfr_surp());
@@ -2620,7 +2622,7 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
     res = _cfls->getChunkFromDictionaryExact(word_sz);
     if (res == NULL) return NULL;
   } else {
-    FreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
+    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
     if (fl->count() == 0) {
       // Attempt to refill this local free list.
       get_from_global_pool(word_sz, fl);
@@ -2640,7 +2642,7 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {

 // Get a chunk of blocks of the right size and update related
 // book-keeping stats
-void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl) {
+void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
   // Get the #blocks we want to claim
   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
   assert(n_blks > 0, "Error");
@@ -2722,7 +2724,7 @@ void CFLS_LAB::retire(int tid) {
     if (num_retire > 0) {
       _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
       // Reset this list.
-      _indexedFreeList[i] = FreeList<FreeChunk>();
+      _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
       _indexedFreeList[i].set_size(i);
     }
   }
@@ -2736,7 +2738,7 @@ void CFLS_LAB::retire(int tid) {
   }
 }

-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl) {
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
   assert(fl->count() == 0, "Precondition.");
   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
          "Precondition");
@@ -2752,12 +2754,12 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
          (CMSSplitIndexedFreeListBlocks || k <= 1);
        k++, cur_sz = k * word_sz) {
-    FreeList<FreeChunk> fl_for_cur_sz;  // Empty.
+    AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
     fl_for_cur_sz.set_size(cur_sz);
     {
       MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                       Mutex::_no_safepoint_check_flag);
-      FreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
+      AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
       if (gfl->count() != 0) {
         // nn is the number of chunks of size cur_sz that
         // we'd need to split k-ways each, in order to create
@@ -2832,12 +2834,11 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
     MutexLockerEx x(parDictionaryAllocLock(),
                     Mutex::_no_safepoint_check_flag);
     while (n > 0) {
-      fc = dictionary()->get_chunk(MAX2(n * word_sz,
-                                        _dictionary->min_size()),
+      fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
                                    FreeBlockDictionary<FreeChunk>::atLeast);
       if (fc != NULL) {
         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
-        dictionary()->dict_census_udpate(fc->size(),
+        dictionary()->dict_census_update(fc->size(),
                                          true /*split*/,
                                          false /*birth*/);
         break;
@@ -2890,7 +2891,7 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
       fc->set_size(prefix_size);
       if (rem >= IndexSetSize) {
         returnChunkToDictionary(rem_fc);
-        dictionary()->dict_census_udpate(rem, true /*split*/, true /*birth*/);
+        dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
         rem_fc = NULL;
       }
       // Otherwise, return it to the small list below.
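
A worked instance of the splitting arithmetic above (editorial illustration with invented numbers): for word_sz == 8 and n == 6 requested blocks, the loop first considers cur_sz == 16 (k == 2); each 16-word chunk splits two ways, so about 6/2 == 3 chunks would be taken from the 16-word list. Only if the indexed lists cannot supply the request does the code fall back to carving a large chunk out of the dictionary, recording the split births and deaths through the dict_census_update() calls shown above.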
......
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP

+#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
 #include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
@@ -38,6 +39,7 @@
 class CompactibleFreeListSpace;
 class BlkClosure;
 class BlkClosureCareful;
+class FreeChunk;
 class UpwardsObjectClosure;
 class ObjectClosureCareful;
 class Klass;
@@ -131,7 +133,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
   FreeBlockDictionary<FreeChunk>* _dictionary;  // ptr to dictionary for large size blocks
-  FreeList<FreeChunk> _indexedFreeList[IndexSetSize];
+  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
                                                 // indexed array for small size blocks
   // allocation strategy
   bool _fitStrategy;  // Use best fit strategy.
@@ -168,7 +170,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // If the count of "fl" is negative, its absolute value indicates a
   // number of free chunks that had been previously "borrowed" from global
   // list of size "word_sz", and must now be decremented.
-  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl);
+  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
// and return it. The split off remainder is returned to // and return it. The split off remainder is returned to
// the free lists. The old name for getFromListGreater // the free lists. The old name for getFromListGreater
// was lookInListGreater. // was lookInListGreater.
FreeChunk* getFromListGreater(FreeList<FreeChunk>* fl, size_t numWords); FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
// Get a chunk in the indexed free list or dictionary, // Get a chunk in the indexed free list or dictionary,
// by considering a larger chunk and splitting it. // by considering a larger chunk and splitting it.
FreeChunk* getChunkFromGreater(size_t numWords); FreeChunk* getChunkFromGreater(size_t numWords);
...@@ -621,7 +623,7 @@ class CFLS_LAB : public CHeapObj<mtGC> { ...@@ -621,7 +623,7 @@ class CFLS_LAB : public CHeapObj<mtGC> {
CompactibleFreeListSpace* _cfls; CompactibleFreeListSpace* _cfls;
// Our local free lists. // Our local free lists.
FreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize]; AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
// Initialized from a command-line arg. // Initialized from a command-line arg.
...@@ -634,7 +636,7 @@ class CFLS_LAB : public CHeapObj<mtGC> { ...@@ -634,7 +636,7 @@ class CFLS_LAB : public CHeapObj<mtGC> {
size_t _num_blocks [CompactibleFreeListSpace::IndexSetSize]; size_t _num_blocks [CompactibleFreeListSpace::IndexSetSize];
// Internal work method // Internal work method
void get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl); void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);
public: public:
CFLS_LAB(CompactibleFreeListSpace* cfls); CFLS_LAB(CompactibleFreeListSpace* cfls);
......
@@ -9143,7 +9143,7 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
     size_t shrinkable_size_in_bytes = chunk_at_end->size();
     size_t aligned_shrinkable_size_in_bytes =
       align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
-    assert(unallocated_start <= chunk_at_end->end(),
+    assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
            "Inconsistent chunk at end of space");
     size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
     size_t word_size_before = heap_word_size(_virtual_space.committed_size());
@@ -9210,7 +9210,7 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
     assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
            "Inconsistency at end of space");
-    assert(chunk_at_end->end() == _cmsSpace->end(),
+    assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
            "Shrinking is inconsistent");
     return;
   }
......
@@ -133,7 +133,7 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
   }

   // Return the address past the end of this chunk
-  HeapWord* end() const { return ((HeapWord*) this) + size(); }
+  uintptr_t* end() const { return ((uintptr_t*) this) + size(); }

   // debugging
   void verify() const PRODUCT_RETURN;
......
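
The freeChunk.hpp hunk above retypes FreeChunk::end() from HeapWord* to uintptr_t*, which is what forces the explicit casts added earlier in compactibleFreeListSpace.cpp and concurrentMarkSweepGeneration.cpp. A minimal illustration of the language rule at play (stand-in types; not code from the patch):

  #include <cstdint>

  struct HeapWordLike;  // stand-in for HeapWord

  void compare(uintptr_t* chunk_end, HeapWordLike* space_end) {
    // if (chunk_end == space_end) {}            // ill-formed: unrelated pointer types
    if (chunk_end == (uintptr_t*) space_end) {}  // compiles, matching the casts in the patch
  }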
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_VMSTRUCTS_CMS_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_VMSTRUCTS_CMS_HPP

+typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList> AFLBinaryTreeDictionary;
+
 #define VM_STRUCTS_CMS(nonstatic_field, \
                        volatile_nonstatic_field, \
                        static_field) \
@@ -38,14 +40,8 @@
   nonstatic_field(CMSCollector, _markBitMap, CMSBitMap) \
   nonstatic_field(ConcurrentMarkSweepGeneration, _cmsSpace, CompactibleFreeListSpace*) \
   static_field(ConcurrentMarkSweepThread, _collector, CMSCollector*) \
-  volatile_nonstatic_field(FreeChunk, _size, size_t) \
-  nonstatic_field(FreeChunk, _next, FreeChunk*) \
-  nonstatic_field(FreeChunk, _prev, FreeChunk*) \
   nonstatic_field(LinearAllocBlock, _word_size, size_t) \
-  nonstatic_field(FreeList<FreeChunk>, _size, size_t) \
-  nonstatic_field(FreeList<FreeChunk>, _count, ssize_t) \
-  nonstatic_field(BinaryTreeDictionary<FreeChunk>,_total_size, size_t) \
-  nonstatic_field(CompactibleFreeListSpace, _dictionary, FreeBlockDictionary<FreeChunk>*) \
+  nonstatic_field(AFLBinaryTreeDictionary, _total_size, size_t) \
   nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], FreeList<FreeChunk>) \
   nonstatic_field(CompactibleFreeListSpace, _smallLinearAllocBlock, LinearAllocBlock)
@@ -60,19 +56,17 @@
   declare_toplevel_type(CMSCollector) \
   declare_toplevel_type(CMSBitMap) \
   declare_toplevel_type(FreeChunk) \
+  declare_toplevel_type(Metablock) \
   declare_toplevel_type(ConcurrentMarkSweepThread*) \
   declare_toplevel_type(ConcurrentMarkSweepGeneration*) \
   declare_toplevel_type(SurrogateLockerThread*) \
   declare_toplevel_type(CompactibleFreeListSpace*) \
   declare_toplevel_type(CMSCollector*) \
-  declare_toplevel_type(FreeChunk*) \
-  declare_toplevel_type(BinaryTreeDictionary<FreeChunk>*) \
-  declare_toplevel_type(FreeBlockDictionary<FreeChunk>*) \
-  declare_toplevel_type(FreeList<FreeChunk>*) \
-  declare_toplevel_type(FreeList<FreeChunk>) \
+  declare_toplevel_type(AFLBinaryTreeDictionary*) \
   declare_toplevel_type(LinearAllocBlock) \
   declare_toplevel_type(FreeBlockDictionary<FreeChunk>) \
-  declare_type(BinaryTreeDictionary<FreeChunk>, FreeBlockDictionary<FreeChunk>)
+  declare_type(AFLBinaryTreeDictionary, FreeBlockDictionary<FreeChunk>)

 #define VM_INT_CONSTANTS_CMS(declare_constant) \
   declare_constant(Generation::ConcurrentMarkSweep) \
......
@@ -191,7 +191,7 @@ class VM_GenCollectFull: public VM_GC_Operation {
 class VM_CollectForMetadataAllocation: public VM_GC_Operation {
  private:
   MetaWord*                _result;
-  size_t                   _size;     // size of object to be allocated
+  size_t                   _size;                       // size of object to be allocated
   Metaspace::MetadataType  _mdtype;
   ClassLoaderData*         _loader_data;
  public:
......
@@ -25,9 +25,15 @@
 #include "precompiled.hpp"
 #include "gc_implementation/shared/allocationStats.hpp"
 #include "memory/binaryTreeDictionary.hpp"
+#include "memory/freeList.hpp"
+#include "memory/freeBlockDictionary.hpp"
+#include "memory/metablock.hpp"
+#include "memory/metachunk.hpp"
 #include "runtime/globals.hpp"
 #include "utilities/ostream.hpp"
 #ifndef SERIALGC
+#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
+#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #endif // SERIALGC
@@ -37,15 +43,18 @@
 // This is currently used in the Concurrent Mark&Sweep implementation.
 ////////////////////////////////////////////////////////////////////////////////

-template <class Chunk>
-TreeChunk<Chunk>* TreeChunk<Chunk>::as_TreeChunk(Chunk* fc) {
+template <class Chunk_t, template <class> class FreeList_t>
+size_t TreeChunk<Chunk_t, FreeList_t>::_min_tree_chunk_size = sizeof(TreeChunk<Chunk_t, FreeList_t>)/HeapWordSize;
+
+template <class Chunk_t, template <class> class FreeList_t>
+TreeChunk<Chunk_t, FreeList_t>* TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(Chunk_t* fc) {
   // Do some assertion checking here.
-  return (TreeChunk<Chunk>*) fc;
+  return (TreeChunk<Chunk_t, FreeList_t>*) fc;
 }

-template <class Chunk>
-void TreeChunk<Chunk>::verify_tree_chunk_list() const {
-  TreeChunk<Chunk>* nextTC = (TreeChunk<Chunk>*)next();
+template <class Chunk_t, template <class> class FreeList_t>
+void TreeChunk<Chunk_t, FreeList_t>::verify_tree_chunk_list() const {
+  TreeChunk<Chunk_t, FreeList_t>* nextTC = (TreeChunk<Chunk_t, FreeList_t>*)next();
   if (prev() != NULL) { // interior list node shouldn't have tree fields
     guarantee(embedded_list()->parent() == NULL && embedded_list()->left() == NULL &&
               embedded_list()->right() == NULL, "should be clear");
@@ -57,53 +66,113 @@ void TreeChunk<Chunk>::verify_tree_chunk_list() const {
   }
 }

-template <class Chunk>
-TreeList<Chunk>* TreeList<Chunk>::as_TreeList(TreeChunk<Chunk>* tc) {
+template <class Chunk_t, template <class> class FreeList_t>
+TreeList<Chunk_t, FreeList_t>::TreeList() {}
+
+template <class Chunk_t, template <class> class FreeList_t>
+TreeList<Chunk_t, FreeList_t>*
+TreeList<Chunk_t, FreeList_t>::as_TreeList(TreeChunk<Chunk_t,FreeList_t>* tc) {
   // This first free chunk in the list will be the tree list.
-  assert(tc->size() >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "Chunk is too small for a TreeChunk");
-  TreeList<Chunk>* tl = tc->embedded_list();
+  assert((tc->size() >= (TreeChunk<Chunk_t, FreeList_t>::min_size())),
+         "Chunk is too small for a TreeChunk");
+  TreeList<Chunk_t, FreeList_t>* tl = tc->embedded_list();
+  tl->initialize();
   tc->set_list(tl);
-#ifdef ASSERT
-  tl->set_protecting_lock(NULL);
-#endif
-  tl->set_hint(0);
   tl->set_size(tc->size());
   tl->link_head(tc);
   tl->link_tail(tc);
   tl->set_count(1);
-  tl->init_statistics(true /* split_birth */);
-  tl->set_parent(NULL);
-  tl->set_left(NULL);
-  tl->set_right(NULL);
   return tl;
 }

-template <class Chunk>
-TreeList<Chunk>* TreeList<Chunk>::as_TreeList(HeapWord* addr, size_t size) {
-  TreeChunk<Chunk>* tc = (TreeChunk<Chunk>*) addr;
-  assert(size >= BinaryTreeDictionary<Chunk>::min_tree_chunk_size, "Chunk is too small for a TreeChunk");
-  // The space in the heap will have been mangled initially but
-  // is not remangled when a free chunk is returned to the free list
+template <class Chunk_t, template <class> class FreeList_t>
+TreeList<Chunk_t, FreeList_t>*
+get_chunk(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither) {
+  FreeBlockDictionary<Chunk_t>::verify_par_locked();
+  Chunk_t* res = get_chunk_from_tree(size, dither);
+  assert(res == NULL || res->is_free(),
+         "Should be returning a free chunk");
+  assert(dither != FreeBlockDictionary<Chunk_t>::exactly ||
+         res->size() == size, "Not correct size");
+  return res;
+}
+
+template <class Chunk_t, template <class> class FreeList_t>
+TreeList<Chunk_t, FreeList_t>*
+TreeList<Chunk_t, FreeList_t>::as_TreeList(HeapWord* addr, size_t size) {
+  TreeChunk<Chunk_t, FreeList_t>* tc = (TreeChunk<Chunk_t, FreeList_t>*) addr;
+  assert((size >= TreeChunk<Chunk_t, FreeList_t>::min_size()),
+         "Chunk is too small for a TreeChunk");
+  // The space will have been mangled initially but
+  // is not remangled when a Chunk_t is returned to the free list
   // (since it is used to maintain the chunk on the free list).
-  assert((ZapUnusedHeapArea &&
-          SpaceMangler::is_mangled((HeapWord*) tc->size_addr()) &&
-          SpaceMangler::is_mangled((HeapWord*) tc->prev_addr()) &&
-          SpaceMangler::is_mangled((HeapWord*) tc->next_addr())) ||
-         (tc->size() == 0 && tc->prev() == NULL && tc->next() == NULL),
-         "Space should be clear or mangled");
+  tc->assert_is_mangled();
   tc->set_size(size);
   tc->link_prev(NULL);
   tc->link_next(NULL);
-  TreeList<Chunk>* tl = TreeList<Chunk>::as_TreeList(tc);
+  TreeList<Chunk_t, FreeList_t>* tl = TreeList<Chunk_t, FreeList_t>::as_TreeList(tc);
   return tl;
 }
-template <class Chunk>
-TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk>* tc) {
-  TreeList<Chunk>* retTL = this;
-  Chunk* list = head();
+#ifndef SERIALGC
+// Specialize for AdaptiveFreeList which tries to avoid
+// splitting a chunk of a size that is under populated in favor of
+// an over populated size.  The general get_better_list() just returns
+// the current list.
+template <>
+TreeList<FreeChunk, AdaptiveFreeList>*
+TreeList<FreeChunk, AdaptiveFreeList>::get_better_list(
+  BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList>* dictionary) {
+  // A candidate chunk has been found.  If it is already under
+  // populated, get a chunk associated with the hint for this
+  // chunk.
+  TreeList<FreeChunk, ::AdaptiveFreeList>* curTL = this;
+  if (surplus() <= 0) {
+    /* Use the hint to find a size with a surplus, and reset the hint. */
+    TreeList<FreeChunk, ::AdaptiveFreeList>* hintTL = this;
+    while (hintTL->hint() != 0) {
+      assert(hintTL->hint() > hintTL->size(),
+             "hint points in the wrong direction");
+      hintTL = dictionary->find_list(hintTL->hint());
+      assert(curTL != hintTL, "Infinite loop");
+      if (hintTL == NULL ||
+          hintTL == curTL /* Should not happen but protect against it */ ) {
+        // No useful hint.  Set the hint to NULL and go on.
+        curTL->set_hint(0);
+        break;
+      }
+      assert(hintTL->size() > curTL->size(), "hint is inconsistent");
+      if (hintTL->surplus() > 0) {
+        // The hint led to a list that has a surplus.  Use it.
+        // Set the hint for the candidate to an overpopulated
+        // size.
+        curTL->set_hint(hintTL->size());
+        // Change the candidate.
+        curTL = hintTL;
+        break;
+      }
+    }
+  }
+  return curTL;
+}
+#endif // SERIALGC
+
+template <class Chunk_t, template <class> class FreeList_t>
+TreeList<Chunk_t, FreeList_t>*
+TreeList<Chunk_t, FreeList_t>::get_better_list(
+  BinaryTreeDictionary<Chunk_t, FreeList_t>* dictionary) {
+  return this;
+}
+
+template <class Chunk_t, template <class> class FreeList_t>
+TreeList<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::remove_chunk_replace_if_needed(TreeChunk<Chunk_t, FreeList_t>* tc) {
+  TreeList<Chunk_t, FreeList_t>* retTL = this;
+  Chunk_t* list = head();
   assert(!list || list != list->next(), "Chunk on list twice");
   assert(tc != NULL, "Chunk being removed is NULL");
   assert(parent() == NULL || this == parent()->left() ||
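
To make the specialization above concrete, a toy rendition of the hint walk (editorial illustration; the sizes and surpluses are invented):

  #include <cstddef>
  #include <cstdio>

  struct ToyList { size_t size; long surplus; size_t hint; };
  ToyList lists[] = { {40, -1, 64}, {64, 0, 96}, {96, 5, 0} };

  ToyList* find_list(size_t sz) {
    for (int i = 0; i < 3; i++) if (lists[i].size == sz) return &lists[i];
    return NULL;
  }

  int main() {
    ToyList* cur = &lists[0];          // candidate: under-populated 40-word list
    if (cur->surplus <= 0) {
      ToyList* hint = cur;
      while (hint->hint != 0) {
        hint = find_list(hint->hint);  // walk 40 -> 64 -> 96
        if (hint == NULL) break;
        if (hint->surplus > 0) { cur->hint = hint->size; cur = hint; break; }
      }
    }
    // Prints 96: the over-populated list gets split, sparing the 40-word list.
    std::printf("split the %lu-word list instead\n", (unsigned long) cur->size);
    return 0;
  }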
@@ -112,13 +181,13 @@ TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk>* tc) {
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");

-  Chunk* prevFC = tc->prev();
-  TreeChunk<Chunk>* nextTC = TreeChunk<Chunk>::as_TreeChunk(tc->next());
+  Chunk_t* prevFC = tc->prev();
+  TreeChunk<Chunk_t, FreeList_t>* nextTC = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(tc->next());
   assert(list != NULL, "should have at least the target chunk");

   // Is this the first item on the list?
   if (tc == list) {
-    // The "getChunk..." functions for a TreeList<Chunk> will not return the
+    // The "getChunk..." functions for a TreeList<Chunk_t, FreeList_t> will not return the
     // first chunk in the list unless it is the last chunk in the list
     // because the first chunk is also acting as the tree node.
     // When coalescing happens, however, the first chunk in a tree
...@@ -127,8 +196,8 @@ TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk ...@@ -127,8 +196,8 @@ TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk
// allocated when the sweeper yields (giving up the free list lock) // allocated when the sweeper yields (giving up the free list lock)
// to allow mutator activity. If this chunk is the first in the // to allow mutator activity. If this chunk is the first in the
// list and is not the last in the list, do the work to copy the // list and is not the last in the list, do the work to copy the
// TreeList<Chunk> from the first chunk to the next chunk and update all // TreeList<Chunk_t, FreeList_t> from the first chunk to the next chunk and update all
// the TreeList<Chunk> pointers in the chunks in the list. // the TreeList<Chunk_t, FreeList_t> pointers in the chunks in the list.
if (nextTC == NULL) { if (nextTC == NULL) {
assert(prevFC == NULL, "Not last chunk in the list"); assert(prevFC == NULL, "Not last chunk in the list");
set_tail(NULL); set_tail(NULL);
...@@ -141,11 +210,11 @@ TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk ...@@ -141,11 +210,11 @@ TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk
// This can be slow for a long list. Consider having // This can be slow for a long list. Consider having
// an option that does not allow the first chunk on the // an option that does not allow the first chunk on the
// list to be coalesced. // list to be coalesced.
for (TreeChunk<Chunk>* curTC = nextTC; curTC != NULL; for (TreeChunk<Chunk_t, FreeList_t>* curTC = nextTC; curTC != NULL;
curTC = TreeChunk<Chunk>::as_TreeChunk(curTC->next())) { curTC = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(curTC->next())) {
curTC->set_list(retTL); curTC->set_list(retTL);
} }
// Fix the parent to point to the new TreeList<Chunk>. // Fix the parent to point to the new TreeList<Chunk_t, FreeList_t>.
if (retTL->parent() != NULL) { if (retTL->parent() != NULL) {
if (this == retTL->parent()->left()) { if (this == retTL->parent()->left()) {
retTL->parent()->set_left(retTL); retTL->parent()->set_left(retTL);
...@@ -176,9 +245,9 @@ TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk ...@@ -176,9 +245,9 @@ TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk
prevFC->link_after(nextTC); prevFC->link_after(nextTC);
} }
// Below this point the embeded TreeList<Chunk> being used for the // Below this point the embeded TreeList<Chunk_t, FreeList_t> being used for the
// tree node may have changed. Don't use "this" // tree node may have changed. Don't use "this"
// TreeList<Chunk>*. // TreeList<Chunk_t, FreeList_t>*.
// chunk should still be a free chunk (bit set in _prev) // chunk should still be a free chunk (bit set in _prev)
assert(!retTL->head() || retTL->size() == retTL->head()->size(), assert(!retTL->head() || retTL->size() == retTL->head()->size(),
"Wrong sized chunk in list"); "Wrong sized chunk in list");
...@@ -188,7 +257,7 @@ TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk ...@@ -188,7 +257,7 @@ TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk
tc->set_list(NULL); tc->set_list(NULL);
bool prev_found = false; bool prev_found = false;
bool next_found = false; bool next_found = false;
for (Chunk* curFC = retTL->head(); for (Chunk_t* curFC = retTL->head();
curFC != NULL; curFC = curFC->next()) { curFC != NULL; curFC = curFC->next()) {
assert(curFC != tc, "Chunk is still in list"); assert(curFC != tc, "Chunk is still in list");
if (curFC == prevFC) { if (curFC == prevFC) {
...@@ -215,8 +284,8 @@ TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk ...@@ -215,8 +284,8 @@ TreeList<Chunk>* TreeList<Chunk>::remove_chunk_replace_if_needed(TreeChunk<Chunk
return retTL; return retTL;
} }
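
// Illustrative sketch (not part of this change): the embedded-node invariant
// that remove_chunk_replace_if_needed() preserves.  "tl" is a hypothetical
// non-empty list in the tree.
#if 0
assert(TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(tl->head())->list() == tl,
       "the first chunk on the list doubles as the tree node");
#endif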
template <class Chunk_t, template <class> class FreeList_t>
void TreeList<Chunk_t, FreeList_t>::return_chunk_at_tail(TreeChunk<Chunk_t, FreeList_t>* chunk) {
  assert(chunk != NULL, "returning NULL chunk");
  assert(chunk->list() == this, "list should be set for chunk");
  assert(tail() != NULL, "The tree list is embedded in the first chunk");
@@ -225,12 +294,12 @@
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");

  Chunk_t* fc = tail();
  fc->link_after(chunk);
  link_tail(chunk);

  assert(!tail() || size() == tail()->size(), "Wrong sized chunk in list");
  FreeList_t<Chunk_t>::increment_count();
  debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -238,10 +307,10 @@
// Add this chunk at the head of the list.  "At the head of the list"
// is defined to be after the chunk pointed to by head().  This is
// because the TreeList<Chunk_t, FreeList_t> is embedded in the first TreeChunk<Chunk_t, FreeList_t> in the
// list.  See the definition of TreeChunk<Chunk_t, FreeList_t>.
template <class Chunk_t, template <class> class FreeList_t>
void TreeList<Chunk_t, FreeList_t>::return_chunk_at_head(TreeChunk<Chunk_t, FreeList_t>* chunk) {
  assert(chunk->list() == this, "list should be set for chunk");
  assert(head() != NULL, "The tree list is embedded in the first chunk");
  assert(chunk != NULL, "returning NULL chunk");
@@ -249,7 +318,7 @@
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");

  Chunk_t* fc = head()->next();
  if (fc != NULL) {
    chunk->link_after(fc);
  } else {
@@ -258,28 +327,38 @@
  }
  head()->link_after(chunk);
  assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
  FreeList_t<Chunk_t>::increment_count();
  debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
}
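
// Illustrative sketch (not part of this change): "at the head" really means
// right after head(), since head() is the embedded tree node.  "tl" and
// "tc" are hypothetical, with tc->size() matching tl->size().
#if 0
Chunk_t* old_second = tl->head()->next();
tl->return_chunk_at_head(tc);
assert(tl->head()->next() == tc, "chunk sits directly after the node");
assert(tc->next() == old_second, "rest of the list is untouched");
#endif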
template <class Chunk_t, template <class> class FreeList_t>
void TreeChunk<Chunk_t, FreeList_t>::assert_is_mangled() const {
  assert((ZapUnusedHeapArea &&
          SpaceMangler::is_mangled((HeapWord*) Chunk_t::size_addr()) &&
          SpaceMangler::is_mangled((HeapWord*) Chunk_t::prev_addr()) &&
          SpaceMangler::is_mangled((HeapWord*) Chunk_t::next_addr())) ||
         (size() == 0 && prev() == NULL && next() == NULL),
    "Space should be clear or mangled");
}

template <class Chunk_t, template <class> class FreeList_t>
TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::head_as_TreeChunk() {
  assert(head() == NULL || (TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(head())->list() == this),
    "Wrong type of chunk?");
  return TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(head());
}
template <class Chunk_t, template <class> class FreeList_t>
TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::first_available() {
  assert(head() != NULL, "The head of the list cannot be NULL");
  Chunk_t* fc = head()->next();
  TreeChunk<Chunk_t, FreeList_t>* retTC;
  if (fc == NULL) {
    retTC = head_as_TreeChunk();
  } else {
    retTC = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(fc);
  }
  assert(retTC->list() == this, "Wrong type of chunk.");
  return retTC;
@@ -288,41 +367,32 @@
// Returns the block with the largest heap address amongst
// those in the list for this size; potentially slow and expensive,
// use with caution!
template <class Chunk_t, template <class> class FreeList_t>
TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::largest_address() {
  assert(head() != NULL, "The head of the list cannot be NULL");
  Chunk_t* fc = head()->next();
  TreeChunk<Chunk_t, FreeList_t>* retTC;
  if (fc == NULL) {
    retTC = head_as_TreeChunk();
  } else {
    // walk down the list and return the one with the highest
    // heap address among chunks of this size.
    Chunk_t* last = fc;
    while (fc->next() != NULL) {
      // Advance first, then compare, so that every chunk after the first
      // is examined, including the final one on the list.
      fc = fc->next();
      if ((HeapWord*)last < (HeapWord*)fc) {
        last = fc;
      }
    }
    retTC = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(last);
  }
  assert(retTC->list() == this, "Wrong type of chunk.");
  return retTC;
}
template <class Chunk_t, template <class> class FreeList_t>
BinaryTreeDictionary<Chunk_t, FreeList_t>::BinaryTreeDictionary(MemRegion mr) {
  assert((mr.byte_size() > min_size()), "minimum chunk size");

  reset(mr);
  assert(root()->left() == NULL, "reset check failed");
@@ -333,52 +403,48 @@
  assert(total_free_blocks() == 1, "reset check failed");
}
template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::inc_total_size(size_t inc) {
  _total_size = _total_size + inc;
}

template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::dec_total_size(size_t dec) {
  _total_size = _total_size - dec;
}

template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset(MemRegion mr) {
  assert((mr.byte_size() > min_size()), "minimum chunk size");
  set_root(TreeList<Chunk_t, FreeList_t>::as_TreeList(mr.start(), mr.word_size()));
  set_total_size(mr.word_size());
  set_total_free_blocks(1);
}

template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset(HeapWord* addr, size_t byte_size) {
  MemRegion mr(addr, heap_word_size(byte_size));
  reset(mr);
}

template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset() {
  set_root(NULL);
  set_total_size(0);
  set_total_free_blocks(0);
}
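
// Illustrative sketch (not part of this change): seeding a dictionary from a
// single region.  After reset(mr) the whole region is one free chunk at the
// root; "space" and "word_len" are hypothetical.
#if 0
MemRegion mr(space, word_len);
BinaryTreeDictionary<FreeChunk, AdaptiveFreeList> dict(mr);
assert(dict.num_free_blocks() == 1, "one free block right after reset");
#endif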
// Get a free block of size at least size from tree, or NULL.
template <class Chunk_t, template <class> class FreeList_t>
TreeChunk<Chunk_t, FreeList_t>*
BinaryTreeDictionary<Chunk_t, FreeList_t>::get_chunk_from_tree(
                              size_t size,
                              enum FreeBlockDictionary<Chunk_t>::Dither dither)
{
  TreeList<Chunk_t, FreeList_t> *curTL, *prevTL;
  TreeChunk<Chunk_t, FreeList_t>* retTC = NULL;

  assert((size >= min_size()), "minimum chunk size");
  if (FLSVerifyDictionary) {
    verify_tree();
  }
@@ -398,7 +464,7 @@
  }
  if (curTL == NULL) { // couldn't find exact match

    if (dither == FreeBlockDictionary<Chunk_t>::exactly) return NULL;
    // try and find the next larger size by walking back up the search path
    for (curTL = prevTL; curTL != NULL;) {
@@ -410,46 +476,9 @@
  }
  if (curTL != NULL) {
    assert(curTL->size() >= size, "size inconsistency");

    curTL = curTL->get_better_list(this);

    retTC = curTL->first_available();
    assert((retTC != NULL) && (curTL->count() > 0),
      "A list in the binary tree should not be NULL");
@@ -465,9 +494,9 @@
  return retTC;
}
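
// Illustrative sketch (not part of this change): the two lookup modes.
// "dict" is hypothetical, and "atLeast" is assumed to be the other
// FreeBlockDictionary<Chunk_t>::Dither member besides "exactly".
#if 0
// Fails unless a list of exactly "size" words exists:
TreeChunk<FreeChunk, AdaptiveFreeList>* exact =
    dict->get_chunk_from_tree(size, FreeBlockDictionary<FreeChunk>::exactly);
// Walks back up the search path to the next larger size on a miss:
TreeChunk<FreeChunk, AdaptiveFreeList>* loose =
    dict->get_chunk_from_tree(size, FreeBlockDictionary<FreeChunk>::atLeast);
#endif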
template <class Chunk_t, template <class> class FreeList_t>
TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_list(size_t size) const {
  TreeList<Chunk_t, FreeList_t>* curTL;
  for (curTL = root(); curTL != NULL;) {
    if (curTL->size() == size) {        // exact match
      break;
@@ -484,10 +513,10 @@
}
template <class Chunk_t, template <class> class FreeList_t>
bool BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_chunk_in_free_list(Chunk_t* tc) const {
  size_t size = tc->size();
  TreeList<Chunk_t, FreeList_t>* tl = find_list(size);
  if (tl == NULL) {
    return false;
  } else {
@@ -495,9 +524,9 @@
  }
}

template <class Chunk_t, template <class> class FreeList_t>
Chunk_t* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_largest_dict() const {
  TreeList<Chunk_t, FreeList_t> *curTL = root();
  if (curTL != NULL) {
    while(curTL->right() != NULL) curTL = curTL->right();
    return curTL->largest_address();
@@ -510,15 +539,15 @@
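// Illustrative sketch (not part of this change): find_largest_dict() is the
// rightmost walk shown above -- the rightmost node is the largest size class,
// and largest_address() picks its highest-addressed chunk.  "dict" is
// hypothetical.
#if 0
Chunk_t* biggest = dict->find_largest_dict();  // NULL iff the tree is empty
#endif
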
// chunk in a list on a tree node, just unlink it.
// If it is the last chunk in the list (the next link is NULL),
// remove the node and repair the tree.
template <class Chunk_t, template <class> class FreeList_t>
TreeChunk<Chunk_t, FreeList_t>*
BinaryTreeDictionary<Chunk_t, FreeList_t>::remove_chunk_from_tree(TreeChunk<Chunk_t, FreeList_t>* tc) {
  assert(tc != NULL, "Should not call with a NULL chunk");
  assert(tc->is_free(), "Header is not marked correctly");

  TreeList<Chunk_t, FreeList_t> *newTL, *parentTL;
  TreeChunk<Chunk_t, FreeList_t>* retTC;
  TreeList<Chunk_t, FreeList_t>* tl = tc->list();
  debug_only(
    bool removing_only_chunk = false;
    if (tl == _root) {
@@ -538,8 +567,8 @@
  retTC = tc;
  // Removing this chunk can have the side effect of changing the node
  // (TreeList<Chunk_t, FreeList_t>*) in the tree.  If the node is the root, update it.
  TreeList<Chunk_t, FreeList_t>* replacementTL = tl->remove_chunk_replace_if_needed(tc);
  assert(tc->is_free(), "Chunk should still be free");
  assert(replacementTL->parent() == NULL ||
         replacementTL == replacementTL->parent()->left() ||
@@ -549,17 +578,18 @@
    assert(replacementTL->parent() == NULL, "Incorrectly replacing root");
    set_root(replacementTL);
  }
#ifdef ASSERT
  if (tl != replacementTL) {
    assert(replacementTL->head() != NULL,
      "If the tree list was replaced, it should not be a NULL list");
    TreeList<Chunk_t, FreeList_t>* rhl = replacementTL->head_as_TreeChunk()->list();
    TreeList<Chunk_t, FreeList_t>* rtl =
      TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(replacementTL->tail())->list();
    assert(rhl == replacementTL, "Broken head");
    assert(rtl == replacementTL, "Broken tail");
    assert(replacementTL->size() == tc->size(),  "Broken size");
  }
#endif

  // Does the tree need to be repaired?
  if (replacementTL->count() == 0) {
@@ -574,7 +604,7 @@
    } else if (replacementTL->right() == NULL) {
      // right is NULL
      newTL = replacementTL->left();
      debug_only(replacementTL->clear_left();)
    } else {  // we have both children, so, by patriarchal convention,
              // my replacement is least node in right sub-tree
      complicated_splice = true;
@@ -623,7 +653,7 @@
      newTL->set_right(replacementTL->right());
      debug_only(
        replacementTL->clear_right();
        replacementTL->clear_left();
      )
    }
    assert(replacementTL->right() == NULL &&
@@ -644,21 +674,21 @@
    verify_tree();
  }
  assert(!removing_only_chunk || _root == NULL, "root should be NULL");
  return TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(retTC);
}
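
// Illustrative sketch (not part of this change): the three standard BST
// deletion cases the repair code above distinguishes.  "node" is
// hypothetical.
#if 0
if (node->left() == NULL && node->right() == NULL) {
  // Leaf: simply unlink it from its parent.
} else if (node->left() == NULL || node->right() == NULL) {
  // One child: splice that child into the parent's slot.
} else {
  // Two children: replace the node with the minimum of its right
  // subtree, which remove_tree_minimum() below unlinks and returns.
}
#endif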
// Remove the leftmost node (lm) in the tree and return it.
// If lm has a right child, link it to the left node of
// the parent of lm.
template <class Chunk_t, template <class> class FreeList_t>
TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::remove_tree_minimum(TreeList<Chunk_t, FreeList_t>* tl) {
  assert(tl != NULL && tl->parent() != NULL, "really need a proper sub-tree");
  // locate the subtree minimum by walking down left branches
  TreeList<Chunk_t, FreeList_t>* curTL = tl;
  for (; curTL->left() != NULL; curTL = curTL->left());
  // obviously curTL now has at most one child, a right child
  if (curTL != root()) {  // Should this test just be removed?
    TreeList<Chunk_t, FreeList_t>* parentTL = curTL->parent();
    if (parentTL->left() == curTL) { // curTL is a left child
      parentTL->set_left(curTL->right());
    } else {
@@ -685,31 +715,14 @@
  return curTL;
}
template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::insert_chunk_in_tree(Chunk_t* fc) {
  TreeList<Chunk_t, FreeList_t> *curTL, *prevTL;
  size_t size = fc->size();

  assert((size >= min_size()),
    err_msg(SIZE_FORMAT " is too small to be a TreeChunk<Chunk_t, FreeList_t> " SIZE_FORMAT,
      size, min_size()));
  if (FLSVerifyDictionary) {
    verify_tree();
  }
@@ -729,9 +742,9 @@
      curTL = curTL->right();
    }
  }
  TreeChunk<Chunk_t, FreeList_t>* tc = TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(fc);
  // This chunk is being returned to the binary tree.  Its embedded
  // TreeList<Chunk_t, FreeList_t> should be unused at this point.
  tc->initialize();
  if (curTL != NULL) {          // exact match
    tc->set_list(curTL);
@@ -739,8 +752,8 @@
  } else {                     // need a new node in tree
    tc->clear_next();
    tc->link_prev(NULL);
    TreeList<Chunk_t, FreeList_t>* newTL = TreeList<Chunk_t, FreeList_t>::as_TreeList(tc);
    assert(((TreeChunk<Chunk_t, FreeList_t>*)tc)->list() == newTL,
      "List was not initialized correctly");
    if (prevTL == NULL) {      // we are the only tree node
      assert(root() == NULL, "control point invariant");
@@ -768,30 +781,30 @@
  }
}
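
// Illustrative sketch (not part of this change): returning a freed chunk.
// If a list of fc->size() exists the chunk joins it; otherwise the chunk's
// embedded TreeList becomes a new tree node.  "dict" and "fc" are
// hypothetical.
#if 0
dict->insert_chunk_in_tree(fc);
assert(dict->verify_chunk_in_free_list(fc), "now findable by size");
#endif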
template <class Chunk_t, template <class> class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::max_chunk_size() const {
  FreeBlockDictionary<Chunk_t>::verify_par_locked();
  TreeList<Chunk_t, FreeList_t>* tc = root();
  if (tc == NULL) return 0;
  for (; tc->right() != NULL; tc = tc->right());
  return tc->size();
}

template <class Chunk_t, template <class> class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_list_length(TreeList<Chunk_t, FreeList_t>* tl) const {
  size_t res;
  res = tl->count();
#ifdef ASSERT
  size_t cnt;
  Chunk_t* tc = tl->head();
  for (cnt = 0; tc != NULL; tc = tc->next(), cnt++);
  assert(res == cnt, "The count is not being maintained correctly");
#endif
  return res;
}

template <class Chunk_t, template <class> class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_size_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
  if (tl == NULL)
    return 0;
  return (tl->size() * total_list_length(tl)) +
@@ -799,8 +812,8 @@
         total_size_in_tree(tl->right());
}

template <class Chunk_t, template <class> class FreeList_t>
double BinaryTreeDictionary<Chunk_t, FreeList_t>::sum_of_squared_block_sizes(TreeList<Chunk_t, FreeList_t>* const tl) const {
  if (tl == NULL) {
    return 0.0;
  }
@@ -811,8 +824,8 @@
  return curr;
}

template <class Chunk_t, template <class> class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_free_blocks_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
  if (tl == NULL)
    return 0;
  return total_list_length(tl) +
@@ -820,28 +833,28 @@
         total_free_blocks_in_tree(tl->right());
}

template <class Chunk_t, template <class> class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::num_free_blocks() const {
  assert(total_free_blocks_in_tree(root()) == total_free_blocks(),
         "_total_free_blocks inconsistency");
  return total_free_blocks();
}

template <class Chunk_t, template <class> class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::tree_height_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
  if (tl == NULL)
    return 0;
  return 1 + MAX2(tree_height_helper(tl->left()),
                  tree_height_helper(tl->right()));
}

template <class Chunk_t, template <class> class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::tree_height() const {
  return tree_height_helper(root());
}

template <class Chunk_t, template <class> class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_nodes_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
  if (tl == NULL) {
    return 0;
  }
@@ -849,14 +862,18 @@
         total_nodes_helper(tl->right());
}

template <class Chunk_t, template <class> class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_nodes_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
  return total_nodes_helper(root());
}
template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::dict_census_update(size_t size, bool split, bool birth){}

#ifndef SERIALGC
template <>
void BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>::dict_census_update(size_t size, bool split, bool birth){
  TreeList<FreeChunk, AdaptiveFreeList>* nd = find_list(size);
  if (nd) {
    if (split) {
      if (birth) {
@@ -882,16 +899,26 @@
  // This is a birth associated with a LinAB.  The chunk
  // for the LinAB is not in the dictionary.
}
#endif // SERIALGC
template <class Chunk_t, template <class> class FreeList_t>
bool BinaryTreeDictionary<Chunk_t, FreeList_t>::coal_dict_over_populated(size_t size) {
  // For the general type of freelists, encourage coalescing by
  // returning true.
  return true;
}

#ifndef SERIALGC
template <>
bool BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>::coal_dict_over_populated(size_t size) {
  if (FLSAlwaysCoalesceLarge) return true;

  TreeList<FreeChunk, AdaptiveFreeList>* list_of_size = find_list(size);
  // None of requested size implies overpopulated.
  return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
         list_of_size->count() > list_of_size->coal_desired();
}
#endif // SERIALGC
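
// Illustrative sketch (not part of this change): a sweeper-style check
// before coalescing a chunk into a larger neighbor.  "dict" and "size" are
// hypothetical.
#if 0
if (dict->coal_dict_over_populated(size)) {
  // The size class has more chunks than the census wants (or
  // FLSAlwaysCoalesceLarge is set), so coalescing one away is safe.
}
#endif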
// Closures for walking the binary tree.
//   do_list() walks the free list in a node applying the closure
@@ -899,19 +926,18 @@
//   do_tree() walks the nodes in the binary tree applying do_list()
//     to each list at each node.

template <class Chunk_t, template <class> class FreeList_t>
class TreeCensusClosure : public StackObj {
 protected:
  virtual void do_list(FreeList_t<Chunk_t>* fl) = 0;
 public:
  virtual void do_tree(TreeList<Chunk_t, FreeList_t>* tl) = 0;
};

template <class Chunk_t, template <class> class FreeList_t>
class AscendTreeCensusClosure : public TreeCensusClosure<Chunk_t, FreeList_t> {
 public:
  void do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
    if (tl != NULL) {
      do_tree(tl->left());
      do_list(tl);
@@ -920,11 +946,10 @@
    }
  }
};

template <class Chunk_t, template <class> class FreeList_t>
class DescendTreeCensusClosure : public TreeCensusClosure<Chunk_t, FreeList_t> {
 public:
  void do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
    if (tl != NULL) {
      do_tree(tl->right());
      do_list(tl);
@@ -935,8 +960,8 @@
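// Illustrative sketch (not part of this change): a minimal custom census
// closure; only do_list() needs to be supplied.
#if 0
template <class Chunk_t, template <class> class FreeList_t>
class CountChunksClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
 public:
  size_t chunks;
  CountChunksClosure() : chunks(0) {}
  void do_list(FreeList_t<Chunk_t>* fl) { chunks += fl->count(); }
};
#endif
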
// For each list in the tree, calculate the desired, desired
// coalesce, count before sweep, and surplus before sweep.
template <class Chunk_t, template <class> class FreeList_t>
class BeginSweepClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
  double _percentage;
  float _inter_sweep_current;
  float _inter_sweep_estimate;
@@ -951,32 +976,36 @@
    _inter_sweep_estimate(inter_sweep_estimate),
    _intra_sweep_estimate(intra_sweep_estimate) { }

  void do_list(FreeList<Chunk_t>* fl) {}

#ifndef SERIALGC
  void do_list(AdaptiveFreeList<Chunk_t>* fl) {
    double coalSurplusPercent = _percentage;
    fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
    fl->set_coal_desired((ssize_t)((double)fl->desired() * coalSurplusPercent));
    fl->set_before_sweep(fl->count());
    fl->set_bfr_surp(fl->surplus());
  }
#endif // SERIALGC
};
// Used to search the tree until a condition is met.
// Similar to TreeCensusClosure but searches the
// tree and returns promptly when found.
template <class Chunk_t, template <class> class FreeList_t>
class TreeSearchClosure : public StackObj {
 protected:
  virtual bool do_list(FreeList_t<Chunk_t>* fl) = 0;
 public:
  virtual bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) = 0;
};

#if 0 //  Don't need this yet but here for symmetry.
template <class Chunk_t, template <class> class FreeList_t>
class AscendTreeSearchClosure : public TreeSearchClosure<Chunk_t> {
 public:
  bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
    if (tl != NULL) {
      if (do_tree(tl->left())) return true;
      if (do_list(tl)) return true;
@@ -987,11 +1016,10 @@
    }
  }
};
#endif

template <class Chunk_t, template <class> class FreeList_t>
class DescendTreeSearchClosure : public TreeSearchClosure<Chunk_t, FreeList_t> {
 public:
  bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
    if (tl != NULL) {
      if (do_tree(tl->right())) return true;
      if (do_list(tl)) return true;
@@ -1003,17 +1031,17 @@
// Searches the tree for a chunk that ends at the
// specified address.
template <class Chunk_t, template <class> class FreeList_t>
class EndTreeSearchClosure : public DescendTreeSearchClosure<Chunk_t, FreeList_t> {
  HeapWord* _target;
  Chunk_t* _found;

 public:
  EndTreeSearchClosure(HeapWord* target) : _target(target), _found(NULL) {}
  bool do_list(FreeList_t<Chunk_t>* fl) {
    Chunk_t* item = fl->head();
    while (item != NULL) {
      if (item->end() == (uintptr_t*) _target) {
        _found = item;
        return true;
      }
@@ -1021,22 +1049,22 @@
    }
    return false;
  }
  Chunk_t* found() { return _found; }
};

template <class Chunk_t, template <class> class FreeList_t>
Chunk_t* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_chunk_ends_at(HeapWord* target) const {
  EndTreeSearchClosure<Chunk_t, FreeList_t> etsc(target);
  bool found_target = etsc.do_tree(root());
  assert(found_target || etsc.found() == NULL, "Consistency check");
  assert(!found_target || etsc.found() != NULL, "Consistency check");
  return etsc.found();
}
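
// Illustrative sketch (not part of this change): the intended use of
// find_chunk_ends_at() -- locating a free left neighbor to coalesce with.
// "dict" and "fc" are hypothetical.
#if 0
Chunk_t* left_neighbor = dict->find_chunk_ends_at((HeapWord*) fc);
if (left_neighbor != NULL) {
  // "left_neighbor" ends exactly where "fc" begins; the two can merge.
}
#endif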
template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::begin_sweep_dict_census(double coalSurplusPercent,
  float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
  BeginSweepClosure<Chunk_t, FreeList_t> bsc(coalSurplusPercent, inter_sweep_current,
                                             inter_sweep_estimate,
                                             intra_sweep_estimate);
  bsc.do_tree(root());
@@ -1045,84 +1073,91 @@
// Closures and methods for calculating total bytes returned to the
// free lists in the tree.
#ifndef PRODUCT
template <class Chunk_t, template <class> class FreeList_t>
class InitializeDictReturnedBytesClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
 public:
  void do_list(FreeList_t<Chunk_t>* fl) {
    fl->set_returned_bytes(0);
  }
};

template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::initialize_dict_returned_bytes() {
  InitializeDictReturnedBytesClosure<Chunk_t, FreeList_t> idrb;
  idrb.do_tree(root());
}

template <class Chunk_t, template <class> class FreeList_t>
class ReturnedBytesClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
  size_t _dict_returned_bytes;
 public:
  ReturnedBytesClosure() { _dict_returned_bytes = 0; }
  void do_list(FreeList_t<Chunk_t>* fl) {
    _dict_returned_bytes += fl->returned_bytes();
  }
  size_t dict_returned_bytes() { return _dict_returned_bytes; }
};

template <class Chunk_t, template <class> class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::sum_dict_returned_bytes() {
  ReturnedBytesClosure<Chunk_t, FreeList_t> rbc;
  rbc.do_tree(root());

  return rbc.dict_returned_bytes();
}

// Count the number of entries in the tree.
template <class Chunk_t, template <class> class FreeList_t>
class treeCountClosure : public DescendTreeCensusClosure<Chunk_t, FreeList_t> {
 public:
  uint count;
  treeCountClosure(uint c) { count = c; }
  void do_list(FreeList_t<Chunk_t>* fl) {
    count++;
  }
};

template <class Chunk_t, template <class> class FreeList_t>
size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_count() {
  treeCountClosure<Chunk_t, FreeList_t> ctc(0);
  ctc.do_tree(root());
  return ctc.count;
}
#endif // PRODUCT
// Calculate surpluses for the lists in the tree.
template <class Chunk_t, template <class> class FreeList_t>
class setTreeSurplusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
  double percentage;
 public:
  setTreeSurplusClosure(double v) { percentage = v; }
  void do_list(FreeList<Chunk_t>* fl) {}

#ifndef SERIALGC
  void do_list(AdaptiveFreeList<Chunk_t>* fl) {
    double splitSurplusPercent = percentage;
    fl->set_surplus(fl->count() -
                    (ssize_t)((double)fl->desired() * splitSurplusPercent));
  }
#endif // SERIALGC
};

template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::set_tree_surplus(double splitSurplusPercent) {
  setTreeSurplusClosure<Chunk_t, FreeList_t> sts(splitSurplusPercent);
  sts.do_tree(root());
}

// Set hints for the lists in the tree.
template <class Chunk_t, template <class> class FreeList_t>
class setTreeHintsClosure : public DescendTreeCensusClosure<Chunk_t, FreeList_t> {
  size_t hint;
 public:
  setTreeHintsClosure(size_t v) { hint = v; }
  void do_list(FreeList<Chunk_t>* fl) {}

#ifndef SERIALGC
  void do_list(AdaptiveFreeList<Chunk_t>* fl) {
    fl->set_hint(hint);
    assert(fl->hint() == 0 || fl->hint() > fl->size(),
      "Current hint is inconsistent");
@@ -1130,35 +1165,40 @@
      hint = fl->size();
    }
  }
#endif // SERIALGC
};

template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::set_tree_hints(void) {
  setTreeHintsClosure<Chunk_t, FreeList_t> sth(0);
  sth.do_tree(root());
}

// Save count before previous sweep and splits and coalesces.
template <class Chunk_t, template <class> class FreeList_t>
class clearTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
  void do_list(FreeList<Chunk_t>* fl) {}

#ifndef SERIALGC
  void do_list(AdaptiveFreeList<Chunk_t>* fl) {
    fl->set_prev_sweep(fl->count());
    fl->set_coal_births(0);
    fl->set_coal_deaths(0);
    fl->set_split_births(0);
    fl->set_split_deaths(0);
  }
#endif // SERIALGC
};

template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::clear_tree_census(void) {
  clearTreeCensusClosure<Chunk_t, FreeList_t> ctc;
  ctc.do_tree(root());
}

// Do reporting and post sweep clean up.
template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk_t, FreeList_t>::end_sweep_dict_census(double splitSurplusPercent) {
  // Does walking the tree 3 times hurt?
  set_tree_surplus(splitSurplusPercent);
  set_tree_hints();
@@ -1169,9 +1209,9 @@
}
// Print summary statistics // Print summary statistics
template <class Chunk> template <class Chunk_t, template <class> class FreeList_t>
void BinaryTreeDictionary<Chunk>::report_statistics() const { void BinaryTreeDictionary<Chunk_t, FreeList_t>::report_statistics() const {
FreeBlockDictionary<Chunk>::verify_par_locked(); FreeBlockDictionary<Chunk_t>::verify_par_locked();
gclog_or_tty->print("Statistics for BinaryTreeDictionary:\n" gclog_or_tty->print("Statistics for BinaryTreeDictionary:\n"
"------------------------------------\n"); "------------------------------------\n");
size_t total_size = total_chunk_size(debug_only(NULL)); size_t total_size = total_chunk_size(debug_only(NULL));
...@@ -1182,36 +1222,47 @@ void BinaryTreeDictionary<Chunk>::report_statistics() const { ...@@ -1182,36 +1222,47 @@ void BinaryTreeDictionary<Chunk>::report_statistics() const {
if (free_blocks > 0) { if (free_blocks > 0) {
gclog_or_tty->print("Av. Block Size: %d\n", total_size/free_blocks); gclog_or_tty->print("Av. Block Size: %d\n", total_size/free_blocks);
} }
gclog_or_tty->print("Tree Height: %d\n", treeHeight()); gclog_or_tty->print("Tree Height: %d\n", tree_height());
} }
 // Print census information - counts, births, deaths, etc.
 // for each list in the tree.  Also print some summary
 // information.
-template <class Chunk>
-class PrintTreeCensusClosure : public AscendTreeCensusClosure<Chunk> {
+template <class Chunk_t, template <class> class FreeList_t>
+class PrintTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   int _print_line;
   size_t _total_free;
-  FreeList<Chunk> _total;
+  FreeList_t<Chunk_t> _total;

  public:
   PrintTreeCensusClosure() {
     _print_line = 0;
     _total_free = 0;
   }
-  FreeList<Chunk>* total() { return &_total; }
+  FreeList_t<Chunk_t>* total() { return &_total; }
   size_t total_free() { return _total_free; }
-  void do_list(FreeList<Chunk>* fl) {
+  void do_list(FreeList<Chunk_t>* fl) {
     if (++_print_line >= 40) {
-      FreeList<Chunk>::print_labels_on(gclog_or_tty, "size");
+      FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
       _print_line = 0;
     }
     fl->print_on(gclog_or_tty);
     _total_free += fl->count() * fl->size();
     total()->set_count(total()->count() + fl->count());
-    total()->set_bfr_surp(total()->bfr_surp() + fl->bfr_surp());
+  }
+#ifndef SERIALGC
+  void do_list(AdaptiveFreeList<Chunk_t>* fl) {
+    if (++_print_line >= 40) {
+      FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
+      _print_line = 0;
+    }
+    fl->print_on(gclog_or_tty);
+    _total_free += fl->count() * fl->size();
+    total()->set_count(total()->count() + fl->count());
+    total()->set_bfr_surp(total()->bfr_surp() + fl->bfr_surp());
     total()->set_surplus(total()->split_deaths() + fl->surplus());
     total()->set_desired(total()->desired() + fl->desired());
     total()->set_prev_sweep(total()->prev_sweep() + fl->prev_sweep());
     total()->set_before_sweep(total()->before_sweep() + fl->before_sweep());
     total()->set_coal_births(total()->coal_births() + fl->coal_births());
@@ -1219,18 +1270,32 @@ class PrintTreeCensusClosure : public AscendTreeCensusClosure<Chunk> {
     total()->set_split_births(total()->split_births() + fl->split_births());
     total()->set_split_deaths(total()->split_deaths() + fl->split_deaths());
   }
+#endif // SERIALGC
 };
-template <class Chunk>
-void BinaryTreeDictionary<Chunk>::print_dict_census(void) const {
+template <class Chunk_t, template <class> class FreeList_t>
+void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_dict_census(void) const {
   gclog_or_tty->print("\nBinaryTree\n");
-  FreeList<Chunk>::print_labels_on(gclog_or_tty, "size");
-  PrintTreeCensusClosure<Chunk> ptc;
+  FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
+  PrintTreeCensusClosure<Chunk_t, FreeList_t> ptc;
   ptc.do_tree(root());

-  FreeList<Chunk>* total = ptc.total();
-  FreeList<Chunk>::print_labels_on(gclog_or_tty, " ");
+  FreeList_t<Chunk_t>* total = ptc.total();
+  FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, " ");
+}
+
+#ifndef SERIALGC
+template <>
+void BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>::print_dict_census(void) const {
+
+  gclog_or_tty->print("\nBinaryTree\n");
+  AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
+  PrintTreeCensusClosure<FreeChunk, AdaptiveFreeList> ptc;
+  ptc.do_tree(root());
+
+  AdaptiveFreeList<FreeChunk>* total = ptc.total();
+  AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, " ");
   total->print_on(gclog_or_tty, "TOTAL\t");
   gclog_or_tty->print(
       "total_free(words): " SIZE_FORMAT_W(16)
@@ -1242,9 +1307,10 @@ void BinaryTreeDictionary<Chunk>::print_dict_census(void) const {
       (double)(total->desired() - total->count())
       /(total->desired() != 0 ? (double)total->desired() : 1.0));
 }
+#endif // SERIALGC
-template <class Chunk>
-class PrintFreeListsClosure : public AscendTreeCensusClosure<Chunk> {
+template <class Chunk_t, template <class> class FreeList_t>
+class PrintFreeListsClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   outputStream* _st;
   int _print_line;
@@ -1253,14 +1319,14 @@ class PrintFreeListsClosure : public AscendTreeCensusClosure<Chunk> {
     _st = st;
     _print_line = 0;
   }
-  void do_list(FreeList<Chunk>* fl) {
+  void do_list(FreeList_t<Chunk_t>* fl) {
     if (++_print_line >= 40) {
-      FreeList<Chunk>::print_labels_on(_st, "size");
+      FreeList_t<Chunk_t>::print_labels_on(_st, "size");
       _print_line = 0;
     }
     fl->print_on(gclog_or_tty);
     size_t sz = fl->size();
-    for (Chunk* fc = fl->head(); fc != NULL;
+    for (Chunk_t* fc = fl->head(); fc != NULL;
          fc = fc->next()) {
       _st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
                     fc, (HeapWord*)fc + sz,
@@ -1269,11 +1335,11 @@ class PrintFreeListsClosure : public AscendTreeCensusClosure<Chunk> {
   }
 };

-template <class Chunk>
-void BinaryTreeDictionary<Chunk>::print_free_lists(outputStream* st) const {
-  FreeList<Chunk>::print_labels_on(st, "size");
-  PrintFreeListsClosure<Chunk> pflc(st);
+template <class Chunk_t, template <class> class FreeList_t>
+void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_free_lists(outputStream* st) const {
+  FreeList_t<Chunk_t>::print_labels_on(st, "size");
+  PrintFreeListsClosure<Chunk_t, FreeList_t> pflc(st);
   pflc.do_tree(root());
 }
@@ -1281,18 +1347,18 @@ void BinaryTreeDictionary<Chunk>::print_free_lists(outputStream* st) const {
 // . _root has no parent
 // . parent and child point to each other
 // . each node's key correctly related to that of its child(ren)
-template <class Chunk>
-void BinaryTreeDictionary<Chunk>::verify_tree() const {
+template <class Chunk_t, template <class> class FreeList_t>
+void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree() const {
   guarantee(root() == NULL || total_free_blocks() == 0 ||
     total_size() != 0, "_total_size should't be 0?");
   guarantee(root() == NULL || root()->parent() == NULL, "_root shouldn't have parent");
   verify_tree_helper(root());
 }

-template <class Chunk>
-size_t BinaryTreeDictionary<Chunk>::verify_prev_free_ptrs(TreeList<Chunk>* tl) {
+template <class Chunk_t, template <class> class FreeList_t>
+size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_prev_free_ptrs(TreeList<Chunk_t, FreeList_t>* tl) {
   size_t ct = 0;
-  for (Chunk* curFC = tl->head(); curFC != NULL; curFC = curFC->next()) {
+  for (Chunk_t* curFC = tl->head(); curFC != NULL; curFC = curFC->next()) {
     ct++;
     assert(curFC->prev() == NULL || curFC->prev()->is_free(),
       "Chunk should be free");
@@ -1303,8 +1369,8 @@ size_t BinaryTreeDictionary<Chunk>::verify_prev_free_ptrs(TreeList<Chunk>* tl) {
 // Note: this helper is recursive rather than iterative, so use with
 // caution on very deep trees; and watch out for stack overflow errors;
 // In general, to be used only for debugging.
-template <class Chunk>
-void BinaryTreeDictionary<Chunk>::verify_tree_helper(TreeList<Chunk>* tl) const {
+template <class Chunk_t, template <class> class FreeList_t>
+void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
   if (tl == NULL)
     return;
   guarantee(tl->size() != 0, "A list must has a size");
@@ -1332,15 +1398,25 @@ void BinaryTreeDictionary<Chunk>::verify_tree_helper(TreeList<Chunk>* tl) const
   verify_tree_helper(tl->right());
 }

-template <class Chunk>
-void BinaryTreeDictionary<Chunk>::verify() const {
+template <class Chunk_t, template <class> class FreeList_t>
+void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify() const {
   verify_tree();
   guarantee(total_size() == total_size_in_tree(root()), "Total Size inconsistency");
 }
+template class TreeList<Metablock, FreeList>;
+template class BinaryTreeDictionary<Metablock, FreeList>;
+template class TreeChunk<Metablock, FreeList>;
+
+template class TreeList<Metachunk, FreeList>;
+template class BinaryTreeDictionary<Metachunk, FreeList>;
+template class TreeChunk<Metachunk, FreeList>;
+
 #ifndef SERIALGC
 // Explicitly instantiate these types for FreeChunk.
-template class BinaryTreeDictionary<FreeChunk>;
-template class TreeChunk<FreeChunk>;
-template class TreeList<FreeChunk>;
+template class TreeList<FreeChunk, AdaptiveFreeList>;
+template class BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>;
+template class TreeChunk<FreeChunk, AdaptiveFreeList>;
 #endif // SERIALGC
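
[Editor's note] These instantiation lines are the crux of the change: the dictionary now takes its free-list flavor as a template-template parameter, so the metadata dictionaries reuse the plain FreeList while CMS passes AdaptiveFreeList. A minimal standalone sketch of the same parameterization; Dict, List and Node are illustrative names, not HotSpot's:

#include <cstddef>

template <class Chunk_t> class List {
 protected:
  Chunk_t*    _head;
  std::size_t _count;
 public:
  List() : _head(0), _count(0) {}
  Chunk_t* head() const { return _head; }
  std::size_t count() const { return _count; }
};

template <class Chunk_t, template <class> class FreeList_t>
class Dict {
  FreeList_t<Chunk_t> _list;   // the list flavor is chosen by the caller
 public:
  std::size_t count() const { return _list.count(); }
};

struct Node { Node* next; };

// Explicit instantiation must name both arguments; the second argument is
// itself a template, so plain List (no <...>) is what gets passed.
template class Dict<Node, List>;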
@@ -37,77 +37,78 @@
 // A TreeList is a FreeList which can be used to maintain a
 // binary tree of free lists.

-template <class Chunk> class TreeChunk;
-template <class Chunk> class BinaryTreeDictionary;
-template <class Chunk> class AscendTreeCensusClosure;
-template <class Chunk> class DescendTreeCensusClosure;
-template <class Chunk> class DescendTreeSearchClosure;
+template <class Chunk_t, template <class> class FreeList_t> class TreeChunk;
+template <class Chunk_t, template <class> class FreeList_t> class BinaryTreeDictionary;
+template <class Chunk_t, template <class> class FreeList_t> class AscendTreeCensusClosure;
+template <class Chunk_t, template <class> class FreeList_t> class DescendTreeCensusClosure;
+template <class Chunk_t, template <class> class FreeList_t> class DescendTreeSearchClosure;

-template <class Chunk>
-class TreeList: public FreeList<Chunk> {
-  friend class TreeChunk<Chunk>;
-  friend class BinaryTreeDictionary<Chunk>;
-  friend class AscendTreeCensusClosure<Chunk>;
-  friend class DescendTreeCensusClosure<Chunk>;
-  friend class DescendTreeSearchClosure<Chunk>;
+template <class Chunk_t, template <class> class FreeList_t>
+class TreeList : public FreeList_t<Chunk_t> {
+  friend class TreeChunk<Chunk_t, FreeList_t>;
+  friend class BinaryTreeDictionary<Chunk_t, FreeList_t>;
+  friend class AscendTreeCensusClosure<Chunk_t, FreeList_t>;
+  friend class DescendTreeCensusClosure<Chunk_t, FreeList_t>;
+  friend class DescendTreeSearchClosure<Chunk_t, FreeList_t>;

-  TreeList<Chunk>* _parent;
-  TreeList<Chunk>* _left;
-  TreeList<Chunk>* _right;
+  TreeList<Chunk_t, FreeList_t>* _parent;
+  TreeList<Chunk_t, FreeList_t>* _left;
+  TreeList<Chunk_t, FreeList_t>* _right;

  protected:
-  TreeList<Chunk>* parent() const { return _parent; }
-  TreeList<Chunk>* left() const   { return _left;   }
-  TreeList<Chunk>* right() const  { return _right;  }
-
-  // Explicitly import these names into our namespace to fix name lookup with templates
-  using FreeList<Chunk>::head;
-  using FreeList<Chunk>::set_head;
-  using FreeList<Chunk>::tail;
-  using FreeList<Chunk>::set_tail;
-  using FreeList<Chunk>::link_tail;
-  using FreeList<Chunk>::increment_count;
-  NOT_PRODUCT(using FreeList<Chunk>::increment_returned_bytes_by;)
-  using FreeList<Chunk>::verify_chunk_in_free_list;
-  using FreeList<Chunk>::size;
+  TreeList<Chunk_t, FreeList_t>* parent() const { return _parent; }
+  TreeList<Chunk_t, FreeList_t>* left() const   { return _left;   }
+  TreeList<Chunk_t, FreeList_t>* right() const  { return _right;  }
+
+  // Wrapper on call to base class, to get the template to compile.
+  Chunk_t* head() const { return FreeList_t<Chunk_t>::head(); }
+  Chunk_t* tail() const { return FreeList_t<Chunk_t>::tail(); }
+  void set_head(Chunk_t* head) { FreeList_t<Chunk_t>::set_head(head); }
+  void set_tail(Chunk_t* tail) { FreeList_t<Chunk_t>::set_tail(tail); }
+
+  size_t size() const { return FreeList_t<Chunk_t>::size(); }

   // Accessors for links in tree.

-  void set_left(TreeList<Chunk>* tl) {
+  void set_left(TreeList<Chunk_t, FreeList_t>* tl) {
     _left = tl;
     if (tl != NULL)
       tl->set_parent(this);
   }
-  void set_right(TreeList<Chunk>* tl) {
+  void set_right(TreeList<Chunk_t, FreeList_t>* tl) {
     _right = tl;
     if (tl != NULL)
       tl->set_parent(this);
   }
-  void set_parent(TreeList<Chunk>* tl) { _parent = tl; }
+  void set_parent(TreeList<Chunk_t, FreeList_t>* tl) { _parent = tl; }

-  void clearLeft()    { _left = NULL;   }
+  void clear_left()   { _left = NULL;   }
   void clear_right()  { _right = NULL;  }
   void clear_parent() { _parent = NULL; }
-  void initialize()   { clearLeft(); clear_right(), clear_parent(); }
+  void initialize()   { clear_left(); clear_right(), clear_parent(); FreeList_t<Chunk_t>::initialize(); }

   // For constructing a TreeList from a Tree chunk or
   // address and size.
-  static TreeList<Chunk>* as_TreeList(TreeChunk<Chunk>* tc);
-  static TreeList<Chunk>* as_TreeList(HeapWord* addr, size_t size);
+  TreeList();
+  static TreeList<Chunk_t, FreeList_t>*
+          as_TreeList(TreeChunk<Chunk_t, FreeList_t>* tc);
+  static TreeList<Chunk_t, FreeList_t>* as_TreeList(HeapWord* addr, size_t size);

   // Returns the head of the free list as a pointer to a TreeChunk.
-  TreeChunk<Chunk>* head_as_TreeChunk();
+  TreeChunk<Chunk_t, FreeList_t>* head_as_TreeChunk();

   // Returns the first available chunk in the free list as a pointer
   // to a TreeChunk.
-  TreeChunk<Chunk>* first_available();
+  TreeChunk<Chunk_t, FreeList_t>* first_available();

   // Returns the block with the largest heap address amongst
   // those in the list for this size; potentially slow and expensive,
   // use with caution!
-  TreeChunk<Chunk>* largest_address();
+  TreeChunk<Chunk_t, FreeList_t>* largest_address();
+
+  TreeList<Chunk_t, FreeList_t>* get_better_list(
+    BinaryTreeDictionary<Chunk_t, FreeList_t>* dictionary);

   // remove_chunk_replace_if_needed() removes the given "tc" from the TreeList.
   // If "tc" is the first chunk in the list, it is also the
@@ -115,10 +116,10 @@ class TreeList: public FreeList<Chunk> {
   // returns the possibly replaced TreeList* for the node in
   // the tree.  It also updates the parent of the original
   // node to point to the new node.
-  TreeList<Chunk>* remove_chunk_replace_if_needed(TreeChunk<Chunk>* tc);
+  TreeList<Chunk_t, FreeList_t>* remove_chunk_replace_if_needed(TreeChunk<Chunk_t, FreeList_t>* tc);
   // See FreeList.
-  void return_chunk_at_head(TreeChunk<Chunk>* tc);
-  void return_chunk_at_tail(TreeChunk<Chunk>* tc);
+  void return_chunk_at_head(TreeChunk<Chunk_t, FreeList_t>* tc);
+  void return_chunk_at_tail(TreeChunk<Chunk_t, FreeList_t>* tc);
 };
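
[Editor's note] The head()/tail()/set_head()/set_tail() wrappers replace the earlier block of using-declarations because, in a class template, unqualified names inherited from a dependent base are not found by ordinary lookup; once the base class became the template parameter FreeList_t<Chunk_t>, calls into it must be qualified (or wrapped, as the diff's own comment says). A compilable toy illustrating the rule, with invented names:

// Base's members live in a dependent base, so two-phase lookup does not
// see them without qualification.
template <class T> struct Base {
  T* head() const { return 0; }
};

template <class T, template <class> class Base_t>
struct Derived : public Base_t<T> {
  // T* first() { return head(); }          // error: 'head' was not declared
  T* first() { return Base_t<T>::head(); }  // qualified call, as TreeList does
};

template <class T> struct MyBase : Base<T> {};
template struct Derived<int, MyBase>;       // forces a full instantiation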
 // A TreeChunk is a subclass of a Chunk that additionally
@@ -134,52 +135,54 @@ class TreeList: public FreeList<Chunk> {
 // on the free list for a node in the tree and is only removed if
 // it is the last chunk on the free list.

-template <class Chunk>
-class TreeChunk : public Chunk {
-  friend class TreeList<Chunk>;
-  TreeList<Chunk>* _list;
-  TreeList<Chunk> _embedded_list;  // if non-null, this chunk is on _list
+template <class Chunk_t, template <class> class FreeList_t>
+class TreeChunk : public Chunk_t {
+  friend class TreeList<Chunk_t, FreeList_t>;
+  TreeList<Chunk_t, FreeList_t>* _list;
+  TreeList<Chunk_t, FreeList_t> _embedded_list;  // if non-null, this chunk is on _list
+
+  static size_t _min_tree_chunk_size;
+
  protected:
-  TreeList<Chunk>* embedded_list() const { return (TreeList<Chunk>*) &_embedded_list; }
-  void set_embedded_list(TreeList<Chunk>* v) { _embedded_list = *v; }
+  TreeList<Chunk_t, FreeList_t>* embedded_list() const { return (TreeList<Chunk_t, FreeList_t>*) &_embedded_list; }
+  void set_embedded_list(TreeList<Chunk_t, FreeList_t>* v) { _embedded_list = *v; }
  public:
-  TreeList<Chunk>* list() { return _list; }
-  void set_list(TreeList<Chunk>* v) { _list = v; }
-  static TreeChunk<Chunk>* as_TreeChunk(Chunk* fc);
+  TreeList<Chunk_t, FreeList_t>* list() { return _list; }
+  void set_list(TreeList<Chunk_t, FreeList_t>* v) { _list = v; }
+  static TreeChunk<Chunk_t, FreeList_t>* as_TreeChunk(Chunk_t* fc);
   // Initialize fields in a TreeChunk that should be
   // initialized when the TreeChunk is being added to
   // a free list in the tree.
   void initialize() { embedded_list()->initialize(); }

-  Chunk* next() const { return Chunk::next(); }
-  Chunk* prev() const { return Chunk::prev(); }
-  size_t size() const volatile { return Chunk::size(); }
+  Chunk_t* next() const { return Chunk_t::next(); }
+  Chunk_t* prev() const { return Chunk_t::prev(); }
+  size_t size() const volatile { return Chunk_t::size(); }
+
+  static size_t min_size() {
+    return _min_tree_chunk_size;
+  }

   // debugging
   void verify_tree_chunk_list() const;
+  void assert_is_mangled() const;
 };
-template <class Chunk>
-class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
+template <class Chunk_t, template <class> class FreeList_t>
+class BinaryTreeDictionary: public FreeBlockDictionary<Chunk_t> {
   friend class VMStructs;
-  bool       _splay;
-  bool       _adaptive_freelists;
   size_t     _total_size;
   size_t     _total_free_blocks;
-  TreeList<Chunk>* _root;
+  TreeList<Chunk_t, FreeList_t>* _root;

   // private accessors
-  bool splay() const { return _splay; }
-  void set_splay(bool v) { _splay = v; }
   void set_total_size(size_t v) { _total_size = v; }
   virtual void inc_total_size(size_t v);
   virtual void dec_total_size(size_t v);
-  size_t total_free_blocks() const { return _total_free_blocks; }
   void set_total_free_blocks(size_t v) { _total_free_blocks = v; }
-  TreeList<Chunk>* root() const { return _root; }
-  void set_root(TreeList<Chunk>* v) { _root = v; }
-  bool adaptive_freelists() { return _adaptive_freelists; }
+  TreeList<Chunk_t, FreeList_t>* root() const { return _root; }
+  void set_root(TreeList<Chunk_t, FreeList_t>* v) { _root = v; }

   // This field is added and can be set to point to the
   // the Mutex used to synchronize access to the
@@ -191,54 +194,55 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
   // return it.  If the chunk
   // is the last chunk of that size, remove the node for that size
   // from the tree.
-  TreeChunk<Chunk>* get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither, bool splay);
-  // Return a list of the specified size or NULL from the tree.
-  // The list is not removed from the tree.
-  TreeList<Chunk>* find_list (size_t size) const;
+  TreeChunk<Chunk_t, FreeList_t>* get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither);

   // Remove this chunk from the tree.  If the removal results
   // in an empty list in the tree, remove the empty list.
-  TreeChunk<Chunk>* remove_chunk_from_tree(TreeChunk<Chunk>* tc);
+  TreeChunk<Chunk_t, FreeList_t>* remove_chunk_from_tree(TreeChunk<Chunk_t, FreeList_t>* tc);

   // Remove the node in the trees starting at tl that has the
   // minimum value and return it.  Repair the tree as needed.
-  TreeList<Chunk>* remove_tree_minimum(TreeList<Chunk>* tl);
-  void       semi_splay_step(TreeList<Chunk>* tl);
+  TreeList<Chunk_t, FreeList_t>* remove_tree_minimum(TreeList<Chunk_t, FreeList_t>* tl);

   // Add this free chunk to the tree.
-  void       insert_chunk_in_tree(Chunk* freeChunk);
+  void       insert_chunk_in_tree(Chunk_t* freeChunk);
 public:

-  static const size_t min_tree_chunk_size = sizeof(TreeChunk<Chunk>)/HeapWordSize;
+  // Return a list of the specified size or NULL from the tree.
+  // The list is not removed from the tree.
+  TreeList<Chunk_t, FreeList_t>* find_list (size_t size) const;

   void       verify_tree() const;
   // verify that the given chunk is in the tree.
-  bool       verify_chunk_in_free_list(Chunk* tc) const;
+  bool       verify_chunk_in_free_list(Chunk_t* tc) const;
 private:
-  void          verify_tree_helper(TreeList<Chunk>* tl) const;
-  static size_t verify_prev_free_ptrs(TreeList<Chunk>* tl);
+  void          verify_tree_helper(TreeList<Chunk_t, FreeList_t>* tl) const;
+  static size_t verify_prev_free_ptrs(TreeList<Chunk_t, FreeList_t>* tl);

   // Returns the total number of chunks in the list.
-  size_t     total_list_length(TreeList<Chunk>* tl) const;
+  size_t     total_list_length(TreeList<Chunk_t, FreeList_t>* tl) const;
   // Returns the total number of words in the chunks in the tree
   // starting at "tl".
-  size_t     total_size_in_tree(TreeList<Chunk>* tl) const;
+  size_t     total_size_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const;
   // Returns the sum of the square of the size of each block
   // in the tree starting at "tl".
-  double     sum_of_squared_block_sizes(TreeList<Chunk>* const tl) const;
+  double     sum_of_squared_block_sizes(TreeList<Chunk_t, FreeList_t>* const tl) const;
   // Returns the total number of free blocks in the tree starting
   // at "tl".
-  size_t     total_free_blocks_in_tree(TreeList<Chunk>* tl) const;
+  size_t     total_free_blocks_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const;
   size_t     num_free_blocks() const;
-  size_t     treeHeight() const;
-  size_t     tree_height_helper(TreeList<Chunk>* tl) const;
-  size_t     total_nodes_in_tree(TreeList<Chunk>* tl) const;
-  size_t     total_nodes_helper(TreeList<Chunk>* tl) const;
+  size_t     tree_height() const;
+  size_t     tree_height_helper(TreeList<Chunk_t, FreeList_t>* tl) const;
+  size_t     total_nodes_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const;
+  size_t     total_nodes_helper(TreeList<Chunk_t, FreeList_t>* tl) const;

  public:
   // Constructor
-  BinaryTreeDictionary(bool adaptive_freelists, bool splay = false);
-  BinaryTreeDictionary(MemRegion mr, bool adaptive_freelists, bool splay = false);
+  BinaryTreeDictionary() :
+    _total_size(0), _total_free_blocks(0), _root(0) {}
+
+  BinaryTreeDictionary(MemRegion mr);

   // Public accessors
   size_t total_size() const { return _total_size; }
+  size_t total_free_blocks() const { return _total_free_blocks; }

   // Reset the dictionary to the initial conditions with
   // a single free chunk.
@@ -249,23 +253,24 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {

   // Return a chunk of size "size" or greater from
   // the tree.
-  // want a better dynamic splay strategy for the future.
-  Chunk* get_chunk(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither) {
-    FreeBlockDictionary<Chunk>::verify_par_locked();
-    Chunk* res = get_chunk_from_tree(size, dither, splay());
+  Chunk_t* get_chunk(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither) {
+    FreeBlockDictionary<Chunk_t>::verify_par_locked();
+    Chunk_t* res = get_chunk_from_tree(size, dither);
     assert(res == NULL || res->is_free(),
            "Should be returning a free chunk");
+    assert(dither != FreeBlockDictionary<Chunk_t>::exactly ||
+           res->size() == size, "Not correct size");
     return res;
   }

-  void return_chunk(Chunk* chunk) {
-    FreeBlockDictionary<Chunk>::verify_par_locked();
+  void return_chunk(Chunk_t* chunk) {
+    FreeBlockDictionary<Chunk_t>::verify_par_locked();
     insert_chunk_in_tree(chunk);
   }

-  void remove_chunk(Chunk* chunk) {
-    FreeBlockDictionary<Chunk>::verify_par_locked();
-    remove_chunk_from_tree((TreeChunk<Chunk>*)chunk);
+  void remove_chunk(Chunk_t* chunk) {
+    FreeBlockDictionary<Chunk_t>::verify_par_locked();
+    remove_chunk_from_tree((TreeChunk<Chunk_t, FreeList_t>*)chunk);
     assert(chunk->is_free(), "Should still be a free chunk");
   }
@@ -281,19 +286,19 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
   }

   size_t min_size() const {
-    return min_tree_chunk_size;
+    return TreeChunk<Chunk_t, FreeList_t>::min_size();
   }

   double sum_of_squared_block_sizes() const {
     return sum_of_squared_block_sizes(root());
   }

-  Chunk* find_chunk_ends_at(HeapWord* target) const;
+  Chunk_t* find_chunk_ends_at(HeapWord* target) const;

   // Find the list with size "size" in the binary tree and update
   // the statistics in the list according to "split" (chunk was
   // split or coalesce) and "birth" (chunk was added or removed).
-  void dict_census_udpate(size_t size, bool split, bool birth);
+  void dict_census_update(size_t size, bool split, bool birth);
   // Return true if the dictionary is overpopulated (more chunks of
   // this size than desired) for size "size".
   bool coal_dict_over_populated(size_t size);
@@ -307,7 +312,7 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
   // statistics for the sweep.
   void       end_sweep_dict_census(double splitSurplusPercent);
   // Return the largest free chunk in the tree.
-  Chunk* find_largest_dict() const;
+  Chunk_t* find_largest_dict() const;
   // Accessors for statistics
   void       set_tree_surplus(double splitSurplusPercent);
   void       set_tree_hints(void);
...
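
[Editor's note] For orientation, this is roughly how a client drives the interface declared above. The wrapper function and the size are invented, but get_chunk(), return_chunk() and the atLeast/exactly dither values are the API shown in the header:

// Hypothetical CMS-side caller (not part of this commit).
void example(BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>* dict) {
  size_t word_size = 64;  // invented request size, in heap words
  FreeChunk* fc = dict->get_chunk(word_size,
                                  FreeBlockDictionary<FreeChunk>::atLeast);
  if (fc == NULL) return;                      // nothing big enough in the tree
  assert(fc->size() >= word_size, "atLeast contract");
  // A real caller would carve off the tail and keep the prefix; here we
  // simply hand the chunk back, so the net effect is a no-op.
  dict->return_chunk(fc);
}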
@@ -27,6 +27,8 @@
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #endif // SERIALGC
 #include "memory/freeBlockDictionary.hpp"
+#include "memory/metablock.hpp"
+#include "memory/metachunk.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "thread_linux.inline.hpp"
 #endif
@@ -62,6 +64,9 @@ template <class Chunk> void FreeBlockDictionary<Chunk>::verify_par_locked() cons
 }
 #endif

+template class FreeBlockDictionary<Metablock>;
+template class FreeBlockDictionary<Metachunk>;
+
 #ifndef SERIALGC
 // Explicitly instantiate for FreeChunk
 template class FreeBlockDictionary<FreeChunk>;
...
@@ -66,7 +66,7 @@ class FreeBlockDictionary: public CHeapObj<mtGC> {
   virtual void       reset(HeapWord* addr, size_t size) = 0;
   virtual void       reset() = 0;

-  virtual void       dict_census_udpate(size_t size, bool split, bool birth) = 0;
+  virtual void       dict_census_update(size_t size, bool split, bool birth) = 0;
   virtual bool       coal_dict_over_populated(size_t size) = 0;
   virtual void       begin_sweep_dict_census(double coalSurplusPercent,
                        float inter_sweep_current, float inter_sweep_estimate,
...
@@ -25,6 +25,8 @@
 #include "precompiled.hpp"
 #include "memory/freeBlockDictionary.hpp"
 #include "memory/freeList.hpp"
+#include "memory/metablock.hpp"
+#include "memory/metachunk.hpp"
 #include "memory/sharedHeap.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/mutex.hpp"
@@ -49,8 +51,6 @@ FreeList<Chunk>::FreeList() :
 {
   _size  = 0;
   _count = 0;
-  _hint  = 0;
-  init_statistics();
 }

 template <class Chunk>
@@ -62,34 +62,50 @@ FreeList<Chunk>::FreeList(Chunk* fc) :
 {
   _size  = fc->size();
   _count = 1;
-  _hint  = 0;
-  init_statistics();
-#ifndef PRODUCT
-  _allocation_stats.set_returned_bytes(size() * HeapWordSize);
-#endif
 }

 template <class Chunk>
-void FreeList<Chunk>::reset(size_t hint) {
+void FreeList<Chunk>::link_head(Chunk* v) {
+  assert_proper_lock_protection();
+  set_head(v);
+  // If this method is not used (just set the head instead),
+  // this check can be avoided.
+  if (v != NULL) {
+    v->link_prev(NULL);
+  }
+}
+
+template <class Chunk>
+void FreeList<Chunk>::reset() {
+  // Don't set the _size to 0 because this method is
+  // used with a existing list that has a size but which has
+  // been emptied.
+  // Don't clear the _protecting_lock of an existing list.
   set_count(0);
   set_head(NULL);
   set_tail(NULL);
-  set_hint(hint);
 }

 template <class Chunk>
-void FreeList<Chunk>::init_statistics(bool split_birth) {
-  _allocation_stats.initialize(split_birth);
+void FreeList<Chunk>::initialize() {
+#ifdef ASSERT
+  // Needed early because it might be checked in other initializing code.
+  set_protecting_lock(NULL);
+#endif
+  reset();
+  set_size(0);
 }
-template <class Chunk>
-Chunk* FreeList<Chunk>::get_chunk_at_head() {
+template <class Chunk_t>
+Chunk_t* FreeList<Chunk_t>::get_chunk_at_head() {
   assert_proper_lock_protection();
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
-  Chunk* fc = head();
+  Chunk_t* fc = head();
   if (fc != NULL) {
-    Chunk* nextFC = fc->next();
+    Chunk_t* nextFC = fc->next();
     if (nextFC != NULL) {
       // The chunk fc being removed has a "next".  Set the "next" to the
       // "prev" of fc.
@@ -197,11 +213,6 @@ void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
     link_tail(chunk);
   }
   increment_count(); // of # of chunks in list
-  DEBUG_ONLY(
-    if (record_return) {
-      increment_returned_bytes_by(size()*HeapWordSize);
-    }
-  )
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
   assert(head() == NULL || head()->size() == size(), "wrong item on list");
@@ -233,11 +244,6 @@ void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
   }
   link_tail(chunk);
   increment_count(); // of # of chunks in list
-  DEBUG_ONLY(
-    if (record_return) {
-      increment_returned_bytes_by(size()*HeapWordSize);
-    }
-  )
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
   assert(head() == NULL || head()->size() == size(), "wrong item on list");
@@ -273,7 +279,7 @@ void FreeList<Chunk>::prepend(FreeList<Chunk>* fl) {
   }
 }

-// verify_chunk_in_free_list() is used to verify that an item is in this free list.
+// verify_chunk_in_free_lists() is used to verify that an item is in this free list.
 // It is used as a debugging aid.
 template <class Chunk>
 bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const {
@@ -293,41 +299,15 @@ bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const {
 }

 #ifndef PRODUCT
-template <class Chunk>
-void FreeList<Chunk>::verify_stats() const {
-  // The +1 of the LH comparand is to allow some "looseness" in
-  // checking: we usually call this interface when adding a block
-  // and we'll subsequently update the stats; we cannot update the
-  // stats beforehand because in the case of the large-block BT
-  // dictionary for example, this might be the first block and
-  // in that case there would be no place that we could record
-  // the stats (which are kept in the block itself).
-  assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
-          + _allocation_stats.coal_births() + 1)   // Total Production Stock + 1
-         >= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
-             + (ssize_t)count()),                  // Total Current Stock + depletion
-         err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
-                 " violates Conservation Principle: "
-                 "prev_sweep(" SIZE_FORMAT ")"
-                 " + split_births(" SIZE_FORMAT ")"
-                 " + coal_births(" SIZE_FORMAT ") + 1 >= "
-                 " split_deaths(" SIZE_FORMAT ")"
-                 " coal_deaths(" SIZE_FORMAT ")"
-                 " + count(" SSIZE_FORMAT ")",
-                 this, _size, _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
-                 _allocation_stats.split_births(), _allocation_stats.split_deaths(),
-                 _allocation_stats.coal_deaths(), count()));
-}
-
 template <class Chunk>
 void FreeList<Chunk>::assert_proper_lock_protection_work() const {
-  assert(_protecting_lock != NULL, "Don't call this directly");
+  assert(protecting_lock() != NULL, "Don't call this directly");
   assert(ParallelGCThreads > 0, "Don't call this directly");
   Thread* thr = Thread::current();
   if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
     // assert that we are holding the freelist lock
   } else if (thr->is_GC_task_thread()) {
-    assert(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED");
+    assert(protecting_lock()->owned_by_self(), "FreeList RACE DETECTED");
   } else if (thr->is_Java_thread()) {
     assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
   } else {
@@ -350,21 +330,17 @@ void FreeList<Chunk>::print_labels_on(outputStream* st, const char* c) {
 // to the call is a non-null string, it is printed in the first column;
 // otherwise, if the argument is null (the default), then the size of the
 // (free list) block is printed in the first column.
-template <class Chunk>
-void FreeList<Chunk>::print_on(outputStream* st, const char* c) const {
+template <class Chunk_t>
+void FreeList<Chunk_t>::print_on(outputStream* st, const char* c) const {
   if (c != NULL) {
     st->print("%16s", c);
   } else {
     st->print(SIZE_FORMAT_W(16), size());
   }
-  st->print("\t"
-           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
-           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
-           bfr_surp(), surplus(), desired(), prev_sweep(), before_sweep(),
-           count(), coal_births(), coal_deaths(), split_births(), split_deaths());
 }

+template class FreeList<Metablock>;
+template class FreeList<Metachunk>;
 #ifndef SERIALGC
+// Needs to be after the definitions have been seen.
 template class FreeList<FreeChunk>;
 #endif // SERIALGC
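
[Editor's note] The "after the definitions have been seen" comment matters: an explicit instantiation only emits code for member definitions that are visible at that point, which is why all three "template class" lines sit at the bottom of the file. A toy demonstration of the rule (names invented):

template <class T> struct Box {
  void touch();
};

// "template struct Box<int>;" placed here would compile, but it could not
// emit Box<int>::touch() (its definition has not been seen yet), leaving an
// unresolved symbol for any caller at link time.

template <class T> void Box<T>::touch() {}

template struct Box<int>;  // correct: every member definition is visible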
@@ -40,23 +40,19 @@ class CompactibleFreeListSpace;
 // for that implementation.

 class Mutex;
-template <class Chunk> class TreeList;
-template <class Chunk> class PrintTreeCensusClosure;

-template <class Chunk>
+template <class Chunk_t>
 class FreeList VALUE_OBJ_CLASS_SPEC {
   friend class CompactibleFreeListSpace;
   friend class VMStructs;
-  friend class PrintTreeCensusClosure<Chunk>;

  private:
-  Chunk*        _head;          // Head of list of free chunks
-  Chunk*        _tail;          // Tail of list of free chunks
+  Chunk_t*      _head;          // Head of list of free chunks
+  Chunk_t*      _tail;          // Tail of list of free chunks
   size_t        _size;          // Size in Heap words of each chunk
   ssize_t       _count;         // Number of entries in list
-  size_t        _hint;          // next larger size list with a positive surplus
-  AllocationStats _allocation_stats; // allocation-related statistics
+ protected:

 #ifdef ASSERT
   Mutex*        _protecting_lock;
@@ -71,10 +67,6 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
 #endif
   }

-  // Initialize the allocation statistics.
- protected:
-  void init_statistics(bool split_birth = false);
-  void set_count(ssize_t v) { _count = v;}
   void increment_count() {
     _count++;
   }
@@ -89,52 +81,48 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
   // Construct a list without any entries.
   FreeList();
   // Construct a list with "fc" as the first (and lone) entry in the list.
-  FreeList(Chunk* fc);
+  FreeList(Chunk_t* fc);

-  // Reset the head, tail, hint, and count of a free list.
-  void reset(size_t hint);
+  // Do initialization
+  void initialize();
+
+  // Reset the head, tail, and count of a free list.
+  void reset();

   // Declare the current free list to be protected by the given lock.
 #ifdef ASSERT
-  void set_protecting_lock(Mutex* protecting_lock) {
-    _protecting_lock = protecting_lock;
+  Mutex* protecting_lock() const { return _protecting_lock; }
+  void set_protecting_lock(Mutex* v) {
+    _protecting_lock = v;
   }
 #endif
   // Accessors.
-  Chunk* head() const {
+  Chunk_t* head() const {
     assert_proper_lock_protection();
     return _head;
   }
-  void set_head(Chunk* v) {
+  void set_head(Chunk_t* v) {
     assert_proper_lock_protection();
     _head = v;
     assert(!_head || _head->size() == _size, "bad chunk size");
   }
   // Set the head of the list and set the prev field of non-null
   // values to NULL.
-  void link_head(Chunk* v) {
-    assert_proper_lock_protection();
-    set_head(v);
-    // If this method is not used (just set the head instead),
-    // this check can be avoided.
-    if (v != NULL) {
-      v->link_prev(NULL);
-    }
-  }
+  void link_head(Chunk_t* v);

-  Chunk* tail() const {
+  Chunk_t* tail() const {
     assert_proper_lock_protection();
     return _tail;
   }
-  void set_tail(Chunk* v) {
+  void set_tail(Chunk_t* v) {
     assert_proper_lock_protection();
     _tail = v;
     assert(!_tail || _tail->size() == _size, "bad chunk size");
   }
   // Set the tail of the list and set the next field of non-null
   // values to NULL.
-  void link_tail(Chunk* v) {
+  void link_tail(Chunk_t* v) {
     assert_proper_lock_protection();
     set_tail(v);
     if (v != NULL) {
@@ -152,174 +140,45 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
     assert_proper_lock_protection();
     _size = v;
   }
-  ssize_t count() const {
-    return _count;
-  }
-  size_t hint() const {
-    return _hint;
-  }
-  void set_hint(size_t v) {
-    assert_proper_lock_protection();
-    assert(v == 0 || _size < v, "Bad hint");
-    _hint = v;
-  }
-
-  // Accessors for statistics
-  AllocationStats* allocation_stats() {
-    assert_proper_lock_protection();
-    return &_allocation_stats;
-  }
-
-  ssize_t desired() const {
-    return _allocation_stats.desired();
-  }
-  void set_desired(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_desired(v);
-  }
-  void compute_desired(float inter_sweep_current,
-                       float inter_sweep_estimate,
-                       float intra_sweep_estimate) {
-    assert_proper_lock_protection();
-    _allocation_stats.compute_desired(_count,
-                                      inter_sweep_current,
-                                      inter_sweep_estimate,
-                                      intra_sweep_estimate);
-  }
-  ssize_t coal_desired() const {
-    return _allocation_stats.coal_desired();
-  }
-  void set_coal_desired(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coal_desired(v);
-  }
-
-  ssize_t surplus() const {
-    return _allocation_stats.surplus();
-  }
-  void set_surplus(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_surplus(v);
-  }
-  void increment_surplus() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_surplus();
-  }
-  void decrement_surplus() {
-    assert_proper_lock_protection();
-    _allocation_stats.decrement_surplus();
-  }
-
-  ssize_t bfr_surp() const {
-    return _allocation_stats.bfr_surp();
-  }
-  void set_bfr_surp(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_bfr_surp(v);
-  }
-  ssize_t prev_sweep() const {
-    return _allocation_stats.prev_sweep();
-  }
-  void set_prev_sweep(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_prev_sweep(v);
-  }
-  ssize_t before_sweep() const {
-    return _allocation_stats.before_sweep();
-  }
-  void set_before_sweep(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_before_sweep(v);
-  }
-
-  ssize_t coal_births() const {
-    return _allocation_stats.coal_births();
-  }
-  void set_coal_births(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coal_births(v);
-  }
-  void increment_coal_births() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_coal_births();
-  }
-
-  ssize_t coal_deaths() const {
-    return _allocation_stats.coal_deaths();
-  }
-  void set_coal_deaths(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coal_deaths(v);
-  }
-  void increment_coal_deaths() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_coal_deaths();
-  }
-
-  ssize_t split_births() const {
-    return _allocation_stats.split_births();
-  }
-  void set_split_births(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_split_births(v);
-  }
-  void increment_split_births() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_split_births();
-  }
-
-  ssize_t split_deaths() const {
-    return _allocation_stats.split_deaths();
-  }
-  void set_split_deaths(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_split_deaths(v);
-  }
-  void increment_split_deaths() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_split_deaths();
-  }
-
-  NOT_PRODUCT(
-    // For debugging.  The "_returned_bytes" in all the lists are summed
-    // and compared with the total number of bytes swept during a
-    // collection.
-    size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
-    void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
-    void increment_returned_bytes_by(size_t v) {
-      _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
-    }
-  )
+  ssize_t count() const { return _count; }
+  void set_count(ssize_t v) { _count = v;}
+
+  size_t get_better_size() { return size(); }
+
+  size_t returned_bytes() const { ShouldNotReachHere(); return 0; }
+  void set_returned_bytes(size_t v) {}
+  void increment_returned_bytes_by(size_t v) {}
   // Unlink head of list and return it.  Returns NULL if
   // the list is empty.
-  Chunk* get_chunk_at_head();
+  Chunk_t* get_chunk_at_head();

   // Remove the first "n" or "count", whichever is smaller, chunks from the
   // list, setting "fl", which is required to be empty, to point to them.
-  void getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl);
+  void getFirstNChunksFromList(size_t n, FreeList<Chunk_t>* fl);

   // Unlink this chunk from it's free list
-  void remove_chunk(Chunk* fc);
+  void remove_chunk(Chunk_t* fc);

   // Add this chunk to this free list.
-  void return_chunk_at_head(Chunk* fc);
-  void return_chunk_at_tail(Chunk* fc);
+  void return_chunk_at_head(Chunk_t* fc);
+  void return_chunk_at_tail(Chunk_t* fc);

   // Similar to returnChunk* but also records some diagnostic
   // information.
-  void return_chunk_at_head(Chunk* fc, bool record_return);
-  void return_chunk_at_tail(Chunk* fc, bool record_return);
+  void return_chunk_at_head(Chunk_t* fc, bool record_return);
+  void return_chunk_at_tail(Chunk_t* fc, bool record_return);

   // Prepend "fl" (whose size is required to be the same as that of "this")
   // to the front of "this" list.
-  void prepend(FreeList<Chunk>* fl);
+  void prepend(FreeList<Chunk_t>* fl);

   // Verify that the chunk is in the list.
   // found.  Return NULL if "fc" is not found.
-  bool verify_chunk_in_free_list(Chunk* fc) const;
+  bool verify_chunk_in_free_list(Chunk_t* fc) const;

   // Stats verification
-  void verify_stats() const PRODUCT_RETURN;
+  // void verify_stats() const { ShouldNotReachHere(); };

   // Printing support
   static void print_labels_on(outputStream* st, const char* c);
...
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_METABLOCK_HPP
#define SHARE_VM_MEMORY_METABLOCK_HPP
// A Metablock is the unit of allocation from a Chunk. It is initialized
// with the size of the requested allocation. That size is overwritten
// once the allocation returns.
//
// A Metablock may be reused by its SpaceManager but is never moved between
// SpaceManagers. There is no explicit link to the Metachunk
// from which it was allocated. Metablock may be deallocated and
// put on a freelist but the space is never freed, rather
// the Metachunk it is a part of will be deallocated when it's
// associated class loader is collected.
class Metablock VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
// Used to align the allocation (see below).
union block_t {
void* _data[3];
struct header_t {
size_t _word_size;
Metablock* _next;
Metablock* _prev;
} _header;
} _block;
static size_t _min_block_byte_size;
static size_t _overhead;
typedef union block_t Block;
typedef struct header_t Header;
const Block* block() const { return &_block; }
const Block::header_t* header() const { return &(block()->_header); }
public:
static Metablock* initialize(MetaWord* p, size_t word_size);
// This places the body of the block at a 2 word boundary
// because every block starts on a 2 word boundary. Work out
// how to make the body on a 2 word boundary if the block
// starts on an arbitrary boundary. JJJ
size_t word_size() const { return header()->_word_size; }
void set_word_size(size_t v) { _block._header._word_size = v; }
size_t size() const volatile { return _block._header._word_size; }
void set_size(size_t v) { _block._header._word_size = v; }
Metablock* next() const { return header()->_next; }
void set_next(Metablock* v) { _block._header._next = v; }
Metablock* prev() const { return header()->_prev; }
void set_prev(Metablock* v) { _block._header._prev = v; }
static size_t min_block_byte_size() { return _min_block_byte_size; }
static size_t overhead() { return _overhead; }
bool is_free() { return header()->_word_size != 0; }
void clear_next() { set_next(NULL); }
void link_prev(Metablock* ptr) { set_prev(ptr); }
uintptr_t* end() { return ((uintptr_t*) this) + size(); }
bool cantCoalesce() const { return false; }
void link_next(Metablock* ptr) { set_next(ptr); }
void link_after(Metablock* ptr){
link_next(ptr);
if (ptr != NULL) ptr->link_prev(this);
}
// Should not be needed in a free list of Metablocks
void markNotFree() { ShouldNotReachHere(); }
// Debug support
#ifdef ASSERT
void* prev_addr() const { return (void*)&_block._header._prev; }
void* next_addr() const { return (void*)&_block._header._next; }
void* size_addr() const { return (void*)&_block._header._word_size; }
#endif
bool verify_chunk_in_free_list(Metablock* tc) const { return true; }
bool verify_par_locked() { return true; }
void assert_is_mangled() const { /* Don't check */ }
};
#endif // SHARE_VM_MEMORY_METABLOCK_HPP
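The union above is the entire mechanism: while a block is live its three words belong to the caller, and once it is deallocated those same words are reinterpreted in place as the freelist header (size, next, prev). A minimal standalone sketch of that overlay (plain C++, not the HotSpot class; the raw buffer and names below are illustrative):

#include <cstddef>
#include <cstdio>

struct Block {
  union {
    void* _data[3];          // payload words while the block is live
    struct {                 // freelist header once it is deallocated
      size_t _word_size;
      Block* _next;
      Block* _prev;
    } _header;
  } _block;

  void   set_word_size(size_t v) { _block._header._word_size = v; }
  size_t word_size() const       { return _block._header._word_size; }
  void   set_next(Block* v)      { _block._header._next = v; }
  Block* next() const            { return _block._header._next; }
};

int main() {
  void* raw[8] = {};                          // pretend: an 8-word freed allocation
  Block* b = reinterpret_cast<Block*>(raw);   // overlay the header in place, no copy
  b->set_word_size(8);
  b->set_next(NULL);
  std::printf("free block of %zu words\n", b->word_size());
  return 0;
}

This is also why _min_block_byte_size is initialized from sizeof(Metablock) in the metaspace.cpp hunks below: any allocation smaller than the header cannot be threaded onto a freelist.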
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_METACHUNK_HPP
#define SHARE_VM_MEMORY_METACHUNK_HPP
// Metachunk - Quantum of allocation from a VirtualSpace
// Metachunks are reused (when freed they are put on a global freelist) and
// have no permanent association with a SpaceManager.
// +--------------+ <- end
// | | --+ ---+
// | | | free |
// | | | |
// | | | | capacity
// | | | |
// | | <- top --+ |
// | | ---+ |
// | | | used |
// | | | |
// | | | |
// +--------------+ <- bottom ---+ ---+
class Metachunk VALUE_OBJ_CLASS_SPEC {
// link to support lists of chunks
Metachunk* _next;
Metachunk* _prev;
MetaWord* _bottom;
MetaWord* _end;
MetaWord* _top;
size_t _word_size;
// Used in a guarantee() so included in the Product builds
// even though it is only for debugging.
bool _is_free;
// Metachunks are allocated out of a MetadataVirtualSpace and
// use some of its space to describe themselves (plus alignment
// considerations). Metadata is allocated in the rest of the chunk.
// This size is the overhead of maintaining the Metachunk within
// the space.
static size_t _overhead;
void set_bottom(MetaWord* v) { _bottom = v; }
void set_end(MetaWord* v) { _end = v; }
void set_top(MetaWord* v) { _top = v; }
void set_word_size(size_t v) { _word_size = v; }
public:
#ifdef ASSERT
Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), _is_free(false) {}
#else
Metachunk() : _bottom(NULL), _end(NULL), _top(NULL) {}
#endif
// Used to add a Metachunk to a list of Metachunks
void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");}
MetaWord* allocate(size_t word_size);
static Metachunk* initialize(MetaWord* ptr, size_t word_size);
// Accessors
Metachunk* next() const { return _next; }
Metachunk* prev() const { return _prev; }
MetaWord* bottom() const { return _bottom; }
MetaWord* end() const { return _end; }
MetaWord* top() const { return _top; }
size_t word_size() const { return _word_size; }
size_t size() const volatile { return _word_size; }
void set_size(size_t v) { _word_size = v; }
bool is_free() { return _is_free; }
void set_is_free(bool v) { _is_free = v; }
static size_t overhead() { return _overhead; }
void clear_next() { set_next(NULL); }
void link_prev(Metachunk* ptr) { set_prev(ptr); }
uintptr_t* end() { return ((uintptr_t*) this) + size(); }
bool cantCoalesce() const { return false; }
void link_next(Metachunk* ptr) { set_next(ptr); }
void link_after(Metachunk* ptr){
link_next(ptr);
if (ptr != NULL) ptr->link_prev(this);
}
// Reset top to bottom so chunk can be reused.
void reset_empty() { _top = (_bottom + _overhead); }
bool is_empty() { return _top == (_bottom + _overhead); }
// used (has been allocated)
// free (available for future allocations)
// capacity (total size of chunk)
size_t used_word_size();
size_t free_word_size();
size_t capacity_word_size();
// Debug support
#ifdef ASSERT
void* prev_addr() const { return (void*)&_prev; }
void* next_addr() const { return (void*)&_next; }
void* size_addr() const { return (void*)&_word_size; }
#endif
bool verify_chunk_in_free_list(Metachunk* tc) const { return true; }
bool verify_par_locked() { return true; }
void assert_is_mangled() const { /* Don't check */ }
#ifdef ASSERT
void mangle();
#endif // ASSERT
void print_on(outputStream* st) const;
void verify();
};
#endif // SHARE_VM_MEMORY_METACHUNK_HPP
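The diagram above translates into simple pointer arithmetic: allocation bumps top toward end, used is top - bottom, and free is end - top. A standalone model of that bump-pointer path (illustrative only; MetaWordT and the 64-word buffer are stand-ins, not HotSpot types):

#include <cstddef>
#include <cstdint>
#include <cstdio>

typedef uintptr_t MetaWordT;   // stand-in for HotSpot's MetaWord

struct Chunk {
  MetaWordT* _bottom;
  MetaWordT* _top;             // next free word
  MetaWordT* _end;

  size_t free_word_size() const { return (size_t)(_end - _top); }
  size_t used_word_size() const { return (size_t)(_top - _bottom); }

  MetaWordT* allocate(size_t word_size) {
    if (free_word_size() < word_size) return NULL;  // caller must grow
    MetaWordT* result = _top;
    _top += word_size;                              // bump the pointer
    return result;
  }
};

int main() {
  MetaWordT space[64];
  Chunk c = { space, space, space + 64 };
  MetaWordT* p = c.allocate(10);
  std::printf("allocated %p, used=%zu free=%zu\n",
              (void*)p, c.used_word_size(), c.free_word_size());
  return 0;
}

Note that this commit changes Metachunk::allocate to hand back a raw MetaWord* exactly like this, instead of wrapping the result in a Metablock; the metaspace.cpp hunks below show that switch.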
...@@ -24,9 +24,12 @@ ...@@ -24,9 +24,12 @@
#include "precompiled.hpp" #include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp" #include "gc_interface/collectedHeap.hpp"
#include "memory/binaryTreeDictionary.hpp" #include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp" #include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp" #include "memory/filemap.hpp"
#include "memory/freeList.hpp" #include "memory/freeList.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp" #include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp" #include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp" #include "memory/resourceArea.hpp"
...@@ -37,15 +40,8 @@ ...@@ -37,15 +40,8 @@
#include "utilities/copy.hpp" #include "utilities/copy.hpp"
#include "utilities/debug.hpp" #include "utilities/debug.hpp"
// Define this macro to deallocate Metablock. If not defined, typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
// blocks are not yet deallocated and are only mangled. typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
#undef DEALLOCATE_BLOCKS
// Easily recognizable patterns
// These patterns can be the same in 32bit or 64bit since
// they only have to be easily recognizable.
const void* metaspace_allocation_leader = (void*) 0X11111111;
const void* metaspace_allocation_trailer = (void*) 0X77777777;
// Parameters for stress mode testing // Parameters for stress mode testing
const uint metadata_deallocate_a_lot_block = 10; const uint metadata_deallocate_a_lot_block = 10;
...@@ -53,7 +49,6 @@ const uint metadata_deallocate_a_lock_chunk = 3; ...@@ -53,7 +49,6 @@ const uint metadata_deallocate_a_lock_chunk = 3;
size_t const allocation_from_dictionary_limit = 64 * K; size_t const allocation_from_dictionary_limit = 64 * K;
const size_t metadata_chunk_initialize = 0xf7f7f7f7; const size_t metadata_chunk_initialize = 0xf7f7f7f7;
const size_t metadata_deallocate = 0xf5f5f5f5; const size_t metadata_deallocate = 0xf5f5f5f5;
const size_t metadata_space_manager_allocate = 0xf3f3f3f3;
MetaWord* last_allocated = 0; MetaWord* last_allocated = 0;
...@@ -62,11 +57,12 @@ enum ChunkIndex { ...@@ -62,11 +57,12 @@ enum ChunkIndex {
SmallIndex = 0, SmallIndex = 0,
MediumIndex = 1, MediumIndex = 1,
HumongousIndex = 2, HumongousIndex = 2,
NumberOfFreeLists = 3 NumberOfFreeLists = 2,
NumberOfInUseLists = 3
}; };
static ChunkIndex next_chunk_index(ChunkIndex i) { static ChunkIndex next_chunk_index(ChunkIndex i) {
assert(i < NumberOfFreeLists, "Out of bounds"); assert(i < NumberOfInUseLists, "Out of bounds");
return (ChunkIndex) (i+1); return (ChunkIndex) (i+1);
} }
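The enum split is the heart of this hunk: only the first two indices are backed by linked free lists now, while HumongousIndex survives for per-SpaceManager in-use tracking (free humongous chunks move to a dictionary, below). A standalone illustration of the iteration idiom used throughout the rest of the patch (not HotSpot source; the printf is only for demonstration):

#include <cassert>
#include <cstdio>

enum ChunkIndex {
  SmallIndex = 0,
  MediumIndex = 1,
  HumongousIndex = 2,
  NumberOfFreeLists = 2,    // linked free lists: small, medium
  NumberOfInUseLists = 3    // in-use arrays also track humongous
};

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists);
  return (ChunkIndex)(i + 1);
}

int main() {
  for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists;
       i = next_chunk_index(i)) {
    std::printf("index %d backed by a free list: %s\n",
                (int)i, i < NumberOfFreeLists ? "yes" : "no");
  }
  return 0;
}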
...@@ -100,164 +96,13 @@ bool MetaspaceGC::_should_concurrent_collect = false; ...@@ -100,164 +96,13 @@ bool MetaspaceGC::_should_concurrent_collect = false;
// the Chunk after the header for the Chunk) where as Metachunks // the Chunk after the header for the Chunk) where as Metachunks
// point to space in a VirtualSpace. To replace Metachunks with // point to space in a VirtualSpace. To replace Metachunks with
// Chunks, change Chunks so that they can be allocated out of a VirtualSpace. // Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
// size_t Metablock::_min_block_byte_size = sizeof(Metablock);
// Metablock are the unit of allocation from a Chunk. It contains
// the size of the requested allocation in a debug build.
// Also in a debug build it has a marker before and after the
// body of the block. The address of the body is the address returned
// by the allocation.
//
// Layout in a debug build. In a product build only the body is present.
//
// +-----------+-----------+------------+ +-----------+
// | word size | leader | body | ... | trailer |
// +-----------+-----------+------------+ +-----------+
//
// A Metablock may be reused by its SpaceManager but are never moved between
// SpaceManagers. There is no explicit link to the Metachunk
// from which it was allocated. Metablock are not deallocated, rather
// the Metachunk it is a part of will be deallocated when it's
// associated class loader is collected.
//
// When the word size of a block is passed in to the deallocation
// call the word size no longer needs to be part of a Metablock.
class Metablock {
friend class VMStructs;
private:
// Used to align the allocation (see below) and for debugging.
#ifdef ASSERT
struct {
size_t _word_size;
void* _leader;
} _header;
void* _data[1];
#endif
static size_t _overhead;
#ifdef ASSERT
void set_word_size(size_t v) { _header._word_size = v; }
void* leader() { return _header._leader; }
void* trailer() {
jlong index = (jlong) _header._word_size - sizeof(_header)/BytesPerWord - 1;
assert(index > 0, err_msg("Bad indexling of trailer %d", index));
void** ptr = &_data[index];
return *ptr;
}
void set_leader(void* v) { _header._leader = v; }
void set_trailer(void* v) {
void** ptr = &_data[_header._word_size - sizeof(_header)/BytesPerWord - 1];
*ptr = v;
}
public:
size_t word_size() { return _header._word_size; }
#endif
public:
static Metablock* initialize(MetaWord* p, size_t word_size);
// This places the body of the block at a 2 word boundary
// because every block starts on a 2 word boundary. Work out
// how to make the body on a 2 word boundary if the block
// starts on a arbitrary boundary. JJJ
#ifdef ASSERT
MetaWord* data() { return (MetaWord*) &_data[0]; }
#else
MetaWord* data() { return (MetaWord*) this; }
#endif
static Metablock* metablock_from_data(MetaWord* p) {
#ifdef ASSERT #ifdef ASSERT
size_t word_offset = offset_of(Metablock, _data)/BytesPerWord; size_t Metablock::_overhead =
Metablock* result = (Metablock*) (p - word_offset); Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
return result;
#else #else
return (Metablock*) p; size_t Metablock::_overhead = 0;
#endif #endif
}
static size_t overhead() { return _overhead; }
void verify();
};
// Metachunk - Quantum of allocation from a Virtualspace
// Metachunks are reused (when freed are put on a global freelist) and
// have no permanent association to a SpaceManager.
// +--------------+ <- end
// | | --+ ---+
// | | | free |
// | | | |
// | | | | capacity
// | | | |
// | | <- top --+ |
// | | ---+ |
// | | | used |
// | | | |
// | | | |
// +--------------+ <- bottom ---+ ---+
class Metachunk VALUE_OBJ_CLASS_SPEC {
// link to support lists of chunks
Metachunk* _next;
MetaWord* _bottom;
MetaWord* _end;
MetaWord* _top;
size_t _word_size;
// Metachunks are allocated out of a MetadataVirtualSpace and
// and use some of its space to describe itself (plus alignment
// considerations). Metadata is allocated in the rest of the chunk.
// This size is the overhead of maintaining the Metachunk within
// the space.
static size_t _overhead;
void set_bottom(MetaWord* v) { _bottom = v; }
void set_end(MetaWord* v) { _end = v; }
void set_top(MetaWord* v) { _top = v; }
void set_word_size(size_t v) { _word_size = v; }
public:
// Used to add a Metachunk to a list of Metachunks
void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
Metablock* allocate(size_t word_size);
static Metachunk* initialize(MetaWord* ptr, size_t word_size);
// Accessors
Metachunk* next() const { return _next; }
MetaWord* bottom() const { return _bottom; }
MetaWord* end() const { return _end; }
MetaWord* top() const { return _top; }
size_t word_size() const { return _word_size; }
static size_t overhead() { return _overhead; }
// Reset top to bottom so chunk can be reused.
void reset_empty() { _top = (_bottom + _overhead); }
bool is_empty() { return _top == (_bottom + _overhead); }
// used (has been allocated)
// free (available for future allocations)
// capacity (total size of chunk)
size_t used_word_size();
size_t free_word_size();
size_t capacity_word_size();
#ifdef ASSERT
void mangle() {
// Mangle the payload of the chunk and not the links that
// maintain list of chunks.
HeapWord* start = (HeapWord*)(bottom() + overhead());
size_t word_size = capacity_word_size() - overhead();
Copy::fill_to_words(start, word_size, metadata_chunk_initialize);
}
#endif // ASSERT
void print_on(outputStream* st) const;
void verify();
};
// Pointer to list of Metachunks. // Pointer to list of Metachunks.
...@@ -292,7 +137,10 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC { ...@@ -292,7 +137,10 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC {
// SmallChunk // SmallChunk
// MediumChunk // MediumChunk
// HumongousChunk // HumongousChunk
ChunkList _free_chunks[3]; ChunkList _free_chunks[NumberOfFreeLists];
// HumongousChunk
ChunkTreeDictionary _humongous_dictionary;
// ChunkManager in all lists of this type // ChunkManager in all lists of this type
size_t _free_chunks_total; size_t _free_chunks_total;
...@@ -337,7 +185,9 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC { ...@@ -337,7 +185,9 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC {
} }
ChunkList* free_medium_chunks() { return &_free_chunks[1]; } ChunkList* free_medium_chunks() { return &_free_chunks[1]; }
ChunkList* free_small_chunks() { return &_free_chunks[0]; } ChunkList* free_small_chunks() { return &_free_chunks[0]; }
ChunkList* free_humongous_chunks() { return &_free_chunks[2]; } ChunkTreeDictionary* humongous_dictionary() {
return &_humongous_dictionary;
}
ChunkList* free_chunks(ChunkIndex index); ChunkList* free_chunks(ChunkIndex index);
...@@ -356,41 +206,35 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC { ...@@ -356,41 +206,35 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC {
void locked_print_free_chunks(outputStream* st); void locked_print_free_chunks(outputStream* st);
void locked_print_sum_free_chunks(outputStream* st); void locked_print_sum_free_chunks(outputStream* st);
void print_on(outputStream* st);
}; };
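Routing in the new ChunkManager is therefore two-tier: exact-size requests for small and medium chunks pop a LIFO list, and anything humongous is answered best-fit from an ordered dictionary. A sketch of that routing under stated assumptions (std::multimap models the BinaryTreeDictionary; the chunk sizes and Fake* names are illustrative, not HotSpot identifiers):

#include <cstddef>
#include <map>
#include <utility>

static const size_t SmallChunkWords  = 512;       // illustrative sizes
static const size_t MediumChunkWords = 8 * 1024;

struct FakeChunk { size_t words; FakeChunk* next; };

struct FakeChunkManager {
  FakeChunk* _small_free;                         // LIFO free lists
  FakeChunk* _medium_free;
  std::multimap<size_t, FakeChunk*> _humongous;   // models the tree dictionary

  FakeChunk* get(size_t words) {
    if (words == SmallChunkWords)  return pop(_small_free);
    if (words == MediumChunkWords) return pop(_medium_free);
    // Humongous: first chunk with size >= words ("atLeast" semantics).
    std::multimap<size_t, FakeChunk*>::iterator it = _humongous.lower_bound(words);
    if (it == _humongous.end()) return NULL;      // caller maps new virtual space
    FakeChunk* c = it->second;
    _humongous.erase(it);
    return c;
  }

 private:
  static FakeChunk* pop(FakeChunk*& head) {
    FakeChunk* c = head;
    if (c != NULL) { head = c->next; c->next = NULL; }
    return c;
  }
};

int main() {
  FakeChunkManager cm;
  cm._small_free = NULL; cm._medium_free = NULL;
  FakeChunk big = { 20000, NULL };
  cm._humongous.insert(std::make_pair(big.words, &big));
  FakeChunk* got = cm.get(17000);   // best fit: the 20000-word chunk, 3000 words wasted
  return got == &big ? 0 : 1;
}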
// Used to manage the free list of Metablocks (a block corresponds // Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata). // to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC { class BlockFreelist VALUE_OBJ_CLASS_SPEC {
#ifdef DEALLOCATE_BLOCKS BlockTreeDictionary* _dictionary;
BinaryTreeDictionary<Metablock>* _dictionary; static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
#endif
static Metablock* initialize_free_chunk(Metablock* block, size_t word_size);
#ifdef DEALLOCATE_BLOCKS
// Accessors // Accessors
BinaryTreeDictionary<Metablock>* dictionary() const { return _dictionary; } BlockTreeDictionary* dictionary() const { return _dictionary; }
#endif
public: public:
BlockFreelist(); BlockFreelist();
~BlockFreelist(); ~BlockFreelist();
// Get and return a block to the free list // Get and return a block to the free list
Metablock* get_block(size_t word_size); MetaWord* get_block(size_t word_size);
void return_block(Metablock* block, size_t word_size); void return_block(MetaWord* p, size_t word_size);
size_t totalSize() { size_t total_size() {
#ifdef DEALLOCATE_BLOCKS if (dictionary() == NULL) {
if (dictionary() == NULL) {
return 0;
} else {
return dictionary()->totalSize();
}
#else
return 0; return 0;
#endif } else {
return dictionary()->total_size();
} }
}
void print_on(outputStream* st) const; void print_on(outputStream* st) const;
}; };
...@@ -600,7 +444,6 @@ class VirtualSpaceList : public CHeapObj<mtClass> { ...@@ -600,7 +444,6 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
}; };
}; };
class Metadebug : AllStatic { class Metadebug : AllStatic {
// Debugging support for Metaspaces // Debugging support for Metaspaces
static int _deallocate_block_a_lot_count; static int _deallocate_block_a_lot_count;
...@@ -655,7 +498,7 @@ class SpaceManager : public CHeapObj<mtClass> { ...@@ -655,7 +498,7 @@ class SpaceManager : public CHeapObj<mtClass> {
// List of chunks in use by this SpaceManager. Allocations // List of chunks in use by this SpaceManager. Allocations
// are done from the current chunk. The list is used for deallocating // are done from the current chunk. The list is used for deallocating
// chunks when the SpaceManager is freed. // chunks when the SpaceManager is freed.
Metachunk* _chunks_in_use[NumberOfFreeLists]; Metachunk* _chunks_in_use[NumberOfInUseLists];
Metachunk* _current_chunk; Metachunk* _current_chunk;
// Virtual space where allocation comes from. // Virtual space where allocation comes from.
...@@ -700,24 +543,6 @@ class SpaceManager : public CHeapObj<mtClass> { ...@@ -700,24 +543,6 @@ class SpaceManager : public CHeapObj<mtClass> {
// Add chunk to the list of chunks in use // Add chunk to the list of chunks in use
void add_chunk(Metachunk* v, bool make_current); void add_chunk(Metachunk* v, bool make_current);
// Debugging support
void verify_chunks_in_use_index(ChunkIndex index, Metachunk* v) {
switch (index) {
case 0:
assert(v->word_size() == SmallChunk, "Not a SmallChunk");
break;
case 1:
assert(v->word_size() == MediumChunk, "Not a MediumChunk");
break;
case 2:
assert(v->word_size() > MediumChunk, "Not a HumongousChunk");
break;
default:
assert(false, "Wrong list.");
}
}
protected:
Mutex* lock() const { return _lock; } Mutex* lock() const { return _lock; }
public: public:
...@@ -751,10 +576,10 @@ class SpaceManager : public CHeapObj<mtClass> { ...@@ -751,10 +576,10 @@ class SpaceManager : public CHeapObj<mtClass> {
MetaWord* allocate(size_t word_size); MetaWord* allocate(size_t word_size);
// Helper for allocations // Helper for allocations
Metablock* allocate_work(size_t word_size); MetaWord* allocate_work(size_t word_size);
// Returns a block to the per manager freelist // Returns a block to the per manager freelist
void deallocate(MetaWord* p); void deallocate(MetaWord* p, size_t word_size);
// Based on the allocation size and a minimum chunk size, // Based on the allocation size and a minimum chunk size,
// returned chunk size (for expanding space for chunk allocation). // returned chunk size (for expanding space for chunk allocation).
...@@ -763,7 +588,7 @@ class SpaceManager : public CHeapObj<mtClass> { ...@@ -763,7 +588,7 @@ class SpaceManager : public CHeapObj<mtClass> {
// Called when an allocation from the current chunk fails. // Called when an allocation from the current chunk fails.
// Gets a new chunk (may require getting a new virtual space), // Gets a new chunk (may require getting a new virtual space),
// and allocates from that chunk. // and allocates from that chunk.
Metablock* grow_and_allocate(size_t word_size); MetaWord* grow_and_allocate(size_t word_size);
// debugging support. // debugging support.
...@@ -780,6 +605,8 @@ class SpaceManager : public CHeapObj<mtClass> { ...@@ -780,6 +605,8 @@ class SpaceManager : public CHeapObj<mtClass> {
uint const SpaceManager::_small_chunk_limit = 4; uint const SpaceManager::_small_chunk_limit = 4;
const char* SpaceManager::_expand_lock_name = const char* SpaceManager::_expand_lock_name =
"SpaceManager chunk allocation lock"; "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1; const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
...@@ -788,39 +615,26 @@ Mutex* const SpaceManager::_expand_lock = ...@@ -788,39 +615,26 @@ Mutex* const SpaceManager::_expand_lock =
SpaceManager::_expand_lock_name, SpaceManager::_expand_lock_name,
Mutex::_allow_vm_block_flag); Mutex::_allow_vm_block_flag);
#ifdef ASSERT
size_t Metablock::_overhead =
Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
#else
size_t Metablock::_overhead = 0;
#endif
size_t Metachunk::_overhead = size_t Metachunk::_overhead =
Chunk::aligned_overhead_size(sizeof(Metachunk)) / BytesPerWord; Chunk::aligned_overhead_size(sizeof(Metachunk)) / BytesPerWord;
// New blocks returned by the Metaspace are zero initialized. // New blocks returned by the Metaspace are zero initialized.
// Instead, we should fix the constructors to not assume this. // Instead, we should fix the constructors to not assume this.
Metablock* Metablock::initialize(MetaWord* p, size_t word_size) { Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
if (p == NULL) {
return NULL;
}
Metablock* result = (Metablock*) p; Metablock* result = (Metablock*) p;
// Clear the memory // Clear the memory
Copy::fill_to_aligned_words((HeapWord*)result, word_size); Copy::fill_to_aligned_words((HeapWord*)result, word_size);
#ifdef ASSERT #ifdef ASSERT
result->set_word_size(word_size); result->set_word_size(word_size);
// Check after work size is set.
result->set_leader((void*) metaspace_allocation_leader);
result->set_trailer((void*) metaspace_allocation_trailer);
#endif #endif
return result; return result;
} }
void Metablock::verify() {
#ifdef ASSERT
assert(leader() == metaspace_allocation_leader &&
trailer() == metaspace_allocation_trailer,
"block has been corrupted");
#endif
}
// Metachunk methods // Metachunk methods
Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) { Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) {
...@@ -843,18 +657,13 @@ Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) { ...@@ -843,18 +657,13 @@ Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) {
} }
Metablock* Metachunk::allocate(size_t word_size) { MetaWord* Metachunk::allocate(size_t word_size) {
Metablock* result = NULL; MetaWord* result = NULL;
// If available, bump the pointer to allocate. // If available, bump the pointer to allocate.
if (free_word_size() >= word_size) { if (free_word_size() >= word_size) {
result = Metablock::initialize(_top, word_size); result = _top;
_top = _top + word_size; _top = _top + word_size;
} }
#ifdef ASSERT
assert(result == NULL ||
result->word_size() == word_size,
"Block size is not set correctly");
#endif
return result; return result;
} }
...@@ -878,103 +687,85 @@ void Metachunk::print_on(outputStream* st) const { ...@@ -878,103 +687,85 @@ void Metachunk::print_on(outputStream* st) const {
bottom(), top(), end(), word_size()); bottom(), top(), end(), word_size());
} }
#ifdef ASSERT
void Metachunk::mangle() {
// Mangle the payload of the chunk and not the links that
// maintain list of chunks.
HeapWord* start = (HeapWord*)(bottom() + overhead());
size_t word_size = capacity_word_size() - overhead();
Copy::fill_to_words(start, word_size, metadata_chunk_initialize);
}
#endif // ASSERT
void Metachunk::verify() { void Metachunk::verify() {
#ifdef ASSERT #ifdef ASSERT
// Cannot walk through the blocks unless the blocks have // Cannot walk through the blocks unless the blocks have
// headers with sizes. // headers with sizes.
MetaWord* curr = bottom() + overhead(); assert(_bottom <= _top &&
while (curr < top()) { _top <= _end,
Metablock* block = (Metablock*) curr; "Chunk has been smashed");
size_t word_size = block->word_size(); assert(SpaceManager::is_humongous(_word_size) ||
block->verify(); _word_size == SpaceManager::MediumChunk ||
curr = curr + word_size; _word_size == SpaceManager::SmallChunk,
} "Chunk size is wrong");
#endif #endif
return; return;
} }
// BlockFreelist methods // BlockFreelist methods
#ifdef DEALLOCATE_BLOCKS
BlockFreelist::BlockFreelist() : _dictionary(NULL) {} BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
#else
BlockFreelist::BlockFreelist() {}
#endif
BlockFreelist::~BlockFreelist() { BlockFreelist::~BlockFreelist() {
#ifdef DEALLOCATE_BLOCKS
if (_dictionary != NULL) { if (_dictionary != NULL) {
if (Verbose && TraceMetadataChunkAllocation) { if (Verbose && TraceMetadataChunkAllocation) {
_dictionary->print_free_lists(gclog_or_tty); _dictionary->print_free_lists(gclog_or_tty);
} }
delete _dictionary; delete _dictionary;
} }
#endif
} }
Metablock* BlockFreelist::initialize_free_chunk(Metablock* block, size_t word_size) { Metablock* BlockFreelist::initialize_free_chunk(MetaWord* p, size_t word_size) {
#ifdef DEALLOCATE_BLOCKS Metablock* block = (Metablock*) p;
#ifdef ASSERT block->set_word_size(word_size);
assert(word_size = block->word_size(), "Wrong chunk size"); block->set_prev(NULL);
#endif block->set_next(NULL);
Metablock* result = block;
result->setSize(word_size);
result->linkPrev(NULL);
result->linkNext(NULL);
return result;
#else
ShouldNotReachHere();
return block; return block;
#endif
} }
void BlockFreelist::return_block(Metablock* block, size_t word_size) { void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
#ifdef ASSERT Metablock* free_chunk = initialize_free_chunk(p, word_size);
assert(word_size = block->word_size(), "Block size is wrong");;
#endif
Metablock* free_chunk = initialize_free_chunk(block, word_size);
#ifdef DEALLOCATE_BLOCKS
if (dictionary() == NULL) { if (dictionary() == NULL) {
_dictionary = new BinaryTreeDictionary<Metablock>(false /* adaptive_freelists */); _dictionary = new BlockTreeDictionary();
} }
dictionary()->returnChunk(free_chunk); dictionary()->return_chunk(free_chunk);
#endif
} }
Metablock* BlockFreelist::get_block(size_t word_size) { MetaWord* BlockFreelist::get_block(size_t word_size) {
#ifdef DEALLOCATE_BLOCKS
if (dictionary() == NULL) { if (dictionary() == NULL) {
return NULL; return NULL;
} }
Metablock* free_chunk = if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
dictionary()->getChunk(word_size, FreeBlockDictionary<Metablock>::exactly); // Dark matter. Too small for dictionary.
#else
Metablock* free_chunk = NULL;
#endif
if (free_chunk == NULL) {
return NULL; return NULL;
} }
assert(free_chunk->word_size() == word_size, "Size of chunk is incorrect");
Metablock* block = Metablock::initialize((MetaWord*) free_chunk, word_size);
#ifdef ASSERT
assert(block->word_size() == word_size, "Block size is not set correctly");
#endif
return block; Metablock* free_block =
dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
if (free_block == NULL) {
return NULL;
}
return (MetaWord*) free_block;
} }
void BlockFreelist::print_on(outputStream* st) const { void BlockFreelist::print_on(outputStream* st) const {
#ifdef DEALLOCATE_BLOCKS
if (dictionary() == NULL) { if (dictionary() == NULL) {
return; return;
} }
dictionary()->print_free_lists(st); dictionary()->print_free_lists(st);
#else
return;
#endif
} }
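The effect of these BlockFreelist changes is a simple round trip keyed purely by word size: deallocated blocks at or above the TreeChunk minimum go into a per-SpaceManager dictionary, and later exact-size requests pull them back out; anything smaller is "dark matter" and is never stored. A self-contained model of that behavior (std::multimap stands in for BlockTreeDictionary, and min_dictionary_words is an assumed placeholder for TreeChunk<Metablock, FreeList>::min_size()):

#include <cstddef>
#include <cstdio>
#include <map>
#include <utility>

static const size_t min_dictionary_words = 3;       // assumed threshold
static std::multimap<size_t, void*> dictionary;     // word size -> free block

void return_block(void* p, size_t word_size) {
  if (word_size < min_dictionary_words) return;     // dark matter: dropped
  dictionary.insert(std::make_pair(word_size, p));
}

void* get_block(size_t word_size) {
  // The real get_block asks the dictionary for an exact size match.
  std::multimap<size_t, void*>::iterator it = dictionary.find(word_size);
  if (it == dictionary.end()) return NULL;
  void* p = it->second;
  dictionary.erase(it);
  return p;
}

int main() {
  char buf[64];
  return_block(buf, 8);                             // freed metadata block
  std::printf("reused:      %p\n", get_block(8));   // comes back out
  std::printf("dark matter: %p\n", get_block(2));   // was never stored
  return 0;
}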
// VirtualSpaceNode methods // VirtualSpaceNode methods
...@@ -1597,14 +1388,11 @@ void Metadebug::deallocate_block_a_lot(SpaceManager* sm, ...@@ -1597,14 +1388,11 @@ void Metadebug::deallocate_block_a_lot(SpaceManager* sm,
Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) { Metadebug::deallocate_block_a_lot_count() % MetaDataDeallocateALotInterval == 0 ) {
Metadebug::set_deallocate_block_a_lot_count(0); Metadebug::set_deallocate_block_a_lot_count(0);
for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) { for (uint i = 0; i < metadata_deallocate_a_lot_block; i++) {
Metablock* dummy_block = sm->allocate_work(raw_word_size); MetaWord* dummy_block = sm->allocate_work(raw_word_size);
if (dummy_block == 0) { if (dummy_block == 0) {
break; break;
} }
#ifdef ASSERT sm->deallocate(dummy_block, raw_word_size);
assert(dummy_block->word_size() == raw_word_size, "Block size is not set correctly");
#endif
sm->deallocate(dummy_block->data());
} }
} else { } else {
Metadebug::inc_deallocate_block_a_lot_count(); Metadebug::inc_deallocate_block_a_lot_count();
...@@ -1784,8 +1572,8 @@ void ChunkManager::verify() { ...@@ -1784,8 +1572,8 @@ void ChunkManager::verify() {
} }
void ChunkManager::locked_verify() { void ChunkManager::locked_verify() {
locked_verify_free_chunks_total();
locked_verify_free_chunks_count(); locked_verify_free_chunks_count();
locked_verify_free_chunks_total();
} }
void ChunkManager::locked_print_free_chunks(outputStream* st) { void ChunkManager::locked_print_free_chunks(outputStream* st) {
...@@ -1803,7 +1591,6 @@ ChunkList* ChunkManager::free_chunks(ChunkIndex index) { ...@@ -1803,7 +1591,6 @@ ChunkList* ChunkManager::free_chunks(ChunkIndex index) {
return &_free_chunks[index]; return &_free_chunks[index];
} }
// These methods that sum the free chunk lists are used in printing // These methods that sum the free chunk lists are used in printing
// methods that are used in product builds. // methods that are used in product builds.
size_t ChunkManager::sum_free_chunks() { size_t ChunkManager::sum_free_chunks() {
...@@ -1818,6 +1605,7 @@ size_t ChunkManager::sum_free_chunks() { ...@@ -1818,6 +1605,7 @@ size_t ChunkManager::sum_free_chunks() {
result = result + list->sum_list_capacity(); result = result + list->sum_list_capacity();
} }
result = result + humongous_dictionary()->total_size();
return result; return result;
} }
...@@ -1831,6 +1619,7 @@ size_t ChunkManager::sum_free_chunks_count() { ...@@ -1831,6 +1619,7 @@ size_t ChunkManager::sum_free_chunks_count() {
} }
count = count + list->sum_list_count(); count = count + list->sum_list_count();
} }
count = count + humongous_dictionary()->total_free_blocks();
return count; return count;
} }
...@@ -1875,23 +1664,24 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) { ...@@ -1875,23 +1664,24 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
assert_lock_strong(SpaceManager::expand_lock()); assert_lock_strong(SpaceManager::expand_lock());
locked_verify(); locked_verify();
ChunkList* free_list = find_free_chunks_list(word_size);
assert(free_list != NULL, "Sanity check");
Metachunk* chunk = free_list->head(); Metachunk* chunk = NULL;
debug_only(Metachunk* debug_head = chunk;) if (!SpaceManager::is_humongous(word_size)) {
ChunkList* free_list = find_free_chunks_list(word_size);
assert(free_list != NULL, "Sanity check");
if (chunk == NULL) { chunk = free_list->head();
return NULL; debug_only(Metachunk* debug_head = chunk;)
}
if (chunk == NULL) {
return NULL;
}
Metachunk* prev_chunk = chunk;
if (chunk->word_size() == word_size) {
// Chunk is being removed from the chunks free list.
dec_free_chunks_total(chunk->capacity_word_size());
// Remove the chunk as the head of the list. // Remove the chunk as the head of the list.
free_list->set_head(chunk->next()); free_list->set_head(chunk->next());
chunk->set_next(NULL); chunk->set_next(NULL);
// Chunk has been removed from the chunks free list.
dec_free_chunks_total(chunk->capacity_word_size());
if (TraceMetadataChunkAllocation && Verbose) { if (TraceMetadataChunkAllocation && Verbose) {
tty->print_cr("ChunkManager::free_chunks_get: free_list " tty->print_cr("ChunkManager::free_chunks_get: free_list "
...@@ -1899,79 +1689,24 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) { ...@@ -1899,79 +1689,24 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
free_list, chunk, chunk->word_size()); free_list, chunk, chunk->word_size());
} }
} else { } else {
assert(SpaceManager::is_humongous(word_size), chunk = humongous_dictionary()->get_chunk(
"Should only need to check humongous"); word_size,
// This code to find the best fit is just for purposes of FreeBlockDictionary<Metachunk>::atLeast);
// investigating the loss due to fragmentation on a humongous
// chunk. It will be replace by a binaryTreeDictionary for if (chunk != NULL) {
// the humongous chunks. if (TraceMetadataHumongousAllocation) {
uint count = 0; size_t waste = chunk->word_size() - word_size;
Metachunk* best_fit = NULL; tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
Metachunk* best_fit_prev = NULL; " for requested size " SIZE_FORMAT
while (chunk != NULL) { " waste " SIZE_FORMAT,
count++; chunk->word_size(), word_size, waste);
if (chunk->word_size() < word_size) {
prev_chunk = chunk;
chunk = chunk->next();
} else if (chunk->word_size() == word_size) {
break;
} else {
if (best_fit == NULL ||
best_fit->word_size() > chunk->word_size()) {
best_fit_prev = prev_chunk;
best_fit = chunk;
}
prev_chunk = chunk;
chunk = chunk->next();
} }
// Chunk is being removed from the chunks free list.
dec_free_chunks_total(chunk->capacity_word_size());
#ifdef ASSERT
chunk->set_is_free(false);
#endif
} }
if (chunk == NULL) {
prev_chunk = best_fit_prev;
chunk = best_fit;
}
if (chunk != NULL) {
if (TraceMetadataHumongousAllocation) {
size_t waste = chunk->word_size() - word_size;
tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
" for requested size " SIZE_FORMAT
" waste " SIZE_FORMAT
" found at " SIZE_FORMAT " of " SIZE_FORMAT,
chunk->word_size(), word_size, waste,
count, free_list->sum_list_count());
}
// Chunk is being removed from the chunks free list.
dec_free_chunks_total(chunk->capacity_word_size());
// Remove the chunk if it is at the head of the list.
if (chunk == free_list->head()) {
free_list->set_head(chunk->next());
if (TraceMetadataHumongousAllocation) {
tty->print_cr("ChunkManager::free_chunks_get: humongous free_list "
PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT
" new head " PTR_FORMAT,
free_list, chunk, chunk->word_size(),
free_list->head());
}
} else {
// Remove a chunk in the interior of the list
prev_chunk->set_next(chunk->next());
if (TraceMetadataHumongousAllocation) {
tty->print_cr("ChunkManager::free_chunks_get: humongous free_list "
PTR_FORMAT " chunk " PTR_FORMAT " size " SIZE_FORMAT
PTR_FORMAT " prev " PTR_FORMAT " next " PTR_FORMAT,
free_list, chunk, chunk->word_size(),
prev_chunk, chunk->next());
}
}
chunk->set_next(NULL);
} else {
if (TraceMetadataHumongousAllocation) {
tty->print_cr("ChunkManager::free_chunks_get: New humongous chunk of size "
SIZE_FORMAT,
word_size);
}
}
} }
locked_verify(); locked_verify();
return chunk; return chunk;
...@@ -2000,12 +1735,18 @@ Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { ...@@ -2000,12 +1735,18 @@ Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
return chunk; return chunk;
} }
void ChunkManager::print_on(outputStream* out) {
if (PrintFLSStatistics != 0) {
humongous_dictionary()->report_statistics();
}
}
// SpaceManager methods // SpaceManager methods
size_t SpaceManager::sum_free_in_chunks_in_use() const { size_t SpaceManager::sum_free_in_chunks_in_use() const {
MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
size_t free = 0; size_t free = 0;
for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
Metachunk* chunk = chunks_in_use(i); Metachunk* chunk = chunks_in_use(i);
while (chunk != NULL) { while (chunk != NULL) {
free += chunk->free_word_size(); free += chunk->free_word_size();
...@@ -2018,11 +1759,12 @@ size_t SpaceManager::sum_free_in_chunks_in_use() const { ...@@ -2018,11 +1759,12 @@ size_t SpaceManager::sum_free_in_chunks_in_use() const {
size_t SpaceManager::sum_waste_in_chunks_in_use() const { size_t SpaceManager::sum_waste_in_chunks_in_use() const {
MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
size_t result = 0; size_t result = 0;
for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
// Count the free space in all the chunk but not the
// current chunk from which allocations are still being done.
result += sum_waste_in_chunks_in_use(i); result += sum_waste_in_chunks_in_use(i);
} }
return result; return result;
} }
...@@ -2033,10 +1775,10 @@ size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const { ...@@ -2033,10 +1775,10 @@ size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
// Count the free space in all the chunks but not the // Count the free space in all the chunks but not the
// current chunk from which allocations are still being done. // current chunk from which allocations are still being done.
if (chunk != NULL) { if (chunk != NULL) {
while (chunk != NULL) { Metachunk* prev = chunk;
if (chunk != current_chunk()) { while (chunk != NULL && chunk != current_chunk()) {
result += chunk->free_word_size(); result += chunk->free_word_size();
} prev = chunk;
chunk = chunk->next(); chunk = chunk->next();
count++; count++;
} }
...@@ -2047,7 +1789,7 @@ size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const { ...@@ -2047,7 +1789,7 @@ size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
size_t SpaceManager::sum_capacity_in_chunks_in_use() const { size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
size_t sum = 0; size_t sum = 0;
for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
Metachunk* chunk = chunks_in_use(i); Metachunk* chunk = chunks_in_use(i);
while (chunk != NULL) { while (chunk != NULL) {
// Just changed this sum += chunk->capacity_word_size(); // Just changed this sum += chunk->capacity_word_size();
...@@ -2061,9 +1803,10 @@ size_t SpaceManager::sum_capacity_in_chunks_in_use() const { ...@@ -2061,9 +1803,10 @@ size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
size_t SpaceManager::sum_count_in_chunks_in_use() { size_t SpaceManager::sum_count_in_chunks_in_use() {
size_t count = 0; size_t count = 0;
for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
count = count + sum_count_in_chunks_in_use(i); count = count + sum_count_in_chunks_in_use(i);
} }
return count; return count;
} }
...@@ -2081,7 +1824,7 @@ size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) { ...@@ -2081,7 +1824,7 @@ size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) {
size_t SpaceManager::sum_used_in_chunks_in_use() const { size_t SpaceManager::sum_used_in_chunks_in_use() const {
MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
size_t used = 0; size_t used = 0;
for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
Metachunk* chunk = chunks_in_use(i); Metachunk* chunk = chunks_in_use(i);
while (chunk != NULL) { while (chunk != NULL) {
used += chunk->used_word_size(); used += chunk->used_word_size();
...@@ -2139,15 +1882,13 @@ size_t SpaceManager::calc_chunk_size(size_t word_size) { ...@@ -2139,15 +1882,13 @@ size_t SpaceManager::calc_chunk_size(size_t word_size) {
gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size); gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size);
gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT, gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT,
chunk_word_size); chunk_word_size);
gclog_or_tty->print_cr(" block overhead " PTR_FORMAT gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT,
" chunk overhead " PTR_FORMAT,
Metablock::overhead(),
Metachunk::overhead()); Metachunk::overhead());
} }
return chunk_word_size; return chunk_word_size;
} }
Metablock* SpaceManager::grow_and_allocate(size_t word_size) { MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
assert(vs_list()->current_virtual_space() != NULL, assert(vs_list()->current_virtual_space() != NULL,
"Should have been set"); "Should have been set");
assert(current_chunk() == NULL || assert(current_chunk() == NULL ||
...@@ -2180,7 +1921,7 @@ Metablock* SpaceManager::grow_and_allocate(size_t word_size) { ...@@ -2180,7 +1921,7 @@ Metablock* SpaceManager::grow_and_allocate(size_t word_size) {
void SpaceManager::print_on(outputStream* st) const { void SpaceManager::print_on(outputStream* st) const {
for (ChunkIndex i = SmallIndex; for (ChunkIndex i = SmallIndex;
i < NumberOfFreeLists ; i < NumberOfInUseLists ;
i = next_chunk_index(i) ) { i = next_chunk_index(i) ) {
st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT, st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT,
chunks_in_use(i), chunks_in_use(i),
...@@ -2191,8 +1932,11 @@ void SpaceManager::print_on(outputStream* st) const { ...@@ -2191,8 +1932,11 @@ void SpaceManager::print_on(outputStream* st) const {
sum_waste_in_chunks_in_use(SmallIndex), sum_waste_in_chunks_in_use(SmallIndex),
sum_waste_in_chunks_in_use(MediumIndex), sum_waste_in_chunks_in_use(MediumIndex),
sum_waste_in_chunks_in_use(HumongousIndex)); sum_waste_in_chunks_in_use(HumongousIndex));
// Nothing in them yet // block free lists
// block_freelists()->print_on(st); if (block_freelists() != NULL) {
st->print_cr("total in block free lists " SIZE_FORMAT,
block_freelists()->total_size());
}
} }
SpaceManager::SpaceManager(Mutex* lock, VirtualSpaceList* vs_list) : SpaceManager::SpaceManager(Mutex* lock, VirtualSpaceList* vs_list) :
...@@ -2200,7 +1944,7 @@ SpaceManager::SpaceManager(Mutex* lock, VirtualSpaceList* vs_list) : ...@@ -2200,7 +1944,7 @@ SpaceManager::SpaceManager(Mutex* lock, VirtualSpaceList* vs_list) :
_allocation_total(0), _allocation_total(0),
_lock(lock) { _lock(lock) {
Metadebug::init_allocation_fail_alot_count(); Metadebug::init_allocation_fail_alot_count();
for (ChunkIndex i = SmallIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { for (ChunkIndex i = SmallIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
_chunks_in_use[i] = NULL; _chunks_in_use[i] = NULL;
} }
_current_chunk = NULL; _current_chunk = NULL;
...@@ -2262,22 +2006,24 @@ SpaceManager::~SpaceManager() { ...@@ -2262,22 +2006,24 @@ SpaceManager::~SpaceManager() {
// Humongous chunks are never the current chunk. // Humongous chunks are never the current chunk.
Metachunk* humongous_chunks = chunks_in_use(HumongousIndex); Metachunk* humongous_chunks = chunks_in_use(HumongousIndex);
if (humongous_chunks != NULL) { while (humongous_chunks != NULL) {
chunk_manager->free_humongous_chunks()->add_at_head(humongous_chunks); #ifdef ASSERT
set_chunks_in_use(HumongousIndex, NULL); humongous_chunks->set_is_free(true);
#endif
Metachunk* next_humongous_chunks = humongous_chunks->next();
chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
humongous_chunks = next_humongous_chunks;
} }
set_chunks_in_use(HumongousIndex, NULL);
chunk_manager->locked_verify(); chunk_manager->locked_verify();
} }
void SpaceManager::deallocate(MetaWord* p) { void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
assert_lock_strong(_lock); assert_lock_strong(_lock);
ShouldNotReachHere(); // Where is this needed. size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
#ifdef DEALLOCATE_BLOCKS assert(word_size >= min_size,
Metablock* block = Metablock::metablock_from_data(p); err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
// This is expense but kept it until integration JJJ block_freelists()->return_block(p, word_size);
assert(contains((address)block), "Block does not belong to this metaspace");
block_freelists()->return_block(block, word_size);
#endif
} }
// Adds a chunk to the list of chunks in use. // Adds a chunk to the list of chunks in use.
...@@ -2366,50 +2112,40 @@ void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) { ...@@ -2366,50 +2112,40 @@ void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
MetaWord* SpaceManager::allocate(size_t word_size) { MetaWord* SpaceManager::allocate(size_t word_size) {
MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
size_t block_overhead = Metablock::overhead();
// If only the dictionary is going to be used (i.e., no // If only the dictionary is going to be used (i.e., no
// indexed free list), then there is a minimum size requirement. // indexed free list), then there is a minimum size requirement.
// MinChunkSize is a placeholder for the real minimum size JJJ // MinChunkSize is a placeholder for the real minimum size JJJ
size_t byte_size_with_overhead = (word_size + block_overhead) * BytesPerWord; size_t byte_size = word_size * BytesPerWord;
#ifdef DEALLOCATE_BLOCKS
size_t raw_bytes_size = MAX2(ARENA_ALIGN(byte_size_with_overhead), size_t byte_size_with_overhead = byte_size + Metablock::overhead();
MinChunkSize * BytesPerWord);
#else size_t raw_bytes_size = MAX2(byte_size_with_overhead,
size_t raw_bytes_size = ARENA_ALIGN(byte_size_with_overhead); Metablock::min_block_byte_size());
#endif raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
size_t raw_word_size = raw_bytes_size / BytesPerWord; size_t raw_word_size = raw_bytes_size / BytesPerWord;
assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem"); assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
BlockFreelist* fl = block_freelists(); BlockFreelist* fl = block_freelists();
Metablock* block = NULL; MetaWord* p = NULL;
// Allocation from the dictionary is expensive in the sense that // Allocation from the dictionary is expensive in the sense that
// the dictionary has to be searched for a size. Don't allocate // the dictionary has to be searched for a size. Don't allocate
// from the dictionary until it starts to get fat. Is this // from the dictionary until it starts to get fat. Is this
// a reasonable policy? Maybe a skinny dictionary is fast enough // a reasonable policy? Maybe a skinny dictionary is fast enough
// for allocations. Do some profiling. JJJ // for allocations. Do some profiling. JJJ
if (fl->totalSize() > allocation_from_dictionary_limit) { if (fl->total_size() > allocation_from_dictionary_limit) {
block = fl->get_block(raw_word_size); p = fl->get_block(raw_word_size);
} }
if (block == NULL) { if (p == NULL) {
block = allocate_work(raw_word_size); p = allocate_work(raw_word_size);
if (block == NULL) {
return NULL;
}
} }
Metadebug::deallocate_block_a_lot(this, raw_word_size); Metadebug::deallocate_block_a_lot(this, raw_word_size);
// Push the allocation past the word containing the size and leader. return p;
#ifdef ASSERT
MetaWord* result = block->data();
return result;
#else
return (MetaWord*) block;
#endif
} }
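The size the caller asks for is first padded up to something the freelist machinery can handle: at least min_block_byte_size (so a freed block can hold its own header) and then arena-aligned. A worked example of that rounding (assuming a 64-bit VM with BytesPerWord == 8, zero Metablock overhead as in product builds, and ARENA_ALIGN rounding to two words; all three are assumptions about build constants):

#include <cstddef>
#include <cstdio>

static const size_t kBytesPerWord = 8;
static const size_t kMinBlockBytes = 3 * kBytesPerWord;  // the 3-word header
static const size_t kArenaAlign = 2 * kBytesPerWord;

static size_t arena_align(size_t bytes) {
  return (bytes + kArenaAlign - 1) & ~(kArenaAlign - 1);
}

int main() {
  size_t word_size = 1;                               // caller wants 1 word
  size_t byte_size = word_size * kBytesPerWord;       // 8 bytes
  size_t raw_bytes = byte_size < kMinBlockBytes       // MAX2 against the header size
                   ? kMinBlockBytes : byte_size;      // -> 24 bytes
  raw_bytes = arena_align(raw_bytes);                 // 24 -> 32 bytes
  std::printf("raw_word_size = %zu\n", raw_bytes / kBytesPerWord);  // prints 4
  return 0;
}

So even a one-word request consumes four words once it has been made freelist-capable; the deallocate-a-lot stress path above recycles exactly these padded sizes.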
// Returns the address of space allocated for "word_size". // Returns the address of space allocated for "word_size".
// This method does not know about blocks (Metablocks) // This method does not know about blocks (Metablocks)
Metablock* SpaceManager::allocate_work(size_t word_size) { MetaWord* SpaceManager::allocate_work(size_t word_size) {
assert_lock_strong(_lock); assert_lock_strong(_lock);
#ifdef ASSERT #ifdef ASSERT
if (Metadebug::test_metadata_failure()) { if (Metadebug::test_metadata_failure()) {
...@@ -2417,7 +2153,7 @@ Metablock* SpaceManager::allocate_work(size_t word_size) { ...@@ -2417,7 +2153,7 @@ Metablock* SpaceManager::allocate_work(size_t word_size) {
} }
#endif #endif
// Is there space in the current chunk? // Is there space in the current chunk?
Metablock* result = NULL; MetaWord* result = NULL;
// For DumpSharedSpaces, only allocate out of the current chunk which is // For DumpSharedSpaces, only allocate out of the current chunk which is
// never null because we gave it the size we wanted. Caller reports out // never null because we gave it the size we wanted. Caller reports out
...@@ -2436,8 +2172,8 @@ Metablock* SpaceManager::allocate_work(size_t word_size) { ...@@ -2436,8 +2172,8 @@ Metablock* SpaceManager::allocate_work(size_t word_size) {
} }
if (result > 0) { if (result > 0) {
inc_allocation_total(word_size); inc_allocation_total(word_size);
assert(result != (Metablock*) chunks_in_use(MediumIndex), "Head of the list is being allocated"); assert(result != (MetaWord*) chunks_in_use(MediumIndex),
assert(result->word_size() == word_size, "Size not set correctly"); "Head of the list is being allocated");
} }
return result; return result;
...@@ -2447,13 +2183,13 @@ void SpaceManager::verify() { ...@@ -2447,13 +2183,13 @@ void SpaceManager::verify() {
// If there are blocks in the dictionary, then // If there are blocks in the dictionary, then
// verification of chunks does not work since // verification of chunks does not work since
// being in the dictionary alters a chunk. // being in the dictionary alters a chunk.
if (block_freelists()->totalSize() == 0) { if (block_freelists()->total_size() == 0) {
// Skip the small chunks because their next link points to // Skip the small chunks because their next link points to
// medium chunks. This is because the small chunk is the // medium chunks. This is because the small chunk is the
// current chunk (for allocations) until it is full and the // current chunk (for allocations) until it is full and the
// addition of the next chunk does not NULL the next // addition of the next chunk does not NULL the next
// link of the small chunk. // link of the small chunk.
for (ChunkIndex i = MediumIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { for (ChunkIndex i = MediumIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
Metachunk* curr = chunks_in_use(i); Metachunk* curr = chunks_in_use(i);
while (curr != NULL) { while (curr != NULL) {
curr->verify(); curr->verify();
...@@ -2492,7 +2228,7 @@ void SpaceManager::dump(outputStream* const out) const { ...@@ -2492,7 +2228,7 @@ void SpaceManager::dump(outputStream* const out) const {
// Add up statistics for all chunks in this SpaceManager. // Add up statistics for all chunks in this SpaceManager.
for (ChunkIndex index = SmallIndex; for (ChunkIndex index = SmallIndex;
index < NumberOfFreeLists; index < NumberOfInUseLists;
index = next_chunk_index(index)) { index = next_chunk_index(index)) {
for (Metachunk* curr = chunks_in_use(index); for (Metachunk* curr = chunks_in_use(index);
curr != NULL; curr != NULL;
...@@ -2521,7 +2257,7 @@ void SpaceManager::dump(outputStream* const out) const { ...@@ -2521,7 +2257,7 @@ void SpaceManager::dump(outputStream* const out) const {
#ifdef ASSERT #ifdef ASSERT
void SpaceManager::mangle_freed_chunks() { void SpaceManager::mangle_freed_chunks() {
for (ChunkIndex index = SmallIndex; for (ChunkIndex index = SmallIndex;
index < NumberOfFreeLists; index < NumberOfInUseLists;
index = next_chunk_index(index)) { index = next_chunk_index(index)) {
for (Metachunk* curr = chunks_in_use(index); for (Metachunk* curr = chunks_in_use(index);
curr != NULL; curr != NULL;
...@@ -2833,13 +2569,12 @@ void Metaspace::initialize(Mutex* lock, size_t initial_size) { ...@@ -2833,13 +2569,12 @@ void Metaspace::initialize(Mutex* lock, size_t initial_size) {
} }
} }
MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) { MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
// DumpSharedSpaces doesn't use class metadata area (yet) // DumpSharedSpaces doesn't use class metadata area (yet)
if (mdtype == ClassType && !DumpSharedSpaces) { if (mdtype == ClassType && !DumpSharedSpaces) {
return class_vsm()->allocate(word_size); return class_vsm()->allocate(word_size);
} else { } else {
return vsm()->allocate(word_size); return vsm()->allocate(word_size);
} }
} }
...@@ -2853,6 +2588,7 @@ MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) ...@@ -2853,6 +2588,7 @@ MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype)
gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
" to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC()); " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
} }
result = allocate(word_size, mdtype); result = allocate(word_size, mdtype);
return result; return result;
...@@ -2889,37 +2625,39 @@ size_t Metaspace::capacity_words(MetadataType mdtype) const { ...@@ -2889,37 +2625,39 @@ size_t Metaspace::capacity_words(MetadataType mdtype) const {
void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
if (SafepointSynchronize::is_at_safepoint()) { if (SafepointSynchronize::is_at_safepoint()) {
assert(Thread::current()->is_VM_thread(), "should be the VM thread"); assert(Thread::current()->is_VM_thread(), "should be the VM thread");
// Don't take lock // Don't take Heap_lock
#ifdef DEALLOCATE_BLOCKS MutexLocker ml(vsm()->lock());
if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
// Dark matter. Too small for dictionary.
#ifdef ASSERT
Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
return;
}
if (is_class) { if (is_class) {
class_vsm()->deallocate(ptr); class_vsm()->deallocate(ptr, word_size);
} else { } else {
vsm()->deallocate(ptr); vsm()->deallocate(ptr, word_size);
} }
#else
#ifdef ASSERT
Copy::fill_to_words((HeapWord*)ptr, word_size, metadata_deallocate);
#endif
#endif
} else { } else {
MutexLocker ml(vsm()->lock()); MutexLocker ml(vsm()->lock());
#ifdef DEALLOCATE_BLOCKS if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
// Dark matter. Too small for dictionary.
#ifdef ASSERT
Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
return;
}
if (is_class) { if (is_class) {
class_vsm()->deallocate(ptr); class_vsm()->deallocate(ptr, word_size);
} else { } else {
vsm()->deallocate(ptr); vsm()->deallocate(ptr, word_size);
} }
#else
#ifdef ASSERT
Copy::fill_to_words((HeapWord*)ptr, word_size, metadata_deallocate);
#endif
#endif
} }
} }
MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
bool read_only, MetadataType mdtype, TRAPS) { bool read_only, MetadataType mdtype, TRAPS) {
if (HAS_PENDING_EXCEPTION) { if (HAS_PENDING_EXCEPTION) {
assert(false, "Should not allocate with exception pending"); assert(false, "Should not allocate with exception pending");
...@@ -2943,7 +2681,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, ...@@ -2943,7 +2681,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
if (result == NULL) { if (result == NULL) {
report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
} }
return result; return Metablock::initialize(result, word_size);
} }
result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
...@@ -2951,7 +2689,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, ...@@ -2951,7 +2689,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
if (result == NULL) { if (result == NULL) {
// Try to clean out some memory and retry. // Try to clean out some memory and retry.
result = result =
Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
loader_data, word_size, mdtype); loader_data, word_size, mdtype);
// If result is still null, we are out of memory. // If result is still null, we are out of memory.
...@@ -2967,7 +2705,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, ...@@ -2967,7 +2705,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
THROW_OOP_0(Universe::out_of_memory_error_perm_gen()); THROW_OOP_0(Universe::out_of_memory_error_perm_gen());
} }
} }
return result; return Metablock::initialize(result, word_size);
} }
void Metaspace::print_on(outputStream* out) const { void Metaspace::print_on(outputStream* out) const {
......
@@ -57,12 +57,10 @@
 //
 class ClassLoaderData;
+class Metablock;
 class MetaWord;
 class Mutex;
 class outputStream;
-class FreeChunk;
-template <class Chunk_t> class FreeList;
-template <class Chunk_t> class BinaryTreeDictionary;
 class SpaceManager;
 // Metaspaces each have a SpaceManager and allocations
@@ -128,7 +126,7 @@ class Metaspace : public CHeapObj<mtClass> {
   size_t capacity_words(MetadataType mdtype) const;
   size_t waste_words(MetadataType mdtype) const;
-  static MetaWord* allocate(ClassLoaderData* loader_data, size_t size,
+  static Metablock* allocate(ClassLoaderData* loader_data, size_t size,
                             bool read_only, MetadataType mdtype, TRAPS);
   void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
......
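For callers the visible change is confined to the return type. A hypothetical call site under the new signature (the data() accessor is an assumption, not taken from this patch; CHECK_NULL is the usual TRAPS idiom):

// Hypothetical call site; assumes the enclosing function itself returns
// NULL on a pending exception, as CHECK_NULL requires.
Metablock* block = Metaspace::allocate(loader_data, word_size,
                                       /*read_only=*/false,
                                       Metaspace::NonClassType, CHECK_NULL);
MetaWord* payload = (MetaWord*)block->data();  // assumed accessor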
@@ -59,6 +59,7 @@
 #include "memory/generation.hpp"
 #include "memory/generationSpec.hpp"
 #include "memory/heap.hpp"
+#include "memory/metablock.hpp"
 #include "memory/space.hpp"
 #include "memory/tenuredGeneration.hpp"
 #include "memory/universe.hpp"
@@ -249,6 +250,7 @@ typedef TwoOopHashtable<Klass*, mtClass> KlassTwoOopHashtable;
 typedef Hashtable<Klass*, mtClass> KlassHashtable;
 typedef HashtableEntry<Klass*, mtClass> KlassHashtableEntry;
 typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
+typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
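The typedef reuses the binary tree dictionary, already generalized for CMS chunks, for metadata blocks: the dictionary is parameterized over both the chunk type and the per-size freelist template. A reduced sketch of that two-parameter shape (illustrative, not the HotSpot declarations):

#include <cstddef>

// Reduced sketch mirroring BinaryTreeDictionary<Metablock, FreeList>.
template <class Chunk_t> class FreeList {
  Chunk_t*       _head;   // linked list of same-sized chunks
  std::ptrdiff_t _count;  // ssize_t in HotSpot; exposed in VM_STRUCTS below
};

template <class Chunk_t, template <class> class FreeList_t>
class BinaryTreeDictionary {
  std::size_t _total_size;  // the _total_size field named in VM_STRUCTS below
  // ... tree of FreeList_t<Chunk_t> nodes keyed by chunk size ...
};

class Metablock;  // block type introduced by this commit
typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;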
 //--------------------------------------------------------------------------------
 // VM_STRUCTS
@@ -1237,7 +1239,15 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
   nonstatic_field(AccessFlags, _flags, jint) \
   nonstatic_field(elapsedTimer, _counter, jlong) \
   nonstatic_field(elapsedTimer, _active, bool) \
-  nonstatic_field(InvocationCounter, _counter, unsigned int)
+  nonstatic_field(InvocationCounter, _counter, unsigned int) \
+  volatile_nonstatic_field(FreeChunk, _size, size_t) \
+  nonstatic_field(FreeChunk, _next, FreeChunk*) \
+  nonstatic_field(FreeChunk, _prev, FreeChunk*) \
+  nonstatic_field(FreeList<FreeChunk>, _size, size_t) \
+  nonstatic_field(FreeList<Metablock>, _size, size_t) \
+  nonstatic_field(FreeList<FreeChunk>, _count, ssize_t) \
+  nonstatic_field(FreeList<Metablock>, _count, ssize_t) \
+  nonstatic_field(MetablockTreeDictionary, _total_size, size_t)
 /* NOTE that we do not use the last_entry() macro here; it is used */
 /* in vmStructs_<os>_<cpu>.hpp's VM_STRUCTS_OS_CPU macro (and must */
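Each nonstatic_field line above becomes one row in the table that the Serviceability Agent reads to locate fields by offset in a live VM or a core file. Roughly, in a form simplified from the real vmStructs machinery:

#include <cstdint>

// Simplified shape of a generated table row: enough for a debugger to
// compute the address of obj->field without compiling against the headers.
struct VMStructEntry {
  const char* typeName;    // e.g. "FreeChunk"
  const char* fieldName;   // e.g. "_size"
  const char* typeString;  // e.g. "size_t"
  int32_t     isStatic;    // 0 for the nonstatic_field entries here
  uint64_t    offset;      // offset of the field within the type
  void*       address;     // used for static fields instead of an offset
};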
@@ -2080,7 +2090,24 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
   declare_toplevel_type(Universe) \
   declare_toplevel_type(vframeArray) \
   declare_toplevel_type(vframeArrayElement) \
-  declare_toplevel_type(Annotations*)
+  declare_toplevel_type(Annotations*) \
+  \
+  /***************/ \
+  /* Miscellaneous types */ \
+  /***************/ \
+  \
+  /* freelist */ \
+  declare_toplevel_type(FreeChunk*) \
+  declare_toplevel_type(Metablock*) \
+  declare_toplevel_type(FreeBlockDictionary<FreeChunk>*) \
+  declare_toplevel_type(FreeList<FreeChunk>*) \
+  declare_toplevel_type(FreeList<FreeChunk>) \
+  declare_toplevel_type(FreeBlockDictionary<Metablock>*) \
+  declare_toplevel_type(FreeList<Metablock>*) \
+  declare_toplevel_type(FreeList<Metablock>) \
+  declare_toplevel_type(MetablockTreeDictionary*) \
+  declare_type(MetablockTreeDictionary, FreeBlockDictionary<Metablock>) \
+  declare_type(MetablockTreeDictionary, FreeBlockDictionary<Metablock>)
 /* NOTE that we do not use the last_entry() macro here; it is used */
......
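declare_toplevel_type registers a type with no superclass known to the debugger, while declare_type(MetablockTreeDictionary, FreeBlockDictionary<Metablock>) also records the inheritance edge, letting the SA treat a FreeBlockDictionary<Metablock>* field as the concrete dictionary type. The corresponding row is roughly (simplified from HotSpot's VMTypeEntry):

#include <cstdint>

// Simplified shape of a row in the SA's type table.
struct VMTypeEntry {
  const char* typeName;        // e.g. "MetablockTreeDictionary"
  const char* superclassName;  // "FreeBlockDictionary<Metablock>" here,
                               // NULL for a toplevel type
  int32_t  isOopType;
  int32_t  isIntegerType;
  int32_t  isUnsigned;
  uint64_t size;               // sizeof(type) in the target VM
};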