Commit ccabe237 authored by: J jmasa

7045397: NPG: Add freelists to class loader arenas.

Reviewed-by: coleenp, stefank, jprovino, ohair
Parent: 2e80078c
@@ -79,10 +79,10 @@ ifeq ($(INCLUDE_ALTERNATE_GCS), false)
CXXFLAGS += -DSERIALGC
CFLAGS += -DSERIALGC
Src_Files_EXCLUDE += \
binaryTreeDictionary.cpp cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp cmsPermGen.cpp compactibleFreeListSpace.cpp \
concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp freeBlockDictionary.cpp \
freeChunk.cpp freeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \
freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
concurrentG1Refine.cpp concurrentG1RefineThread.cpp concurrentMark.cpp concurrentMarkThread.cpp \
dirtyCardQueue.cpp g1AllocRegion.cpp g1BlockOffsetTable.cpp g1CollectedHeap.cpp g1GCPhaseTimes.cpp \
g1CollectorPolicy.cpp g1ErgoVerbose.cpp g1_globals.cpp g1HRPrinter.cpp g1MarkSweep.cpp \
......
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
#include "runtime/vmThread.hpp"
template <>
void AdaptiveFreeList<FreeChunk>::print_on(outputStream* st, const char* c) const {
if (c != NULL) {
st->print("%16s", c);
} else {
st->print(SIZE_FORMAT_W(16), size());
}
st->print("\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
bfr_surp(), surplus(), desired(), prev_sweep(), before_sweep(),
count(), coal_births(), coal_deaths(), split_births(), split_deaths());
}
template <class Chunk>
AdaptiveFreeList<Chunk>::AdaptiveFreeList() : FreeList<Chunk>(), _hint(0) {
init_statistics();
}
template <class Chunk>
AdaptiveFreeList<Chunk>::AdaptiveFreeList(Chunk* fc) : FreeList<Chunk>(fc), _hint(0) {
init_statistics();
#ifndef PRODUCT
_allocation_stats.set_returned_bytes(size() * HeapWordSize);
#endif
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::initialize() {
FreeList<Chunk>::initialize();
set_hint(0);
init_statistics(true /* split_birth */);
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::reset(size_t hint) {
FreeList<Chunk>::reset();
set_hint(hint);
}
#ifndef PRODUCT
template <class Chunk>
void AdaptiveFreeList<Chunk>::assert_proper_lock_protection_work() const {
assert(protecting_lock() != NULL, "Don't call this directly");
assert(ParallelGCThreads > 0, "Don't call this directly");
Thread* thr = Thread::current();
if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
// assert that we are holding the freelist lock
} else if (thr->is_GC_task_thread()) {
assert(protecting_lock()->owned_by_self(), "FreeList RACE DETECTED");
} else if (thr->is_Java_thread()) {
assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
} else {
ShouldNotReachHere(); // unaccounted thread type?
}
}
#endif
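
A note on the assert above: it encodes the free-list locking protocol. VM and concurrent GC threads are trusted to hold the lock already, parallel GC task threads must demonstrably own it, and Java threads may only be here outside a safepoint. A minimal standalone sketch of that dispatch (the thread kinds, lock, and safepoint flag are simplified stand-ins, not HotSpot types):

#include <cassert>

enum class ThreadKind { VM, ConcurrentGC, GCTask, Java };

struct MockLock {
  ThreadKind owner;
  bool owned_by(ThreadKind t) const { return owner == t; }
};

// Simplified model of assert_proper_lock_protection_work(): only GC task
// threads are required to prove ownership of the list's protecting lock.
void check_lock_protocol(ThreadKind current, const MockLock& lock,
                         bool at_safepoint) {
  switch (current) {
    case ThreadKind::VM:
    case ThreadKind::ConcurrentGC:
      break;  // trusted to be holding the free-list lock
    case ThreadKind::GCTask:
      assert(lock.owned_by(current) && "FreeList RACE DETECTED");
      break;
    case ThreadKind::Java:
      assert(!at_safepoint && "Should not be executing");
      break;
  }
}

int main() {
  MockLock l{ThreadKind::GCTask};
  check_lock_protocol(ThreadKind::GCTask, l, false);  // OK: owns the lock
  check_lock_protocol(ThreadKind::Java, l, false);    // OK: not at a safepoint
  return 0;
}
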
template <class Chunk>
void AdaptiveFreeList<Chunk>::init_statistics(bool split_birth) {
_allocation_stats.initialize(split_birth);
}
template <class Chunk>
size_t AdaptiveFreeList<Chunk>::get_better_size() {
// A candidate chunk has been found. If it is already
// underpopulated and there is a hint, return the hint(). Else
// return the size of this chunk.
if (surplus() <= 0) {
if (hint() != 0) {
return hint();
} else {
return size();
}
} else {
// This list has a surplus so use it.
return size();
}
}
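
get_better_size() is the consumer of the _hint field set up by the sweep census: an underpopulated list redirects the request to the next larger size known to carry a surplus. A self-contained sketch of the decision over a toy record (all names and values are invented for illustration):

#include <cstdio>

// Toy model of one size-indexed free list: its size, current surplus,
// and a hint naming the next larger size with a positive surplus.
struct ToyList {
  size_t size;
  long   surplus;
  size_t hint;      // 0 means "no better size known"
};

// Mirrors AdaptiveFreeList::get_better_size(): prefer this size when it
// has a surplus, otherwise follow the hint when one is set.
size_t get_better_size(const ToyList& l) {
  if (l.surplus <= 0 && l.hint != 0) return l.hint;
  return l.size;
}

int main() {
  ToyList depleted{8, 0, 16};   // no surplus, hint points at size 16
  ToyList flush   {8, 3, 16};   // has surplus, use it directly
  printf("%zu\n", get_better_size(depleted)); // 16
  printf("%zu\n", get_better_size(flush));    // 8
  return 0;
}
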
template <class Chunk>
void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
assert_proper_lock_protection();
return_chunk_at_head(chunk, true);
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
FreeList<Chunk>::return_chunk_at_head(chunk, record_return);
#ifdef ASSERT
if (record_return) {
increment_returned_bytes_by(size()*HeapWordSize);
}
#endif
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
return_chunk_at_tail(chunk, true);
}
template <class Chunk>
void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
FreeList<Chunk>::return_chunk_at_tail(chunk, record_return);
#ifdef ASSERT
if (record_return) {
increment_returned_bytes_by(size()*HeapWordSize);
}
#endif
}
#ifndef PRODUCT
template <class Chunk>
void AdaptiveFreeList<Chunk>::verify_stats() const {
// The +1 of the LH comparand is to allow some "looseness" in
// checking: we usually call this interface when adding a block
// and we'll subsequently update the stats; we cannot update the
// stats beforehand because in the case of the large-block BT
// dictionary for example, this might be the first block and
// in that case there would be no place that we could record
// the stats (which are kept in the block itself).
assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
+ _allocation_stats.coal_births() + 1) // Total Production Stock + 1
>= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
+ (ssize_t)count()), // Total Current Stock + depletion
err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
" violates Conservation Principle: "
"prev_sweep(" SIZE_FORMAT ")"
" + split_births(" SIZE_FORMAT ")"
" + coal_births(" SIZE_FORMAT ") + 1 >= "
" split_deaths(" SIZE_FORMAT ")"
" coal_deaths(" SIZE_FORMAT ")"
" + count(" SSIZE_FORMAT ")",
this, size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
_allocation_stats.coal_births(), _allocation_stats.split_deaths(),
_allocation_stats.coal_deaths(), count()));
}
#endif
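
The asserted inequality is plain bookkeeping: chunks produced for this size class (survivors of the previous sweep plus split and coalesce births, with one chunk of slack) must cover chunks consumed (split and coalesce deaths) plus those still on the list. A worked check with made-up census numbers:

#include <cassert>

// prev_sweep + split_births + coal_births + 1
//     >= split_deaths + coal_deaths + count
bool conserved(long prev_sweep, long split_births, long coal_births,
               long split_deaths, long coal_deaths, long count) {
  return prev_sweep + split_births + coal_births + 1 >=
         split_deaths + coal_deaths + count;
}

int main() {
  // 10 survivors + 4 split births + 2 coal births produced 16 chunks;
  // 3 split deaths + 1 coal death consumed 4, so at most 12 (+1 slack) remain.
  assert(conserved(10, 4, 2, 3, 1, 12));
  assert(!conserved(10, 4, 2, 3, 1, 14));  // 14 on the list is impossible
  return 0;
}
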
// Needs to be after the definitions have been seen.
template class AdaptiveFreeList<FreeChunk>;
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
#define SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
#include "memory/freeList.hpp"
#include "gc_implementation/shared/allocationStats.hpp"
class CompactibleFreeListSpace;
// A class for maintaining a free list of Chunk's. The FreeList
// maintains the structure of the list (head, tail, etc.) plus
// statistics for allocations from the list. The links between items
// are not part of FreeList. The statistics are
// used to make decisions about coalescing Chunk's when they
// are swept during collection.
//
// See the corresponding .cpp file for a description of the specifics
// for that implementation.
class Mutex;
template <class Chunk>
class AdaptiveFreeList : public FreeList<Chunk> {
friend class CompactibleFreeListSpace;
friend class VMStructs;
// friend class PrintTreeCensusClosure<Chunk, FreeList_t>;
size_t _hint; // next larger size list with a positive surplus
AllocationStats _allocation_stats; // allocation-related statistics
public:
AdaptiveFreeList();
AdaptiveFreeList(Chunk* fc);
using FreeList<Chunk>::assert_proper_lock_protection;
#ifdef ASSERT
using FreeList<Chunk>::protecting_lock;
#endif
using FreeList<Chunk>::count;
using FreeList<Chunk>::size;
using FreeList<Chunk>::verify_chunk_in_free_list;
using FreeList<Chunk>::getFirstNChunksFromList;
using FreeList<Chunk>::print_on;
void return_chunk_at_head(Chunk* fc, bool record_return);
void return_chunk_at_head(Chunk* fc);
void return_chunk_at_tail(Chunk* fc, bool record_return);
void return_chunk_at_tail(Chunk* fc);
using FreeList<Chunk>::return_chunk_at_tail;
using FreeList<Chunk>::remove_chunk;
using FreeList<Chunk>::prepend;
using FreeList<Chunk>::print_labels_on;
using FreeList<Chunk>::get_chunk_at_head;
// Initialize.
void initialize();
// Reset the head, tail, hint, and count of a free list.
void reset(size_t hint);
void assert_proper_lock_protection_work() const PRODUCT_RETURN;
void print_on(outputStream* st, const char* c = NULL) const;
size_t hint() const {
return _hint;
}
void set_hint(size_t v) {
assert_proper_lock_protection();
assert(v == 0 || size() < v, "Bad hint");
_hint = v;
}
size_t get_better_size();
// Accessors for statistics
void init_statistics(bool split_birth = false);
AllocationStats* allocation_stats() {
assert_proper_lock_protection();
return &_allocation_stats;
}
ssize_t desired() const {
return _allocation_stats.desired();
}
void set_desired(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_desired(v);
}
void compute_desired(float inter_sweep_current,
float inter_sweep_estimate,
float intra_sweep_estimate) {
assert_proper_lock_protection();
_allocation_stats.compute_desired(count(),
inter_sweep_current,
inter_sweep_estimate,
intra_sweep_estimate);
}
ssize_t coal_desired() const {
return _allocation_stats.coal_desired();
}
void set_coal_desired(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coal_desired(v);
}
ssize_t surplus() const {
return _allocation_stats.surplus();
}
void set_surplus(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_surplus(v);
}
void increment_surplus() {
assert_proper_lock_protection();
_allocation_stats.increment_surplus();
}
void decrement_surplus() {
assert_proper_lock_protection();
_allocation_stats.decrement_surplus();
}
ssize_t bfr_surp() const {
return _allocation_stats.bfr_surp();
}
void set_bfr_surp(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_bfr_surp(v);
}
ssize_t prev_sweep() const {
return _allocation_stats.prev_sweep();
}
void set_prev_sweep(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_prev_sweep(v);
}
ssize_t before_sweep() const {
return _allocation_stats.before_sweep();
}
void set_before_sweep(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_before_sweep(v);
}
ssize_t coal_births() const {
return _allocation_stats.coal_births();
}
void set_coal_births(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coal_births(v);
}
void increment_coal_births() {
assert_proper_lock_protection();
_allocation_stats.increment_coal_births();
}
ssize_t coal_deaths() const {
return _allocation_stats.coal_deaths();
}
void set_coal_deaths(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coal_deaths(v);
}
void increment_coal_deaths() {
assert_proper_lock_protection();
_allocation_stats.increment_coal_deaths();
}
ssize_t split_births() const {
return _allocation_stats.split_births();
}
void set_split_births(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_split_births(v);
}
void increment_split_births() {
assert_proper_lock_protection();
_allocation_stats.increment_split_births();
}
ssize_t split_deaths() const {
return _allocation_stats.split_deaths();
}
void set_split_deaths(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_split_deaths(v);
}
void increment_split_deaths() {
assert_proper_lock_protection();
_allocation_stats.increment_split_deaths();
}
#ifndef PRODUCT
// For debugging. The "_returned_bytes" in all the lists are summed
// and compared with the total number of bytes swept during a
// collection.
size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
void increment_returned_bytes_by(size_t v) {
_allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
}
// Stats verification
void verify_stats() const;
#endif // NOT PRODUCT
};
#endif // SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
@@ -91,7 +91,7 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
_collector(NULL)
{
assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
"FreeChunk is larger than expected");
"FreeChunk is larger than expected");
_bt.set_space(this);
initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
// We have all of "mr", all of which we place in the dictionary
@@ -101,14 +101,14 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
// implementation, namely, the simple binary tree (splaying
// temporarily disabled).
switch (dictionaryChoice) {
case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
_dictionary = new BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>(mr);
break;
case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
default:
warning("dictionaryChoice: selected option not understood; using"
" default BinaryTreeDictionary implementation instead.");
case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
_dictionary = new BinaryTreeDictionary<FreeChunk>(mr, use_adaptive_freelists);
break;
}
assert(_dictionary != NULL, "CMS dictionary initialization");
// The indexed free lists are initially all empty and are lazily
@@ -453,7 +453,7 @@ const {
reportIndexedFreeListStatistics();
gclog_or_tty->print_cr("Layout of Indexed Freelists");
gclog_or_tty->print_cr("---------------------------");
FreeList<FreeChunk>::print_labels_on(st, "size");
AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
_indexedFreeList[i].print_on(gclog_or_tty);
for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
@@ -1319,7 +1319,7 @@ FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
size_t currSize = numWords + MinChunkSize;
assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
FreeList<FreeChunk>* fl = &_indexedFreeList[i];
AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
if (fl->head()) {
ret = getFromListGreater(fl, numWords);
assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
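For sizes below IndexSetSize this path takes a chunk from the first non-empty larger list and splits it; starting the scan at numWords + MinChunkSize guarantees the split remainder is itself a legal chunk. A minimal sketch of that split arithmetic (the MinChunkSize value is illustrative, not HotSpot's):

#include <cassert>
#include <cstdio>

const size_t kMinChunkSize = 4;  // illustrative stand-in for MinChunkSize

// Split a free chunk of donor_size words to satisfy num_words,
// returning the size of the remainder that is re-filed on a free list.
size_t split_from_greater(size_t donor_size, size_t num_words) {
  assert(donor_size >= num_words + kMinChunkSize &&
         "remainder must itself be a valid chunk");
  return donor_size - num_words;
}

int main() {
  // Request 10 words; the smallest eligible donor list is 10 + 4 = 14 words.
  size_t rem = split_from_greater(14, 10);
  printf("remainder = %zu words\n", rem);  // 4, goes back on the size-4 list
  return 0;
}
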
@@ -1702,7 +1702,9 @@ CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
_dictionary->return_chunk(chunk);
#ifndef PRODUCT
if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
TreeChunk<FreeChunk>::as_TreeChunk(chunk)->list()->verify_stats();
TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
tl->verify_stats();
}
#endif // PRODUCT
}
@@ -1745,7 +1747,7 @@ CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
{
MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
ec = dictionary()->find_largest_dict(); // get largest block
if (ec != NULL && ec->end() == chunk) {
if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
// It's a coterminal block - we can coalesce.
size_t old_size = ec->size();
coalDeath(old_size);
@@ -1850,11 +1852,11 @@ FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
the excess is >= MIN_CHUNK. */
size_t start = align_object_size(numWords + MinChunkSize);
if (start < IndexSetSize) {
FreeList<FreeChunk>* it = _indexedFreeList;
AdaptiveFreeList<FreeChunk>* it = _indexedFreeList;
size_t hint = _indexedFreeList[start].hint();
while (hint < IndexSetSize) {
assert(hint % MinObjAlignment == 0, "hint should be aligned");
FreeList<FreeChunk> *fl = &_indexedFreeList[hint];
AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
if (fl->surplus() > 0 && fl->head() != NULL) {
// Found a list with surplus, reset original hint
// and split out a free chunk which is returned.
@@ -1873,7 +1875,7 @@ FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
}
/* Requires fl->size >= numWords + MinChunkSize */
FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList<FreeChunk>* fl,
FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
size_t numWords) {
FreeChunk *curr = fl->head();
size_t oldNumWords = curr->size();
@@ -2155,7 +2157,7 @@ void CompactibleFreeListSpace::beginSweepFLCensus(
assert_locked();
size_t i;
for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
FreeList<FreeChunk>* fl = &_indexedFreeList[i];
AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
if (PrintFLSStatistics > 1) {
gclog_or_tty->print("size[%d] : ", i);
}
@@ -2174,7 +2176,7 @@ void CompactibleFreeListSpace::setFLSurplus() {
assert_locked();
size_t i;
for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
FreeList<FreeChunk> *fl = &_indexedFreeList[i];
AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
fl->set_surplus(fl->count() -
(ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
}
@@ -2185,7 +2187,7 @@ void CompactibleFreeListSpace::setFLHints() {
size_t i;
size_t h = IndexSetSize;
for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
FreeList<FreeChunk> *fl = &_indexedFreeList[i];
AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
fl->set_hint(h);
if (fl->surplus() > 0) {
h = i;
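setFLHints() scans the indexed lists from the largest size downward, pointing each list at the nearest larger size that currently has a surplus. A standalone sketch of that descending scan (array size, a stride of 1, and the surplus values are invented):

#include <cstdio>

int main() {
  const size_t kSize = 16;          // stand-in for IndexSetSize
  long   surplus[kSize] = {0};
  size_t hint   [kSize] = {0};
  surplus[5] = 2; surplus[9] = 1;   // two size classes with spare chunks

  // Descending scan: h is the nearest size above i known to have surplus.
  size_t h = kSize;                 // kSize means "no such list"
  for (size_t i = kSize - 1; i != 0; i--) {
    hint[i] = h;
    if (surplus[i] > 0) h = i;
  }

  printf("hint[3] = %zu\n", hint[3]);  // 5: nearest larger surplus list
  printf("hint[7] = %zu\n", hint[7]);  // 9
  printf("hint[9] = %zu\n", hint[9]);  // 16: nothing larger has surplus
  return 0;
}
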
@@ -2197,7 +2199,7 @@ void CompactibleFreeListSpace::clearFLCensus() {
assert_locked();
size_t i;
for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
FreeList<FreeChunk> *fl = &_indexedFreeList[i];
AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
fl->set_prev_sweep(fl->count());
fl->set_coal_births(0);
fl->set_coal_deaths(0);
@@ -2224,7 +2226,7 @@ void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
if (size < SmallForDictionary) {
FreeList<FreeChunk> *fl = &_indexedFreeList[size];
AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
return (fl->coal_desired() < 0) ||
((int)fl->count() > fl->coal_desired());
} else {
@@ -2234,14 +2236,14 @@ bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
assert(size < SmallForDictionary, "Size too large for indexed list");
FreeList<FreeChunk> *fl = &_indexedFreeList[size];
AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
fl->increment_coal_births();
fl->increment_surplus();
}
void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
assert(size < SmallForDictionary, "Size too large for indexed list");
FreeList<FreeChunk> *fl = &_indexedFreeList[size];
AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
fl->increment_coal_deaths();
fl->decrement_surplus();
}
@@ -2250,7 +2252,7 @@ void CompactibleFreeListSpace::coalBirth(size_t size) {
if (size < SmallForDictionary) {
smallCoalBirth(size);
} else {
dictionary()->dict_census_udpate(size,
dictionary()->dict_census_update(size,
false /* split */,
true /* birth */);
}
@@ -2260,7 +2262,7 @@ void CompactibleFreeListSpace::coalDeath(size_t size) {
if(size < SmallForDictionary) {
smallCoalDeath(size);
} else {
dictionary()->dict_census_udpate(size,
dictionary()->dict_census_update(size,
false /* split */,
false /* birth */);
}
@@ -2268,14 +2270,14 @@ void CompactibleFreeListSpace::coalDeath(size_t size) {
void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
assert(size < SmallForDictionary, "Size too large for indexed list");
FreeList<FreeChunk> *fl = &_indexedFreeList[size];
AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
fl->increment_split_births();
fl->increment_surplus();
}
void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
assert(size < SmallForDictionary, "Size too large for indexed list");
FreeList<FreeChunk> *fl = &_indexedFreeList[size];
AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
fl->increment_split_deaths();
fl->decrement_surplus();
}
@@ -2284,7 +2286,7 @@ void CompactibleFreeListSpace::split_birth(size_t size) {
if (size < SmallForDictionary) {
smallSplitBirth(size);
} else {
dictionary()->dict_census_udpate(size,
dictionary()->dict_census_update(size,
true /* split */,
true /* birth */);
}
@@ -2294,7 +2296,7 @@ void CompactibleFreeListSpace::splitDeath(size_t size) {
if (size < SmallForDictionary) {
smallSplitDeath(size);
} else {
dictionary()->dict_census_udpate(size,
dictionary()->dict_census_update(size,
true /* split */,
false /* birth */);
}
@@ -2517,10 +2519,10 @@ void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
#ifndef PRODUCT
void CompactibleFreeListSpace::check_free_list_consistency() const {
assert(_dictionary->min_size() <= IndexSetSize,
assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
"Some sizes can't be allocated without recourse to"
" linear allocation buffers");
assert(BinaryTreeDictionary<FreeChunk>::min_tree_chunk_size*HeapWordSize == sizeof(TreeChunk<FreeChunk>),
assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
"else MIN_TREE_CHUNK_SIZE is wrong");
assert(IndexSetStart != 0, "IndexSetStart not initialized");
assert(IndexSetStride != 0, "IndexSetStride not initialized");
@@ -2529,15 +2531,15 @@ void CompactibleFreeListSpace::check_free_list_consistency() const {
void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
assert_lock_strong(&_freelistLock);
FreeList<FreeChunk> total;
AdaptiveFreeList<FreeChunk> total;
gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
size_t total_free = 0;
for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
const FreeList<FreeChunk> *fl = &_indexedFreeList[i];
const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
total_free += fl->count() * fl->size();
if (i % (40*IndexSetStride) == 0) {
FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
}
fl->print_on(gclog_or_tty);
total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
@@ -2620,7 +2622,7 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
res = _cfls->getChunkFromDictionaryExact(word_sz);
if (res == NULL) return NULL;
} else {
FreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
if (fl->count() == 0) {
// Attempt to refill this local free list.
get_from_global_pool(word_sz, fl);
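CFLS_LAB is a per-thread local allocation buffer over the shared space: small requests are served from a thread-local indexed list, and a miss triggers a bulk refill from the global pool so the lock is taken once per batch rather than once per block. A compact sketch of that refill-then-serve pattern (the counters are simplifications of the real chunk lists):

#include <cstdio>

// Toy model of CFLS_LAB: a thread-local cache of free blocks of one size,
// refilled in bulk from a shared pool when it runs dry.
struct ToyPool { long free_blocks = 100; };

struct ToyLab {
  ToyPool* pool;
  long     cached;

  bool alloc() {
    if (cached == 0) {                       // local miss:
      long want = 8;                         // bulk refill amortizes locking
      long got  = want < pool->free_blocks ? want : pool->free_blocks;
      pool->free_blocks -= got;
      cached += got;
      if (cached == 0) return false;         // global pool exhausted too
    }
    cached--;                                // serve from the local cache
    return true;
  }
};

int main() {
  ToyPool pool;
  ToyLab  lab{&pool, 0};
  lab.alloc();
  printf("cached after first alloc: %ld (refilled 8, used 1)\n", lab.cached); // 7
  return 0;
}
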
@@ -2640,7 +2642,7 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
// Get a chunk of blocks of the right size and update related
// book-keeping stats
void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl) {
void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
// Get the #blocks we want to claim
size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
assert(n_blks > 0, "Error");
@@ -2722,7 +2724,7 @@ void CFLS_LAB::retire(int tid) {
if (num_retire > 0) {
_cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
// Reset this list.
_indexedFreeList[i] = FreeList<FreeChunk>();
_indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
_indexedFreeList[i].set_size(i);
}
}
@@ -2736,7 +2738,7 @@ void CFLS_LAB::retire(int tid) {
}
}
void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl) {
void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
assert(fl->count() == 0, "Precondition.");
assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
"Precondition");
@@ -2752,12 +2754,12 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
(cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
(CMSSplitIndexedFreeListBlocks || k <= 1);
k++, cur_sz = k * word_sz) {
FreeList<FreeChunk> fl_for_cur_sz; // Empty.
AdaptiveFreeList<FreeChunk> fl_for_cur_sz; // Empty.
fl_for_cur_sz.set_size(cur_sz);
{
MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
Mutex::_no_safepoint_check_flag);
FreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
if (gfl->count() != 0) {
// nn is the number of chunks of size cur_sz that
// we'd need to split k-ways each, in order to create
@@ -2832,12 +2834,11 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
MutexLockerEx x(parDictionaryAllocLock(),
Mutex::_no_safepoint_check_flag);
while (n > 0) {
fc = dictionary()->get_chunk(MAX2(n * word_sz,
_dictionary->min_size()),
fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
FreeBlockDictionary<FreeChunk>::atLeast);
if (fc != NULL) {
_bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
dictionary()->dict_census_udpate(fc->size(),
dictionary()->dict_census_update(fc->size(),
true /*split*/,
false /*birth*/);
break;
@@ -2890,7 +2891,7 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
fc->set_size(prefix_size);
if (rem >= IndexSetSize) {
returnChunkToDictionary(rem_fc);
dictionary()->dict_census_udpate(rem, true /*split*/, true /*birth*/);
dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
rem_fc = NULL;
}
// Otherwise, return it to the small list below.
......
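When the indexed lists cannot supply the batch, par_get_chunk_of_blocks() above falls back to the dictionary: it takes one chunk of at least n * word_sz words and carves it into word_sz-sized blocks, returning any remainder to the free lists. A sketch of the carving arithmetic (all names invented):

#include <cstdio>

// Carve a donor chunk into blocks of word_sz words each.
// Returns how many blocks fit; *rem is the leftover word count.
size_t carve(size_t donor_words, size_t word_sz, size_t* rem) {
  size_t n = donor_words / word_sz;
  *rem = donor_words - n * word_sz;
  return n;
}

int main() {
  size_t rem;
  size_t n = carve(100, 12, &rem);
  printf("%zu blocks of 12 words, %zu words returned to free lists\n",
         n, rem);  // 8 blocks, 4 words left over
  return 0;
}
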
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/blockOffsetTable.inline.hpp"
@@ -38,6 +39,7 @@
class CompactibleFreeListSpace;
class BlkClosure;
class BlkClosureCareful;
class FreeChunk;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;
@@ -131,7 +133,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
FreeBlockDictionary<FreeChunk>* _dictionary; // ptr to dictionary for large size blocks
FreeList<FreeChunk> _indexedFreeList[IndexSetSize];
AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
// indexed array for small size blocks
// allocation stategy
bool _fitStrategy; // Use best fit strategy.
@@ -168,7 +170,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// If the count of "fl" is negative, it's absolute value indicates a
// number of free chunks that had been previously "borrowed" from global
// list of size "word_sz", and must now be decremented.
void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl);
void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
// Allocation helper functions
// Allocate using a strategy that takes from the indexed free lists
@@ -214,7 +216,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
// and return it. The split off remainder is returned to
// the free lists. The old name for getFromListGreater
// was lookInListGreater.
FreeChunk* getFromListGreater(FreeList<FreeChunk>* fl, size_t numWords);
FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
// Get a chunk in the indexed free list or dictionary,
// by considering a larger chunk and splitting it.
FreeChunk* getChunkFromGreater(size_t numWords);
@@ -621,7 +623,7 @@ class CFLS_LAB : public CHeapObj<mtGC> {
CompactibleFreeListSpace* _cfls;
// Our local free lists.
FreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
// Initialized from a command-line arg.
@@ -634,7 +636,7 @@ class CFLS_LAB : public CHeapObj<mtGC> {
size_t _num_blocks [CompactibleFreeListSpace::IndexSetSize];
// Internal work method
void get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl);
void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);
public:
CFLS_LAB(CompactibleFreeListSpace* cfls);
......
@@ -9143,7 +9143,7 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
size_t shrinkable_size_in_bytes = chunk_at_end->size();
size_t aligned_shrinkable_size_in_bytes =
align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
assert(unallocated_start <= chunk_at_end->end(),
assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
"Inconsistent chunk at end of space");
size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
size_t word_size_before = heap_word_size(_virtual_space.committed_size());
@@ -9210,7 +9210,7 @@ void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
"Inconsistency at end of space");
assert(chunk_at_end->end() == _cmsSpace->end(),
assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
"Shrinking is inconsistent");
return;
}
......
@@ -133,7 +133,7 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
}
// Return the address past the end of this chunk
HeapWord* end() const { return ((HeapWord*) this) + size(); }
uintptr_t* end() const { return ((uintptr_t*) this) + size(); }
// debugging
void verify() const PRODUCT_RETURN;
......
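The hunk above changes FreeChunk::end() to return uintptr_t* instead of HeapWord*, which is why the comparisons in compactibleFreeListSpace.cpp and concurrentMarkSweepGeneration.cpp earlier in this change gained explicit casts. The arithmetic is unaffected because both pointee types are one machine word wide, as this small demonstration (with a stand-in word type) shows:

#include <cstdint>
#include <cstdio>

struct Word { uintptr_t w; };   // stand-in for HeapWord: one machine word

int main() {
  Word   heap[16];
  Word*  chunk = &heap[4];
  size_t size  = 8;             // chunk size in words

  // end() computed through either pointee type lands on the same address.
  Word*      end_as_word = chunk + size;
  uintptr_t* end_as_uint = (uintptr_t*)chunk + size;
  printf("same address: %s\n",
         (void*)end_as_word == (void*)end_as_uint ? "yes" : "no");
  return 0;
}
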
@@ -25,6 +25,8 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_VMSTRUCTS_CMS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_VMSTRUCTS_CMS_HPP
typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList> AFLBinaryTreeDictionary;
#define VM_STRUCTS_CMS(nonstatic_field, \
volatile_nonstatic_field, \
static_field) \
@@ -38,14 +40,8 @@
nonstatic_field(CMSCollector, _markBitMap, CMSBitMap) \
nonstatic_field(ConcurrentMarkSweepGeneration, _cmsSpace, CompactibleFreeListSpace*) \
static_field(ConcurrentMarkSweepThread, _collector, CMSCollector*) \
volatile_nonstatic_field(FreeChunk, _size, size_t) \
nonstatic_field(FreeChunk, _next, FreeChunk*) \
nonstatic_field(FreeChunk, _prev, FreeChunk*) \
nonstatic_field(LinearAllocBlock, _word_size, size_t) \
nonstatic_field(FreeList<FreeChunk>, _size, size_t) \
nonstatic_field(FreeList<FreeChunk>, _count, ssize_t) \
nonstatic_field(BinaryTreeDictionary<FreeChunk>,_total_size, size_t) \
nonstatic_field(CompactibleFreeListSpace, _dictionary, FreeBlockDictionary<FreeChunk>*) \
nonstatic_field(AFLBinaryTreeDictionary, _total_size, size_t) \
nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], FreeList<FreeChunk>) \
nonstatic_field(CompactibleFreeListSpace, _smallLinearAllocBlock, LinearAllocBlock)
@@ -60,19 +56,17 @@
declare_toplevel_type(CMSCollector) \
declare_toplevel_type(CMSBitMap) \
declare_toplevel_type(FreeChunk) \
declare_toplevel_type(Metablock) \
declare_toplevel_type(ConcurrentMarkSweepThread*) \
declare_toplevel_type(ConcurrentMarkSweepGeneration*) \
declare_toplevel_type(SurrogateLockerThread*) \
declare_toplevel_type(CompactibleFreeListSpace*) \
declare_toplevel_type(CMSCollector*) \
declare_toplevel_type(FreeChunk*) \
declare_toplevel_type(BinaryTreeDictionary<FreeChunk>*) \
declare_toplevel_type(FreeBlockDictionary<FreeChunk>*) \
declare_toplevel_type(FreeList<FreeChunk>*) \
declare_toplevel_type(FreeList<FreeChunk>) \
declare_toplevel_type(AFLBinaryTreeDictionary*) \
declare_toplevel_type(LinearAllocBlock) \
declare_toplevel_type(FreeBlockDictionary<FreeChunk>) \
declare_type(BinaryTreeDictionary<FreeChunk>, FreeBlockDictionary<FreeChunk>)
declare_type(AFLBinaryTreeDictionary, FreeBlockDictionary<FreeChunk>) \
#define VM_INT_CONSTANTS_CMS(declare_constant) \
declare_constant(Generation::ConcurrentMarkSweep) \
......
@@ -191,7 +191,7 @@ class VM_GenCollectFull: public VM_GC_Operation {
class VM_CollectForMetadataAllocation: public VM_GC_Operation {
private:
MetaWord* _result;
size_t _size; // size of object to be allocated
size_t _size; // size of object to be allocated
Metaspace::MetadataType _mdtype;
ClassLoaderData* _loader_data;
public:
......
@@ -37,77 +37,78 @@
// A TreeList is a FreeList which can be used to maintain a
// binary tree of free lists.
template <class Chunk> class TreeChunk;
template <class Chunk> class BinaryTreeDictionary;
template <class Chunk> class AscendTreeCensusClosure;
template <class Chunk> class DescendTreeCensusClosure;
template <class Chunk> class DescendTreeSearchClosure;
template <class Chunk>
class TreeList: public FreeList<Chunk> {
friend class TreeChunk<Chunk>;
friend class BinaryTreeDictionary<Chunk>;
friend class AscendTreeCensusClosure<Chunk>;
friend class DescendTreeCensusClosure<Chunk>;
friend class DescendTreeSearchClosure<Chunk>;
TreeList<Chunk>* _parent;
TreeList<Chunk>* _left;
TreeList<Chunk>* _right;
template <class Chunk_t, template <class> class FreeList_t> class TreeChunk;
template <class Chunk_t, template <class> class FreeList_t> class BinaryTreeDictionary;
template <class Chunk_t, template <class> class FreeList_t> class AscendTreeCensusClosure;
template <class Chunk_t, template <class> class FreeList_t> class DescendTreeCensusClosure;
template <class Chunk_t, template <class> class FreeList_t> class DescendTreeSearchClosure;
template <class Chunk_t, template <class> class FreeList_t>
class TreeList : public FreeList_t<Chunk_t> {
friend class TreeChunk<Chunk_t, FreeList_t>;
friend class BinaryTreeDictionary<Chunk_t, FreeList_t>;
friend class AscendTreeCensusClosure<Chunk_t, FreeList_t>;
friend class DescendTreeCensusClosure<Chunk_t, FreeList_t>;
friend class DescendTreeSearchClosure<Chunk_t, FreeList_t>;
TreeList<Chunk_t, FreeList_t>* _parent;
TreeList<Chunk_t, FreeList_t>* _left;
TreeList<Chunk_t, FreeList_t>* _right;
protected:
TreeList<Chunk>* parent() const { return _parent; }
TreeList<Chunk>* left() const { return _left; }
TreeList<Chunk>* right() const { return _right; }
// Explicitly import these names into our namespace to fix name lookup with templates
using FreeList<Chunk>::head;
using FreeList<Chunk>::set_head;
TreeList<Chunk_t, FreeList_t>* parent() const { return _parent; }
TreeList<Chunk_t, FreeList_t>* left() const { return _left; }
TreeList<Chunk_t, FreeList_t>* right() const { return _right; }
using FreeList<Chunk>::tail;
using FreeList<Chunk>::set_tail;
using FreeList<Chunk>::link_tail;
// Wrapper on call to base class, to get the template to compile.
Chunk_t* head() const { return FreeList_t<Chunk_t>::head(); }
Chunk_t* tail() const { return FreeList_t<Chunk_t>::tail(); }
void set_head(Chunk_t* head) { FreeList_t<Chunk_t>::set_head(head); }
void set_tail(Chunk_t* tail) { FreeList_t<Chunk_t>::set_tail(tail); }
using FreeList<Chunk>::increment_count;
NOT_PRODUCT(using FreeList<Chunk>::increment_returned_bytes_by;)
using FreeList<Chunk>::verify_chunk_in_free_list;
using FreeList<Chunk>::size;
size_t size() const { return FreeList_t<Chunk_t>::size(); }
// Accessors for links in tree.
void set_left(TreeList<Chunk>* tl) {
void set_left(TreeList<Chunk_t, FreeList_t>* tl) {
_left = tl;
if (tl != NULL)
tl->set_parent(this);
}
void set_right(TreeList<Chunk>* tl) {
void set_right(TreeList<Chunk_t, FreeList_t>* tl) {
_right = tl;
if (tl != NULL)
tl->set_parent(this);
}
void set_parent(TreeList<Chunk>* tl) { _parent = tl; }
void set_parent(TreeList<Chunk_t, FreeList_t>* tl) { _parent = tl; }
void clearLeft() { _left = NULL; }
void clear_left() { _left = NULL; }
void clear_right() { _right = NULL; }
void clear_parent() { _parent = NULL; }
void initialize() { clearLeft(); clear_right(), clear_parent(); }
void initialize() { clear_left(); clear_right(); clear_parent(); FreeList_t<Chunk_t>::initialize(); }
// For constructing a TreeList from a Tree chunk or
// address and size.
static TreeList<Chunk>* as_TreeList(TreeChunk<Chunk>* tc);
static TreeList<Chunk>* as_TreeList(HeapWord* addr, size_t size);
TreeList();
static TreeList<Chunk_t, FreeList_t>*
as_TreeList(TreeChunk<Chunk_t, FreeList_t>* tc);
static TreeList<Chunk_t, FreeList_t>* as_TreeList(HeapWord* addr, size_t size);
// Returns the head of the free list as a pointer to a TreeChunk.
TreeChunk<Chunk>* head_as_TreeChunk();
TreeChunk<Chunk_t, FreeList_t>* head_as_TreeChunk();
// Returns the first available chunk in the free list as a pointer
// to a TreeChunk.
TreeChunk<Chunk>* first_available();
TreeChunk<Chunk_t, FreeList_t>* first_available();
// Returns the block with the largest heap address amongst
// those in the list for this size; potentially slow and expensive,
// use with caution!
TreeChunk<Chunk>* largest_address();
TreeChunk<Chunk_t, FreeList_t>* largest_address();
TreeList<Chunk_t, FreeList_t>* get_better_list(
BinaryTreeDictionary<Chunk_t, FreeList_t>* dictionary);
// remove_chunk_replace_if_needed() removes the given "tc" from the TreeList.
// If "tc" is the first chunk in the list, it is also the
@@ -115,10 +116,10 @@ class TreeList: public FreeList<Chunk> {
// returns the possibly replaced TreeList* for the node in
// the tree. It also updates the parent of the original
// node to point to the new node.
TreeList<Chunk>* remove_chunk_replace_if_needed(TreeChunk<Chunk>* tc);
TreeList<Chunk_t, FreeList_t>* remove_chunk_replace_if_needed(TreeChunk<Chunk_t, FreeList_t>* tc);
// See FreeList.
void return_chunk_at_head(TreeChunk<Chunk>* tc);
void return_chunk_at_tail(TreeChunk<Chunk>* tc);
void return_chunk_at_head(TreeChunk<Chunk_t, FreeList_t>* tc);
void return_chunk_at_tail(TreeChunk<Chunk_t, FreeList_t>* tc);
};
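
A TreeList is thus a node in a size-keyed binary search tree whose payload is the whole free list for that size; lookups simply descend by size. A minimal sketch of the descent, as in find_list() further down (no balancing, invented node type):

#include <cstdio>

// Minimal size-keyed BST node: each node owns all free chunks of one size.
struct Node {
  size_t size;
  int    count;     // chunks on this node's free list
  Node*  left;
  Node*  right;
};

// Find the list of exactly `size` words, as BinaryTreeDictionary::find_list does.
Node* find_list(Node* root, size_t size) {
  while (root != nullptr && root->size != size)
    root = size < root->size ? root->left : root->right;
  return root;
}

int main() {
  Node n16 {16, 5, nullptr, nullptr};
  Node n256{256, 1, nullptr, nullptr};
  Node n64 {64, 2, &n16, &n256};
  Node* hit = find_list(&n64, 16);
  printf("size 16 list has %d chunks\n", hit ? hit->count : 0);
  return 0;
}
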
// A TreeChunk is a subclass of a Chunk that additionally
@@ -134,52 +135,54 @@ class TreeList: public FreeList<Chunk> {
// on the free list for a node in the tree and is only removed if
// it is the last chunk on the free list.
template <class Chunk>
class TreeChunk : public Chunk {
friend class TreeList<Chunk>;
TreeList<Chunk>* _list;
TreeList<Chunk> _embedded_list; // if non-null, this chunk is on _list
template <class Chunk_t, template <class> class FreeList_t>
class TreeChunk : public Chunk_t {
friend class TreeList<Chunk_t, FreeList_t>;
TreeList<Chunk_t, FreeList_t>* _list;
TreeList<Chunk_t, FreeList_t> _embedded_list; // if non-null, this chunk is on _list
static size_t _min_tree_chunk_size;
protected:
TreeList<Chunk>* embedded_list() const { return (TreeList<Chunk>*) &_embedded_list; }
void set_embedded_list(TreeList<Chunk>* v) { _embedded_list = *v; }
TreeList<Chunk_t, FreeList_t>* embedded_list() const { return (TreeList<Chunk_t, FreeList_t>*) &_embedded_list; }
void set_embedded_list(TreeList<Chunk_t, FreeList_t>* v) { _embedded_list = *v; }
public:
TreeList<Chunk>* list() { return _list; }
void set_list(TreeList<Chunk>* v) { _list = v; }
static TreeChunk<Chunk>* as_TreeChunk(Chunk* fc);
TreeList<Chunk_t, FreeList_t>* list() { return _list; }
void set_list(TreeList<Chunk_t, FreeList_t>* v) { _list = v; }
static TreeChunk<Chunk_t, FreeList_t>* as_TreeChunk(Chunk_t* fc);
// Initialize fields in a TreeChunk that should be
// initialized when the TreeChunk is being added to
// a free list in the tree.
void initialize() { embedded_list()->initialize(); }
Chunk* next() const { return Chunk::next(); }
Chunk* prev() const { return Chunk::prev(); }
size_t size() const volatile { return Chunk::size(); }
Chunk_t* next() const { return Chunk_t::next(); }
Chunk_t* prev() const { return Chunk_t::prev(); }
size_t size() const volatile { return Chunk_t::size(); }
static size_t min_size() {
return _min_tree_chunk_size;
}
// debugging
void verify_tree_chunk_list() const;
void assert_is_mangled() const;
};
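
The _embedded_list field is the reason min_size() exists: the first chunk of a size class does double duty, with the TreeList node for that size living inside the chunk's own free words, so no dictionary chunk may be smaller than a TreeChunk. A sketch of that in-place embedding (toy types; placement-new stands in for HotSpot's raw-memory casts):

#include <cstdio>
#include <cstdint>
#include <new>

struct ToyList { size_t size; int count; };  // stands in for TreeList

int main() {
  // A "free chunk" is just idle heap words; reuse its first words to
  // hold the tree node for its size class, as TreeChunk does.
  uintptr_t chunk[8] = {0};                   // 8 free words
  ToyList* node = new (chunk) ToyList{8, 1};  // placement-new into the chunk

  // No chunk smaller than this many words can carry an embedded node:
  size_t min_words =
      (sizeof(ToyList) + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
  printf("embedded list: size %zu, count %d, min chunk %zu words\n",
         node->size, node->count, min_words);
  return 0;
}
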
template <class Chunk>
class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
template <class Chunk_t, template <class> class FreeList_t>
class BinaryTreeDictionary: public FreeBlockDictionary<Chunk_t> {
friend class VMStructs;
bool _splay;
bool _adaptive_freelists;
size_t _total_size;
size_t _total_free_blocks;
TreeList<Chunk>* _root;
TreeList<Chunk_t, FreeList_t>* _root;
// private accessors
bool splay() const { return _splay; }
void set_splay(bool v) { _splay = v; }
void set_total_size(size_t v) { _total_size = v; }
virtual void inc_total_size(size_t v);
virtual void dec_total_size(size_t v);
size_t total_free_blocks() const { return _total_free_blocks; }
void set_total_free_blocks(size_t v) { _total_free_blocks = v; }
TreeList<Chunk>* root() const { return _root; }
void set_root(TreeList<Chunk>* v) { _root = v; }
bool adaptive_freelists() { return _adaptive_freelists; }
TreeList<Chunk_t, FreeList_t>* root() const { return _root; }
void set_root(TreeList<Chunk_t, FreeList_t>* v) { _root = v; }
// This field is added and can be set to point to the
// the Mutex used to synchronize access to the
@@ -191,54 +194,55 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
// return it. If the chunk
// is the last chunk of that size, remove the node for that size
// from the tree.
TreeChunk<Chunk>* get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither, bool splay);
// Return a list of the specified size or NULL from the tree.
// The list is not removed from the tree.
TreeList<Chunk>* find_list (size_t size) const;
TreeChunk<Chunk_t, FreeList_t>* get_chunk_from_tree(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither);
// Remove this chunk from the tree. If the removal results
// in an empty list in the tree, remove the empty list.
TreeChunk<Chunk>* remove_chunk_from_tree(TreeChunk<Chunk>* tc);
TreeChunk<Chunk_t, FreeList_t>* remove_chunk_from_tree(TreeChunk<Chunk_t, FreeList_t>* tc);
// Remove the node in the trees starting at tl that has the
// minimum value and return it. Repair the tree as needed.
TreeList<Chunk>* remove_tree_minimum(TreeList<Chunk>* tl);
void semi_splay_step(TreeList<Chunk>* tl);
TreeList<Chunk_t, FreeList_t>* remove_tree_minimum(TreeList<Chunk_t, FreeList_t>* tl);
// Add this free chunk to the tree.
void insert_chunk_in_tree(Chunk* freeChunk);
void insert_chunk_in_tree(Chunk_t* freeChunk);
public:
static const size_t min_tree_chunk_size = sizeof(TreeChunk<Chunk>)/HeapWordSize;
// Return a list of the specified size or NULL from the tree.
// The list is not removed from the tree.
TreeList<Chunk_t, FreeList_t>* find_list (size_t size) const;
void verify_tree() const;
// verify that the given chunk is in the tree.
bool verify_chunk_in_free_list(Chunk* tc) const;
bool verify_chunk_in_free_list(Chunk_t* tc) const;
private:
void verify_tree_helper(TreeList<Chunk>* tl) const;
static size_t verify_prev_free_ptrs(TreeList<Chunk>* tl);
void verify_tree_helper(TreeList<Chunk_t, FreeList_t>* tl) const;
static size_t verify_prev_free_ptrs(TreeList<Chunk_t, FreeList_t>* tl);
// Returns the total number of chunks in the list.
size_t total_list_length(TreeList<Chunk>* tl) const;
size_t total_list_length(TreeList<Chunk_t, FreeList_t>* tl) const;
// Returns the total number of words in the chunks in the tree
// starting at "tl".
size_t total_size_in_tree(TreeList<Chunk>* tl) const;
size_t total_size_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const;
// Returns the sum of the square of the size of each block
// in the tree starting at "tl".
double sum_of_squared_block_sizes(TreeList<Chunk>* const tl) const;
double sum_of_squared_block_sizes(TreeList<Chunk_t, FreeList_t>* const tl) const;
// Returns the total number of free blocks in the tree starting
// at "tl".
size_t total_free_blocks_in_tree(TreeList<Chunk>* tl) const;
size_t num_free_blocks() const;
size_t treeHeight() const;
size_t tree_height_helper(TreeList<Chunk>* tl) const;
size_t total_nodes_in_tree(TreeList<Chunk>* tl) const;
size_t total_nodes_helper(TreeList<Chunk>* tl) const;
size_t total_free_blocks_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const;
size_t num_free_blocks() const;
size_t tree_height() const;
size_t tree_height_helper(TreeList<Chunk_t, FreeList_t>* tl) const;
size_t total_nodes_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const;
size_t total_nodes_helper(TreeList<Chunk_t, FreeList_t>* tl) const;
public:
// Constructor
BinaryTreeDictionary(bool adaptive_freelists, bool splay = false);
BinaryTreeDictionary(MemRegion mr, bool adaptive_freelists, bool splay = false);
BinaryTreeDictionary() :
_total_size(0), _total_free_blocks(0), _root(0) {}
BinaryTreeDictionary(MemRegion mr);
// Public accessors
size_t total_size() const { return _total_size; }
size_t total_free_blocks() const { return _total_free_blocks; }
// Reset the dictionary to the initial conditions with
// a single free chunk.
@@ -249,23 +253,24 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
// Return a chunk of size "size" or greater from
// the tree.
// want a better dynamic splay strategy for the future.
Chunk* get_chunk(size_t size, enum FreeBlockDictionary<Chunk>::Dither dither) {
FreeBlockDictionary<Chunk>::verify_par_locked();
Chunk* res = get_chunk_from_tree(size, dither, splay());
Chunk_t* get_chunk(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither) {
FreeBlockDictionary<Chunk_t>::verify_par_locked();
Chunk_t* res = get_chunk_from_tree(size, dither);
assert(res == NULL || res->is_free(),
"Should be returning a free chunk");
assert(dither != FreeBlockDictionary<Chunk_t>::exactly ||
res->size() == size, "Not correct size");
return res;
}
void return_chunk(Chunk* chunk) {
FreeBlockDictionary<Chunk>::verify_par_locked();
void return_chunk(Chunk_t* chunk) {
FreeBlockDictionary<Chunk_t>::verify_par_locked();
insert_chunk_in_tree(chunk);
}
void remove_chunk(Chunk* chunk) {
FreeBlockDictionary<Chunk>::verify_par_locked();
remove_chunk_from_tree((TreeChunk<Chunk>*)chunk);
void remove_chunk(Chunk_t* chunk) {
FreeBlockDictionary<Chunk_t>::verify_par_locked();
remove_chunk_from_tree((TreeChunk<Chunk_t, FreeList_t>*)chunk);
assert(chunk->is_free(), "Should still be a free chunk");
}
@@ -281,19 +286,19 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
}
size_t min_size() const {
return min_tree_chunk_size;
return TreeChunk<Chunk_t, FreeList_t>::min_size();
}
double sum_of_squared_block_sizes() const {
return sum_of_squared_block_sizes(root());
}
Chunk* find_chunk_ends_at(HeapWord* target) const;
Chunk_t* find_chunk_ends_at(HeapWord* target) const;
// Find the list with size "size" in the binary tree and update
// the statistics in the list according to "split" (chunk was
// split or coalesced) and "birth" (chunk was added or removed).
void dict_census_udpate(size_t size, bool split, bool birth);
void dict_census_update(size_t size, bool split, bool birth);
// Return true if the dictionary is overpopulated (more chunks of
// this size than desired) for size "size".
bool coal_dict_over_populated(size_t size);
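dict_census_update() (this change also fixes the long-standing dict_census_udpate typo at every call site) folds the four census events into two booleans. A sketch of the dispatch with simplified counters:

#include <cstdio>

struct Census { long split_births = 0, split_deaths = 0,
                     coal_births  = 0, coal_deaths  = 0; };

// Mirrors the (split, birth) encoding of dict_census_update.
void dict_census_update(Census& c, bool split, bool birth) {
  if (split) { birth ? c.split_births++ : c.split_deaths++; }
  else       { birth ? c.coal_births++  : c.coal_deaths++;  }
}

int main() {
  Census c;
  dict_census_update(c, true,  true);   // a chunk was created by a split
  dict_census_update(c, false, false);  // a chunk was consumed by coalescing
  printf("split_births=%ld coal_deaths=%ld\n", c.split_births, c.coal_deaths);
  return 0;
}
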
......@@ -307,7 +312,7 @@ class BinaryTreeDictionary: public FreeBlockDictionary<Chunk> {
// statistics for the sweep.
void end_sweep_dict_census(double splitSurplusPercent);
// Return the largest free chunk in the tree.
Chunk* find_largest_dict() const;
Chunk_t* find_largest_dict() const;
// Accessors for statistics
void set_tree_surplus(double splitSurplusPercent);
void set_tree_hints(void);
......
@@ -27,6 +27,8 @@
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#endif // SERIALGC
#include "memory/freeBlockDictionary.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
@@ -62,6 +64,9 @@ template <class Chunk> void FreeBlockDictionary<Chunk>::verify_par_locked() cons
}
#endif
template class FreeBlockDictionary<Metablock>;
template class FreeBlockDictionary<Metachunk>;
#ifndef SERIALGC
// Explicitly instantiate for FreeChunk
template class FreeBlockDictionary<FreeChunk>;
......
@@ -66,7 +66,7 @@ class FreeBlockDictionary: public CHeapObj<mtGC> {
virtual void reset(HeapWord* addr, size_t size) = 0;
virtual void reset() = 0;
virtual void dict_census_udpate(size_t size, bool split, bool birth) = 0;
virtual void dict_census_update(size_t size, bool split, bool birth) = 0;
virtual bool coal_dict_over_populated(size_t size) = 0;
virtual void begin_sweep_dict_census(double coalSurplusPercent,
float inter_sweep_current, float inter_sweep_estimate,
......
@@ -25,6 +25,8 @@
#include "precompiled.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/metablock.hpp"
#include "memory/metachunk.hpp"
#include "memory/sharedHeap.hpp"
#include "runtime/globals.hpp"
#include "runtime/mutex.hpp"
@@ -49,8 +51,6 @@ FreeList<Chunk>::FreeList() :
{
_size = 0;
_count = 0;
_hint = 0;
init_statistics();
}
template <class Chunk>
@@ -62,34 +62,50 @@ FreeList<Chunk>::FreeList(Chunk* fc) :
{
_size = fc->size();
_count = 1;
_hint = 0;
init_statistics();
#ifndef PRODUCT
_allocation_stats.set_returned_bytes(size() * HeapWordSize);
#endif
}
template <class Chunk>
void FreeList<Chunk>::reset(size_t hint) {
void FreeList<Chunk>::link_head(Chunk* v) {
assert_proper_lock_protection();
set_head(v);
// If this method is not used (just set the head instead),
// this check can be avoided.
if (v != NULL) {
v->link_prev(NULL);
}
}
template <class Chunk>
void FreeList<Chunk>::reset() {
// Don't set the _size to 0 because this method is
// used with an existing list that has a size but which has
// been emptied.
// Don't clear the _protecting_lock of an existing list.
set_count(0);
set_head(NULL);
set_tail(NULL);
set_hint(hint);
}
template <class Chunk>
void FreeList<Chunk>::init_statistics(bool split_birth) {
_allocation_stats.initialize(split_birth);
void FreeList<Chunk>::initialize() {
#ifdef ASSERT
// Needed early because it might be checked in other initializing code.
set_protecting_lock(NULL);
#endif
reset();
set_size(0);
}
template <class Chunk>
Chunk* FreeList<Chunk>::get_chunk_at_head() {
template <class Chunk_t>
Chunk_t* FreeList<Chunk_t>::get_chunk_at_head() {
assert_proper_lock_protection();
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
Chunk* fc = head();
Chunk_t* fc = head();
if (fc != NULL) {
Chunk* nextFC = fc->next();
Chunk_t* nextFC = fc->next();
if (nextFC != NULL) {
// The chunk fc being removed has a "next". Set the "next" to the
// "prev" of fc.
@@ -197,11 +213,6 @@ void FreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
link_tail(chunk);
}
increment_count(); // of # of chunks in list
DEBUG_ONLY(
if (record_return) {
increment_returned_bytes_by(size()*HeapWordSize);
}
)
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
assert(head() == NULL || head()->size() == size(), "wrong item on list");
@@ -233,11 +244,6 @@ void FreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
}
link_tail(chunk);
increment_count(); // of # of chunks in list
DEBUG_ONLY(
if (record_return) {
increment_returned_bytes_by(size()*HeapWordSize);
}
)
assert(head() == NULL || head()->prev() == NULL, "list invariant");
assert(tail() == NULL || tail()->next() == NULL, "list invariant");
assert(head() == NULL || head()->size() == size(), "wrong item on list");
@@ -273,7 +279,7 @@ void FreeList<Chunk>::prepend(FreeList<Chunk>* fl) {
}
}
// verify_chunk_in_free_list() is used to verify that an item is in this free list.
// verify_chunk_in_free_lists() is used to verify that an item is in this free list.
// It is used as a debugging aid.
template <class Chunk>
bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const {
@@ -293,41 +299,15 @@ bool FreeList<Chunk>::verify_chunk_in_free_list(Chunk* fc) const {
}
#ifndef PRODUCT
template <class Chunk>
void FreeList<Chunk>::verify_stats() const {
// The +1 of the LH comparand is to allow some "looseness" in
// checking: we usually call this interface when adding a block
// and we'll subsequently update the stats; we cannot update the
// stats beforehand because in the case of the large-block BT
// dictionary for example, this might be the first block and
// in that case there would be no place that we could record
// the stats (which are kept in the block itself).
assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
+ _allocation_stats.coal_births() + 1) // Total Production Stock + 1
>= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
+ (ssize_t)count()), // Total Current Stock + depletion
err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
" violates Conservation Principle: "
"prev_sweep(" SIZE_FORMAT ")"
" + split_births(" SIZE_FORMAT ")"
" + coal_births(" SIZE_FORMAT ") + 1 >= "
" split_deaths(" SIZE_FORMAT ")"
" coal_deaths(" SIZE_FORMAT ")"
" + count(" SSIZE_FORMAT ")",
this, _size, _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
_allocation_stats.split_births(), _allocation_stats.split_deaths(),
_allocation_stats.coal_deaths(), count()));
}
template <class Chunk>
void FreeList<Chunk>::assert_proper_lock_protection_work() const {
assert(_protecting_lock != NULL, "Don't call this directly");
assert(protecting_lock() != NULL, "Don't call this directly");
assert(ParallelGCThreads > 0, "Don't call this directly");
Thread* thr = Thread::current();
if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
// assert that we are holding the freelist lock
} else if (thr->is_GC_task_thread()) {
assert(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED");
assert(protecting_lock()->owned_by_self(), "FreeList RACE DETECTED");
} else if (thr->is_Java_thread()) {
assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
} else {
@@ -350,21 +330,17 @@ void FreeList<Chunk>::print_labels_on(outputStream* st, const char* c) {
// to the call is a non-null string, it is printed in the first column;
// otherwise, if the argument is null (the default), then the size of the
// (free list) block is printed in the first column.
template <class Chunk>
void FreeList<Chunk>::print_on(outputStream* st, const char* c) const {
template <class Chunk_t>
void FreeList<Chunk_t>::print_on(outputStream* st, const char* c) const {
if (c != NULL) {
st->print("%16s", c);
} else {
st->print(SIZE_FORMAT_W(16), size());
}
st->print("\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
bfr_surp(), surplus(), desired(), prev_sweep(), before_sweep(),
count(), coal_births(), coal_deaths(), split_births(), split_deaths());
}
template class FreeList<Metablock>;
template class FreeList<Metachunk>;
#ifndef SERIALGC
// Needs to be after the definitions have been seen.
template class FreeList<FreeChunk>;
#endif // SERIALGC
@@ -40,23 +40,19 @@ class CompactibleFreeListSpace;
// for that implementation.
class Mutex;
template <class Chunk> class TreeList;
template <class Chunk> class PrintTreeCensusClosure;
template <class Chunk>
template <class Chunk_t>
class FreeList VALUE_OBJ_CLASS_SPEC {
friend class CompactibleFreeListSpace;
friend class VMStructs;
friend class PrintTreeCensusClosure<Chunk>;
private:
Chunk* _head; // Head of list of free chunks
Chunk* _tail; // Tail of list of free chunks
Chunk_t* _head; // Head of list of free chunks
Chunk_t* _tail; // Tail of list of free chunks
size_t _size; // Size in Heap words of each chunk
ssize_t _count; // Number of entries in list
size_t _hint; // next larger size list with a positive surplus
AllocationStats _allocation_stats; // allocation-related statistics
protected:
#ifdef ASSERT
Mutex* _protecting_lock;
......@@ -71,10 +67,6 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
#endif
}
// Initialize the allocation statistics.
protected:
void init_statistics(bool split_birth = false);
void set_count(ssize_t v) { _count = v;}
void increment_count() {
_count++;
}
......@@ -89,52 +81,48 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
// Construct a list without any entries.
FreeList();
// Construct a list with "fc" as the first (and lone) entry in the list.
FreeList(Chunk* fc);
FreeList(Chunk_t* fc);
// Reset the head, tail, hint, and count of a free list.
void reset(size_t hint);
// Do initialization
void initialize();
// Reset the head, tail, and count of a free list.
void reset();
// Declare the current free list to be protected by the given lock.
#ifdef ASSERT
void set_protecting_lock(Mutex* protecting_lock) {
_protecting_lock = protecting_lock;
Mutex* protecting_lock() const { return _protecting_lock; }
void set_protecting_lock(Mutex* v) {
_protecting_lock = v;
}
#endif
// Accessors.
Chunk* head() const {
Chunk_t* head() const {
assert_proper_lock_protection();
return _head;
}
void set_head(Chunk* v) {
void set_head(Chunk_t* v) {
assert_proper_lock_protection();
_head = v;
assert(!_head || _head->size() == _size, "bad chunk size");
}
// Set the head of the list and set the prev field of non-null
// values to NULL.
void link_head(Chunk* v) {
assert_proper_lock_protection();
set_head(v);
// If this method is not used (just set the head instead),
// this check can be avoided.
if (v != NULL) {
v->link_prev(NULL);
}
}
void link_head(Chunk_t* v);
Chunk* tail() const {
Chunk_t* tail() const {
assert_proper_lock_protection();
return _tail;
}
void set_tail(Chunk* v) {
void set_tail(Chunk_t* v) {
assert_proper_lock_protection();
_tail = v;
assert(!_tail || _tail->size() == _size, "bad chunk size");
}
// Set the tail of the list and set the next field of non-null
// values to NULL.
void link_tail(Chunk* v) {
void link_tail(Chunk_t* v) {
assert_proper_lock_protection();
set_tail(v);
if (v != NULL) {
......@@ -152,174 +140,45 @@ class FreeList VALUE_OBJ_CLASS_SPEC {
assert_proper_lock_protection();
_size = v;
}
ssize_t count() const {
return _count;
}
size_t hint() const {
return _hint;
}
void set_hint(size_t v) {
assert_proper_lock_protection();
assert(v == 0 || _size < v, "Bad hint");
_hint = v;
}
// Accessors for statistics
AllocationStats* allocation_stats() {
assert_proper_lock_protection();
return &_allocation_stats;
}
ssize_t desired() const {
return _allocation_stats.desired();
}
void set_desired(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_desired(v);
}
void compute_desired(float inter_sweep_current,
float inter_sweep_estimate,
float intra_sweep_estimate) {
assert_proper_lock_protection();
_allocation_stats.compute_desired(_count,
inter_sweep_current,
inter_sweep_estimate,
intra_sweep_estimate);
}
ssize_t coal_desired() const {
return _allocation_stats.coal_desired();
}
void set_coal_desired(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coal_desired(v);
}
ssize_t surplus() const {
return _allocation_stats.surplus();
}
void set_surplus(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_surplus(v);
}
void increment_surplus() {
assert_proper_lock_protection();
_allocation_stats.increment_surplus();
}
void decrement_surplus() {
assert_proper_lock_protection();
_allocation_stats.decrement_surplus();
}
ssize_t bfr_surp() const {
return _allocation_stats.bfr_surp();
}
void set_bfr_surp(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_bfr_surp(v);
}
ssize_t prev_sweep() const {
return _allocation_stats.prev_sweep();
}
void set_prev_sweep(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_prev_sweep(v);
}
ssize_t before_sweep() const {
return _allocation_stats.before_sweep();
}
void set_before_sweep(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_before_sweep(v);
}
ssize_t coal_births() const {
return _allocation_stats.coal_births();
}
void set_coal_births(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coal_births(v);
}
void increment_coal_births() {
assert_proper_lock_protection();
_allocation_stats.increment_coal_births();
}
ssize_t coal_deaths() const {
return _allocation_stats.coal_deaths();
}
void set_coal_deaths(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_coal_deaths(v);
}
void increment_coal_deaths() {
assert_proper_lock_protection();
_allocation_stats.increment_coal_deaths();
}
ssize_t split_births() const {
return _allocation_stats.split_births();
}
void set_split_births(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_split_births(v);
}
void increment_split_births() {
assert_proper_lock_protection();
_allocation_stats.increment_split_births();
}
ssize_t count() const { return _count; }
void set_count(ssize_t v) { _count = v;}
ssize_t split_deaths() const {
return _allocation_stats.split_deaths();
}
void set_split_deaths(ssize_t v) {
assert_proper_lock_protection();
_allocation_stats.set_split_deaths(v);
}
void increment_split_deaths() {
assert_proper_lock_protection();
_allocation_stats.increment_split_deaths();
}
size_t get_better_size() { return size(); }
NOT_PRODUCT(
// For debugging. The "_returned_bytes" in all the lists are summed
// and compared with the total number of bytes swept during a
// collection.
size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
void increment_returned_bytes_by(size_t v) {
_allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
}
)
size_t returned_bytes() const { ShouldNotReachHere(); return 0; }
void set_returned_bytes(size_t v) {}
void increment_returned_bytes_by(size_t v) {}
// Unlink head of list and return it. Returns NULL if
// the list is empty. (A sketch of a possible definition follows the class.)
Chunk* get_chunk_at_head();
Chunk_t* get_chunk_at_head();
// Remove the first "n" or "count", whichever is smaller, chunks from the
// list, setting "fl", which is required to be empty, to point to them.
void getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl);
void getFirstNChunksFromList(size_t n, FreeList<Chunk_t>* fl);
// Unlink this chunk from its free list
void remove_chunk(Chunk* fc);
void remove_chunk(Chunk_t* fc);
// Add this chunk to this free list.
void return_chunk_at_head(Chunk* fc);
void return_chunk_at_tail(Chunk* fc);
void return_chunk_at_head(Chunk_t* fc);
void return_chunk_at_tail(Chunk_t* fc);
// Similar to return_chunk_at_* but also records some diagnostic
// information.
void return_chunk_at_head(Chunk* fc, bool record_return);
void return_chunk_at_tail(Chunk* fc, bool record_return);
void return_chunk_at_head(Chunk_t* fc, bool record_return);
void return_chunk_at_tail(Chunk_t* fc, bool record_return);
// Prepend "fl" (whose size is required to be the same as that of "this")
// to the front of "this" list.
void prepend(FreeList<Chunk>* fl);
void prepend(FreeList<Chunk_t>* fl);
// Verify that the chunk is in the list; return true if it is
// found, false otherwise.
bool verify_chunk_in_free_list(Chunk* fc) const;
bool verify_chunk_in_free_list(Chunk_t* fc) const;
// Stats verification
void verify_stats() const PRODUCT_RETURN;
// void verify_stats() const { ShouldNotReachHere(); };
// Printing support
static void print_labels_on(outputStream* st, const char* c);
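For concreteness, the head-removal API declared above could be defined along these lines (a hedged sketch consistent with the declared semantics; the actual body lives in freeList.cpp):
template <class Chunk_t>
Chunk_t* FreeList<Chunk_t>::get_chunk_at_head() {
  assert_proper_lock_protection();
  Chunk_t* fc = head();
  if (fc != NULL) {
    Chunk_t* next_chunk = fc->next();
    if (next_chunk != NULL) {
      next_chunk->link_prev(NULL);  // new head has no predecessor
    } else {
      set_tail(NULL);               // removed the only element
    }
    set_head(next_chunk);
    set_count(count() - 1);
  }
  return fc;
}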
......
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_METABLOCK_HPP
#define SHARE_VM_MEMORY_METABLOCK_HPP
// A Metablock is the unit of allocation from a Chunk. It is initialized
// with the size of the requested allocation. That size is overwritten
// once the allocation returns.
//
// A Metablock may be reused by its SpaceManager but is never moved between
// SpaceManagers. There is no explicit link to the Metachunk
// from which it was allocated. A Metablock may be deallocated and
// put on a freelist but the space is never freed; rather,
// the Metachunk it is a part of will be deallocated when its
// associated class loader is collected.
class Metablock VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
// Used to align the allocation (see below).
union block_t {
void* _data[3];
struct header_t {
size_t _word_size;
Metablock* _next;
Metablock* _prev;
} _header;
} _block;
static size_t _min_block_byte_size;
static size_t _overhead;
typedef union block_t Block;
typedef struct header_t Header;
const Block* block() const { return &_block; }
const Block::header_t* header() const { return &(block()->_header); }
public:
static Metablock* initialize(MetaWord* p, size_t word_size);
// This places the body of the block at a 2 word boundary
// because every block starts on a 2 word boundary. Work out
// how to make the body on a 2 word boundary if the block
// starts on an arbitrary boundary. JJJ
size_t word_size() const { return header()->_word_size; }
void set_word_size(size_t v) { _block._header._word_size = v; }
size_t size() const volatile { return _block._header._word_size; }
void set_size(size_t v) { _block._header._word_size = v; }
Metablock* next() const { return header()->_next; }
void set_next(Metablock* v) { _block._header._next = v; }
Metablock* prev() const { return header()->_prev; }
void set_prev(Metablock* v) { _block._header._prev = v; }
static size_t min_block_byte_size() { return _min_block_byte_size; }
static size_t overhead() { return _overhead; }
bool is_free() { return header()->_word_size != 0; }
void clear_next() { set_next(NULL); }
void link_prev(Metablock* ptr) { set_prev(ptr); }
uintptr_t* end() { return ((uintptr_t*) this) + size(); }
bool cantCoalesce() const { return false; }
void link_next(Metablock* ptr) { set_next(ptr); }
void link_after(Metablock* ptr){
link_next(ptr);
if (ptr != NULL) ptr->link_prev(this);
}
// Should not be needed in a free list of Metablocks
void markNotFree() { ShouldNotReachHere(); }
// Debug support
#ifdef ASSERT
void* prev_addr() const { return (void*)&_block._header._prev; }
void* next_addr() const { return (void*)&_block._header._next; }
void* size_addr() const { return (void*)&_block._header._word_size; }
#endif
bool verify_chunk_in_free_list(Metablock* tc) const { return true; }
bool verify_par_locked() { return true; }
void assert_is_mangled() const { /* Don't check */ }
};
#endif // SHARE_VM_MEMORY_METABLOCK_HPP
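The three-slot union in Metablock above is a common sizing trick: overlaying the header with a raw three-pointer payload guarantees the struct spans at least three words, which keeps block bodies on the 2-word boundaries the comment mentions. A standalone analogue (hypothetical code, outside HotSpot):
#include <cstdio>
#include <cstddef>
struct Header {
  size_t word_size;
  void*  next;
  void*  prev;
};
// The union is as large as the bigger of its members, so the block
// always reserves at least three pointer-sized slots.
union Block {
  void*  data[3];
  Header header;
};
int main() {
  std::printf("sizeof(Header) = %zu\n", sizeof(Header));
  std::printf("sizeof(Block)  = %zu\n", sizeof(Block));  // 24 on LP64
  return 0;
}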
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_METACHUNK_HPP
#define SHARE_VM_MEMORY_METACHUNK_HPP
// Metachunk - Quantum of allocation from a Virtualspace
// Metachunks are reused (when freed are put on a global freelist) and
// have no permanent association to a SpaceManager.
// +--------------+ <- end
// | | --+ ---+
// | | | free |
// | | | |
// | | | | capacity
// | | | |
// | | <- top --+ |
// | | ---+ |
// | | | used |
// | | | |
// | | | |
// +--------------+ <- bottom ---+ ---+
class Metachunk VALUE_OBJ_CLASS_SPEC {
// link to support lists of chunks
Metachunk* _next;
Metachunk* _prev;
MetaWord* _bottom;
MetaWord* _end;
MetaWord* _top;
size_t _word_size;
// Used in a guarantee() so included in the Product builds
// even though it is only for debugging.
bool _is_free;
// Metachunks are allocated out of a MetadataVirtualSpace and
// each uses some of that space to describe itself (plus alignment
// considerations). Metadata is allocated in the rest of the chunk.
// This size is the overhead of maintaining the Metachunk within
// the space.
static size_t _overhead;
void set_bottom(MetaWord* v) { _bottom = v; }
void set_end(MetaWord* v) { _end = v; }
void set_top(MetaWord* v) { _top = v; }
void set_word_size(size_t v) { _word_size = v; }
public:
#ifdef ASSERT
Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), _is_free(false) {}
#else
Metachunk() : _bottom(NULL), _end(NULL), _top(NULL) {}
#endif
// Used to add a Metachunk to a list of Metachunks
void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");}
MetaWord* allocate(size_t word_size);
static Metachunk* initialize(MetaWord* ptr, size_t word_size);
// Accessors
Metachunk* next() const { return _next; }
Metachunk* prev() const { return _prev; }
MetaWord* bottom() const { return _bottom; }
MetaWord* end() const { return _end; }
MetaWord* top() const { return _top; }
size_t word_size() const { return _word_size; }
size_t size() const volatile { return _word_size; }
void set_size(size_t v) { _word_size = v; }
bool is_free() { return _is_free; }
void set_is_free(bool v) { _is_free = v; }
static size_t overhead() { return _overhead; }
void clear_next() { set_next(NULL); }
void link_prev(Metachunk* ptr) { set_prev(ptr); }
uintptr_t* end() { return ((uintptr_t*) this) + size(); }
bool cantCoalesce() const { return false; }
void link_next(Metachunk* ptr) { set_next(ptr); }
void link_after(Metachunk* ptr){
link_next(ptr);
if (ptr != NULL) ptr->link_prev(this);
}
// Reset top to bottom so chunk can be reused.
void reset_empty() { _top = (_bottom + _overhead); }
bool is_empty() { return _top == (_bottom + _overhead); }
// used (has been allocated)
// free (available for future allocations)
// capacity (total size of chunk)
size_t used_word_size();
size_t free_word_size();
size_t capacity_word_size();
// Debug support
#ifdef ASSERT
void* prev_addr() const { return (void*)&_prev; }
void* next_addr() const { return (void*)&_next; }
void* size_addr() const { return (void*)&_word_size; }
#endif
bool verify_chunk_in_free_list(Metachunk* tc) const { return true; }
bool verify_par_locked() { return true; }
void assert_is_mangled() const { /* Don't check */ }
#ifdef ASSERT
void mangle();
#endif // ASSERT
void print_on(outputStream* st) const;
void verify();
};
#endif // SHARE_VM_MEMORY_METACHUNK_HPP
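Given the bottom/top/end layout in the diagram above, the declared allocate() is naturally a bump-pointer routine. A hedged sketch of what the out-of-line definition could look like (the real body lives elsewhere in this change):
MetaWord* Metachunk::allocate(size_t word_size) {
  MetaWord* result = NULL;
  // Bump-pointer allocation: hand out [_top, _top + word_size)
  // only if it fits between the current top and the chunk's end.
  if (word_size <= (size_t)(_end - _top)) {
    result = _top;
    _top += word_size;
  }
  return result;
}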
(The diff for this file has been collapsed.)
......@@ -57,12 +57,10 @@
//
class ClassLoaderData;
class Metablock;
class MetaWord;
class Mutex;
class outputStream;
class FreeChunk;
template <class Chunk_t> class FreeList;
template <class Chunk_t> class BinaryTreeDictionary;
class SpaceManager;
// Metaspaces each have a SpaceManager and allocations
......@@ -128,7 +126,7 @@ class Metaspace : public CHeapObj<mtClass> {
size_t capacity_words(MetadataType mdtype) const;
size_t waste_words(MetadataType mdtype) const;
static MetaWord* allocate(ClassLoaderData* loader_data, size_t size,
static Metablock* allocate(ClassLoaderData* loader_data, size_t size,
bool read_only, MetadataType mdtype, TRAPS);
void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
......
......@@ -59,6 +59,7 @@
#include "memory/generation.hpp"
#include "memory/generationSpec.hpp"
#include "memory/heap.hpp"
#include "memory/metablock.hpp"
#include "memory/space.hpp"
#include "memory/tenuredGeneration.hpp"
#include "memory/universe.hpp"
......@@ -249,6 +250,7 @@ typedef TwoOopHashtable<Klass*, mtClass> KlassTwoOopHashtable;
typedef Hashtable<Klass*, mtClass> KlassHashtable;
typedef HashtableEntry<Klass*, mtClass> KlassHashtableEntry;
typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
//--------------------------------------------------------------------------------
// VM_STRUCTS
......@@ -1237,7 +1239,15 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
nonstatic_field(AccessFlags, _flags, jint) \
nonstatic_field(elapsedTimer, _counter, jlong) \
nonstatic_field(elapsedTimer, _active, bool) \
nonstatic_field(InvocationCounter, _counter, unsigned int)
nonstatic_field(InvocationCounter, _counter, unsigned int) \
volatile_nonstatic_field(FreeChunk, _size, size_t) \
nonstatic_field(FreeChunk, _next, FreeChunk*) \
nonstatic_field(FreeChunk, _prev, FreeChunk*) \
nonstatic_field(FreeList<FreeChunk>, _size, size_t) \
nonstatic_field(FreeList<Metablock>, _size, size_t) \
nonstatic_field(FreeList<FreeChunk>, _count, ssize_t) \
nonstatic_field(FreeList<Metablock>, _count, ssize_t) \
nonstatic_field(MetablockTreeDictionary, _total_size, size_t)
/* NOTE that we do not use the last_entry() macro here; it is used */
/* in vmStructs_<os>_<cpu>.hpp's VM_STRUCTS_OS_CPU macro (and must */
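Each nonstatic_field entry above boils down to a (type name, field name, offset) record that the serviceability agent reads from outside the VM. Conceptually it reduces to something like this (a simplified sketch, not the actual VMStructs table layout):
#include <cstddef>
// One conceptual table row per nonstatic_field entry:
struct VMStructEntryLike {
  const char* typeName;   // e.g. "FreeList<FreeChunk>"
  const char* fieldName;  // e.g. "_size"
  size_t      offset;     // byte offset of the field within the type
};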
......@@ -2080,7 +2090,24 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
declare_toplevel_type(Universe) \
declare_toplevel_type(vframeArray) \
declare_toplevel_type(vframeArrayElement) \
declare_toplevel_type(Annotations*)
declare_toplevel_type(Annotations*) \
\
/***********************/ \
/* Miscellaneous types */ \
/***********************/ \
\
/* freelist */ \
declare_toplevel_type(FreeChunk*) \
declare_toplevel_type(Metablock*) \
declare_toplevel_type(FreeBlockDictionary<FreeChunk>*) \
declare_toplevel_type(FreeList<FreeChunk>*) \
declare_toplevel_type(FreeList<FreeChunk>) \
declare_toplevel_type(FreeBlockDictionary<Metablock>*) \
declare_toplevel_type(FreeList<Metablock>*) \
declare_toplevel_type(FreeList<Metablock>) \
declare_toplevel_type(MetablockTreeDictionary*) \
declare_type(MetablockTreeDictionary, FreeBlockDictionary<Metablock>)
/* NOTE that we do not use the last_entry() macro here; it is used */
......