Commit 78c00a06 authored by tschatzl

Merge

@@ -88,7 +88,7 @@ ifeq ($(INCLUDE_ALL_GCS), false)
   g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
   g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
   g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
-  heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
+  g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
   ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
   adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
   cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
...
@@ -341,7 +341,7 @@ Symbol* SymbolTable::new_permanent_symbol(const char* name, TRAPS) {
 
 Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
                                unsigned int hashValue_arg, bool c_heap, TRAPS) {
-  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+  assert(!Universe::heap()->is_in_reserved(name),
          "proposed name of symbol must be stable");
 
   // Don't allow symbols to be created which cannot fit in a Symbol*.
@@ -685,7 +685,7 @@ oop StringTable::intern(Handle string_or_null, jchar* name,
   if (found_string != NULL) return found_string;
 
   debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
-  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+  assert(!Universe::heap()->is_in_reserved(name),
          "proposed name of symbol must be stable");
 
   Handle string;
...
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#ifndef PRODUCT
void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
guarantee(_base != NULL, "Array not initialized");
guarantee(index < length(), err_msg("Index out of bounds index: "SIZE_FORMAT" length: "SIZE_FORMAT, index, length()));
}
void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
guarantee(_biased_base != NULL, "Array not initialized");
guarantee(biased_index >= bias() && biased_index < (bias() + length()),
err_msg("Biased index out of bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
}
void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
guarantee(_biased_base != NULL, "Array not initialized");
guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
err_msg("Biased index out of inclusive bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
}
class TestMappedArray : public G1BiasedMappedArray<int> {
protected:
  virtual int default_value() const { return 0xBAADBABE; }
public:
  static void test_biasedarray() {
    const size_t REGION_SIZE_IN_WORDS = 512;
    const size_t NUM_REGIONS = 20;
    HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero

    TestMappedArray array;
    array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
                     REGION_SIZE_IN_WORDS * HeapWordSize);
    // Check address calculation (bounds)
    assert(array.bottom_address_mapped() == fake_heap,
           err_msg("bottom mapped address should be "PTR_FORMAT", but is "PTR_FORMAT, fake_heap, array.bottom_address_mapped()));
    assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");

    int* bottom = array.address_mapped_to(fake_heap);
    assert((void*)bottom == (void*) array.base(), "must be");
    int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
    assert((void*)end == (void*)(array.base() + array.length()), "must be");

    // The entire array should contain default value elements
    for (int* current = bottom; current < end; current++) {
      assert(*current == array.default_value(), "must be");
    }

    // Test setting values in the table
    HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
    HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);

    // Set/get by address tests: invert some value; first retrieve one
    int actual_value = array.get_by_index(NUM_REGIONS / 2);
    array.set_by_index(NUM_REGIONS / 2, ~actual_value);
    // Get the same value by address, should correspond to the start of the "region"
    int value = array.get_by_address(region_start_address);
    assert(value == ~actual_value, "must be");
    // Get the same value by address, at one HeapWord before the start
    value = array.get_by_address(region_start_address - 1);
    assert(value == array.default_value(), "must be");
    // Get the same value by address, at the end of the "region"
    value = array.get_by_address(region_end_address);
    assert(value == ~actual_value, "must be");
    // Make sure the next value maps to another index
    value = array.get_by_address(region_end_address + 1);
    assert(value == array.default_value(), "must be");

    // Reset the value in the array
    array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);

    // The entire array should have the default value again
    for (int* current = bottom; current < end; current++) {
      assert(*current == array.default_value(), "must be");
    }

    // Set/get by index tests: invert some value
    idx_t index = NUM_REGIONS / 2;
    actual_value = array.get_by_index(index);
    array.set_by_index(index, ~actual_value);

    value = array.get_by_index(index);
    assert(value == ~actual_value, "must be");

    value = array.get_by_index(index - 1);
    assert(value == array.default_value(), "must be");

    value = array.get_by_index(index + 1);
    assert(value == array.default_value(), "must be");

    array.set_by_index(0, 0);
    value = array.get_by_index(0);
    assert(value == 0, "must be");

    array.set_by_index(array.length() - 1, 0);
    value = array.get_by_index(array.length() - 1);
    assert(value == 0, "must be");

    array.set_by_index(index, 0);

    // The array should have three zeros, and default values otherwise
    size_t num_zeros = 0;
    for (int* current = bottom; current < end; current++) {
      assert(*current == array.default_value() || *current == 0, "must be");
      if (*current == 0) {
        num_zeros++;
      }
    }
    assert(num_zeros == 3, "must be");
  }
};

void TestG1BiasedArray_test() {
  TestMappedArray::test_biasedarray();
}

#endif
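A quick standalone cross-check of the arithmetic the test above relies on (plain C++, not part of the commit; the constants are the test's LP64 values, where 512-word regions and an 8-byte HeapWord give a 4096-byte mapping granularity, so the shift is 12):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t fake_heap   = 0xBAAA00000ULL;      // the test's LP64 fake heap bottom
  const uintptr_t granularity = 512 * 8;             // REGION_SIZE_IN_WORDS * HeapWordSize
  const unsigned  shift       = 12;                  // log2(granularity)
  const uintptr_t bias        = fake_heap >> shift;  // what initialize() computes

  // Any address inside "region" NUM_REGIONS / 2 = 10 must map to element 10;
  // this is what the get_by_address()/set_by_address() checks above depend on.
  uintptr_t addr = fake_heap + 10 * granularity + 8; // one HeapWord into region 10
  assert(((addr >> shift) - bias) == 10);
  return 0;
}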
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP

#include "utilities/debug.hpp"
#include "memory/allocation.inline.hpp"

// Implements the common base functionality for arrays that contain provisions
// for accessing their elements using a biased index.
// The element type is defined by instantiating the template.
class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
public:
  typedef size_t idx_t;
protected:
  address _base;          // the real base address
  size_t _length;         // the length of the array
  address _biased_base;   // base address biased by "bias" elements
  size_t _bias;           // the bias, i.e. the offset biased_base is located to the right in elements
  uint _shift_by;         // the amount of bits to shift right when mapping to an index of the array.

protected:
  G1BiasedMappedArrayBase() : _base(NULL), _length(0), _biased_base(NULL),
    _bias(0), _shift_by(0) { }

  // Allocate a new array, generic version.
  static address create_new_base_array(size_t length, size_t elem_size) {
    assert(length > 0, "just checking");
    assert(elem_size > 0, "just checking");
    return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
  }

  // Initialize the members of this class. The biased start address of this array
  // is the bias (in elements) multiplied by the element size.
  void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
    assert(base != NULL, "just checking");
    assert(length > 0, "just checking");
    assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %zd, larger than word size?", shift_by));
    _base = base;
    _length = length;
    _biased_base = base - (bias * elem_size);
    _bias = bias;
    _shift_by = shift_by;
  }

  // Allocate and initialize this array to cover the heap addresses in the range
  // of [bottom, end).
  void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) {
    assert(mapping_granularity_in_bytes > 0, "just checking");
    assert(is_power_of_2(mapping_granularity_in_bytes),
      err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes));
    assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0,
      err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
        mapping_granularity_in_bytes, bottom));
    assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
      err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
        mapping_granularity_in_bytes, end));
    size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
    idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
    address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
    initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
  }

  size_t bias() const { return _bias; }
  uint shift_by() const { return _shift_by; }

  void verify_index(idx_t index) const PRODUCT_RETURN;
  void verify_biased_index(idx_t biased_index) const PRODUCT_RETURN;
  void verify_biased_index_inclusive_end(idx_t biased_index) const PRODUCT_RETURN;

public:
  // Return the length of the array in elements.
  size_t length() const { return _length; }
};
// Array that provides biased access and mapping from (valid) addresses in the
// heap into this array.
template<class T>
class G1BiasedMappedArray : public G1BiasedMappedArrayBase {
public:
  typedef G1BiasedMappedArrayBase::idx_t idx_t;

  T* base() const { return (T*)G1BiasedMappedArrayBase::_base; }

  // Return the element of the given array at the given index. Assume
  // the index is valid. This is a convenience method that does sanity
  // checking on the index.
  T get_by_index(idx_t index) const {
    verify_index(index);
    return this->base()[index];
  }

  // Set the element of the given array at the given index to the
  // given value. Assume the index is valid. This is a convenience
  // method that does sanity checking on the index.
  void set_by_index(idx_t index, T value) {
    verify_index(index);
    this->base()[index] = value;
  }

  // The raw biased base pointer.
  T* biased_base() const { return (T*)G1BiasedMappedArrayBase::_biased_base; }

  // Return the element of the given array that covers the given word in the
  // heap. Assumes the index is valid.
  T get_by_address(HeapWord* value) const {
    idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
    this->verify_biased_index(biased_index);
    return biased_base()[biased_index];
  }

  // Set the value of the array entry that corresponds to the given address.
  void set_by_address(HeapWord* address, T value) {
    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
    this->verify_biased_index(biased_index);
    biased_base()[biased_index] = value;
  }

protected:
  // Returns the address of the element the given address maps to
  T* address_mapped_to(HeapWord* address) {
    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
    this->verify_biased_index_inclusive_end(biased_index);
    return biased_base() + biased_index;
  }

public:
  // Return the smallest address (inclusive) in the heap that this array covers.
  HeapWord* bottom_address_mapped() const {
    return (HeapWord*) ((uintptr_t)this->bias() << this->shift_by());
  }

  // Return the highest address (exclusive) in the heap that this array covers.
  HeapWord* end_address_mapped() const {
    return (HeapWord*) ((uintptr_t)(this->bias() + this->length()) << this->shift_by());
  }

protected:
  virtual T default_value() const = 0;
  // Set all elements of the given array to the given value.
  void clear() {
    T value = default_value();
    for (idx_t i = 0; i < length(); i++) {
      set_by_index(i, value);
    }
  }

public:
  G1BiasedMappedArray() {}

  // Allocate and initialize this array to cover the heap addresses in the range
  // of [bottom, end).
  void initialize(HeapWord* bottom, HeapWord* end, size_t mapping_granularity) {
    G1BiasedMappedArrayBase::initialize(bottom, end, sizeof(T), mapping_granularity);
    this->clear();
  }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
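For reference, a minimal standalone sketch (hypothetical values, not part of the commit) of why initialize_base() stores _biased_base = _base - bias * elem_size: the subtraction of the heap bottom is folded into the base pointer once, so get_by_address() needs only a shift and an index per access. Plain integer arithmetic stands in for the out-of-range pointer the VM keeps, and a 64-bit address space is assumed:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned  shift  = 12;                   // log2(mapping granularity)
  const uintptr_t bottom = 0xBAAA00000ULL;       // granularity-aligned heap bottom

  int array[20] = {0};                           // the real backing array ("_base")
  const uintptr_t base        = (uintptr_t)array;
  const uintptr_t bias        = bottom >> shift;
  const uintptr_t biased_base = base - bias * sizeof(int); // cf. initialize_base()

  // Indexing the biased base by (addr >> shift) lands on the same element as
  // base[(addr - bottom) >> shift], without subtracting bottom on each access.
  uintptr_t addr     = bottom + (5u << shift) + 100;  // an address covered by element 5
  int*      via_bias = (int*)(biased_base + (addr >> shift) * sizeof(int));
  assert(via_bias == &array[(addr - bottom) >> shift]);
  assert(via_bias == &array[5]);
  return 0;
}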
@@ -2069,8 +2069,10 @@ jint G1CollectedHeap::initialize() {
   _g1_storage.initialize(g1_rs, 0);
   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
   _hrs.initialize((HeapWord*) _g1_reserved.start(),
-                  (HeapWord*) _g1_reserved.end(),
-                  _expansion_regions);
+                  (HeapWord*) _g1_reserved.end());
+  assert(_hrs.max_length() == _expansion_regions,
+         err_msg("max length: %u expansion regions: %u",
+                 _hrs.max_length(), _expansion_regions));
 
   // Do later initialization work for concurrent refinement.
   _cg1r->init();
...
@@ -71,27 +71,16 @@ uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
 
 // Public
 
-void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
-                               uint max_length) {
+void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
   assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
          "bottom should be heap region aligned");
   assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
          "end should be heap region aligned");
 
-  _length = 0;
-  _heap_bottom = bottom;
-  _heap_end = end;
-  _region_shift = HeapRegion::LogOfHRGrainBytes;
   _next_search_index = 0;
   _allocated_length = 0;
-  _max_length = max_length;
 
-  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC);
-  memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
-  _regions_biased = _regions - ((uintx) bottom >> _region_shift);
-
-  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
-         "bottom should be included in the region with index 0");
+  _regions.initialize(bottom, end, HeapRegion::GrainBytes);
 }
 
 MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
@@ -101,15 +90,15 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   HeapWord* next_bottom = old_end;
-  assert(_heap_bottom <= next_bottom, "invariant");
+  assert(heap_bottom() <= next_bottom, "invariant");
   while (next_bottom < new_end) {
-    assert(next_bottom < _heap_end, "invariant");
+    assert(next_bottom < heap_end(), "invariant");
     uint index = length();
 
-    assert(index < _max_length, "otherwise we cannot expand further");
+    assert(index < max_length(), "otherwise we cannot expand further");
     if (index == 0) {
       // We have not allocated any regions so far
-      assert(next_bottom == _heap_bottom, "invariant");
+      assert(next_bottom == heap_bottom(), "invariant");
     } else {
       // next_bottom should match the end of the last/previous region
       assert(next_bottom == at(index - 1)->end(), "invariant");
@@ -122,8 +111,8 @@ MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
         // allocation failed, we bail out and return what we have done so far
         return MemRegion(old_end, next_bottom);
       }
-      assert(_regions[index] == NULL, "invariant");
-      _regions[index] = new_hr;
+      assert(_regions.get_by_index(index) == NULL, "invariant");
+      _regions.set_by_index(index, new_hr);
       increment_allocated_length();
     }
     // Have to increment the length first, otherwise we will get an
@@ -228,26 +217,26 @@ uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
 
 #ifndef PRODUCT
 void HeapRegionSeq::verify_optional() {
-  guarantee(_length <= _allocated_length,
+  guarantee(length() <= _allocated_length,
             err_msg("invariant: _length: %u _allocated_length: %u",
-                    _length, _allocated_length));
+                    length(), _allocated_length));
-  guarantee(_allocated_length <= _max_length,
+  guarantee(_allocated_length <= max_length(),
             err_msg("invariant: _allocated_length: %u _max_length: %u",
-                    _allocated_length, _max_length));
+                    _allocated_length, max_length()));
-  guarantee(_next_search_index <= _length,
+  guarantee(_next_search_index <= length(),
             err_msg("invariant: _next_search_index: %u _length: %u",
-                    _next_search_index, _length));
+                    _next_search_index, length()));
 
-  HeapWord* prev_end = _heap_bottom;
+  HeapWord* prev_end = heap_bottom();
   for (uint i = 0; i < _allocated_length; i += 1) {
-    HeapRegion* hr = _regions[i];
+    HeapRegion* hr = _regions.get_by_index(i);
     guarantee(hr != NULL, err_msg("invariant: i: %u", i));
     guarantee(hr->bottom() == prev_end,
               err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
                       i, HR_FORMAT_PARAMS(hr), prev_end));
     guarantee(hr->hrs_index() == i,
               err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
-    if (i < _length) {
+    if (i < length()) {
       // Asserts will fire if i is >= _length
       HeapWord* addr = hr->bottom();
       guarantee(addr_to_region(addr) == hr, "sanity");
@@ -265,8 +254,8 @@ void HeapRegionSeq::verify_optional() {
       prev_end = hr->end();
     }
   }
-  for (uint i = _allocated_length; i < _max_length; i += 1) {
-    guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
+  for (uint i = _allocated_length; i < max_length(); i += 1) {
+    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
   }
 }
 #endif // PRODUCT
@@ -25,10 +25,17 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+
 class HeapRegion;
 class HeapRegionClosure;
 class FreeRegionList;
 
+class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
+ protected:
+  virtual HeapRegion* default_value() const { return NULL; }
+};
+
 // This class keeps track of the region metadata (i.e., HeapRegion
 // instances). They are kept in the _regions array in address
 // order. A region's index in the array corresponds to its index in
@@ -44,35 +51,21 @@ class FreeRegionList;
 //
 // We keep track of three lengths:
 //
-// * _length (returned by length()) is the number of currently
+// * _committed_length (returned by length()) is the number of currently
 //   committed regions.
 // * _allocated_length (not exposed outside this class) is the
 //   number of regions for which we have HeapRegions.
-// * _max_length (returned by max_length()) is the maximum number of
-//   regions the heap can have.
+// * max_length() returns the maximum number of regions the heap can have.
 //
-// and maintain that: _length <= _allocated_length <= _max_length
+// and maintain that: _committed_length <= _allocated_length <= max_length()
 
 class HeapRegionSeq: public CHeapObj<mtGC> {
   friend class VMStructs;
 
-  // The array that holds the HeapRegions.
-  HeapRegion** _regions;
-
-  // Version of _regions biased to address 0
-  HeapRegion** _regions_biased;
+  G1HeapRegionTable _regions;
 
   // The number of regions committed in the heap.
-  uint _length;
-
-  // The address of the first reserved word in the heap.
-  HeapWord* _heap_bottom;
-
-  // The address of the last reserved word in the heap - 1.
-  HeapWord* _heap_end;
-
-  // The log of the region byte size.
-  uint _region_shift;
+  uint _committed_length;
 
   // A hint for which index to start searching from for humongous
   // allocations.
@@ -81,37 +74,33 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
   // The number of regions for which we have allocated HeapRegions for.
   uint _allocated_length;
 
-  // The maximum number of regions in the heap.
-  uint _max_length;
-
   // Find a contiguous set of empty regions of length num, starting
   // from the given index.
   uint find_contiguous_from(uint from, uint num);
 
-  // Map a heap address to a biased region index. Assume that the
-  // address is valid.
-  inline uintx addr_to_index_biased(HeapWord* addr) const;
-
   void increment_allocated_length() {
-    assert(_allocated_length < _max_length, "pre-condition");
+    assert(_allocated_length < max_length(), "pre-condition");
     _allocated_length++;
   }
 
   void increment_length() {
-    assert(_length < _max_length, "pre-condition");
-    _length++;
+    assert(length() < max_length(), "pre-condition");
+    _committed_length++;
   }
 
   void decrement_length() {
-    assert(_length > 0, "pre-condition");
-    _length--;
+    assert(length() > 0, "pre-condition");
+    _committed_length--;
   }
 
+  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
+  HeapWord* heap_end() const { return _regions.end_address_mapped(); }
+
  public:
   // Empty contructor, we'll initialize it with the initialize() method.
-  HeapRegionSeq() { }
+  HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
 
-  void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
+  void initialize(HeapWord* bottom, HeapWord* end);
 
   // Return the HeapRegion at the given index. Assume that the index
   // is valid.
@@ -126,10 +115,10 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
   inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
 
   // Return the number of regions that have been committed in the heap.
-  uint length() const { return _length; }
+  uint length() const { return _committed_length; }
 
   // Return the maximum number of regions in the heap.
-  uint max_length() const { return _max_length; }
+  uint max_length() const { return (uint)_regions.length(); }
 
   // Expand the sequence to reflect that the heap has grown from
   // old_end to new_end. Either create new HeapRegions, or re-use
...
@@ -28,28 +28,16 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 
-inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
-  assert(_heap_bottom <= addr && addr < _heap_end,
-         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
-                 addr, _heap_bottom, _heap_end));
-  uintx index = (uintx) addr >> _region_shift;
-  return index;
-}
-
 inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
-  assert(_heap_bottom <= addr && addr < _heap_end,
-         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
-                 addr, _heap_bottom, _heap_end));
-  uintx index_biased = addr_to_index_biased(addr);
-  HeapRegion* hr = _regions_biased[index_biased];
+  HeapRegion* hr = _regions.get_by_address(addr);
   assert(hr != NULL, "invariant");
   return hr;
 }
 
 inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
-  if (addr != NULL && addr < _heap_end) {
-    assert(addr >= _heap_bottom,
-           err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
+  if (addr != NULL && addr < heap_end()) {
+    assert(addr >= heap_bottom(),
+           err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
     return addr_to_region_unsafe(addr);
   }
   return NULL;
@@ -57,7 +45,7 @@ inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
 
 inline HeapRegion* HeapRegionSeq::at(uint index) const {
   assert(index < length(), "pre-condition");
-  HeapRegion* hr = _regions[index];
+  HeapRegion* hr = _regions.get_by_index(index);
   assert(hr != NULL, "sanity");
   assert(hr->hrs_index() == index, "sanity");
   return hr;
...
@@ -34,8 +34,14 @@
   static_field(HeapRegion, GrainBytes, size_t) \
   static_field(HeapRegion, LogOfHRGrainBytes, int) \
   \
-  nonstatic_field(HeapRegionSeq, _regions, HeapRegion**) \
-  nonstatic_field(HeapRegionSeq, _length, uint) \
+  nonstatic_field(G1HeapRegionTable, _base, address) \
+  nonstatic_field(G1HeapRegionTable, _length, size_t) \
+  nonstatic_field(G1HeapRegionTable, _biased_base, address) \
+  nonstatic_field(G1HeapRegionTable, _bias, size_t) \
+  nonstatic_field(G1HeapRegionTable, _shift_by, uint) \
+  \
+  nonstatic_field(HeapRegionSeq, _regions, G1HeapRegionTable) \
+  nonstatic_field(HeapRegionSeq, _committed_length, uint) \
   \
   nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \
   nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \
@@ -58,6 +64,8 @@
 #define VM_TYPES_G1(declare_type, declare_toplevel_type) \
   \
+  declare_toplevel_type(G1HeapRegionTable) \
+  \
   declare_type(G1CollectedHeap, SharedHeap) \
   \
   declare_type(HeapRegion, ContiguousSpace) \
...
@@ -122,7 +122,7 @@ void GC_locker::jni_unlock(JavaThread* thread) {
     // strictly needed. It's added here to make it clear that
     // the GC will NOT be performed if any other caller
     // of GC_locker::lock() still needs GC locked.
-    if (!is_active()) {
+    if (!is_active_internal()) {
       _doing_gc = true;
       {
         // Must give up the lock while at a safepoint
...
@@ -88,7 +88,7 @@ class GC_locker: public AllStatic {
  public:
   // Accessors
   static bool is_active() {
-    assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
+    assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
     return is_active_internal();
   }
   static bool needs_gc() { return _needs_gc; }
...
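These two GC_locker hunks go together, and they appear to be why the symbolTable.cpp asserts earlier in this commit drop their GC_locker::is_active() disjunct: is_active() may now only be read at a safepoint, and jni_unlock(), which runs outside a safepoint while holding JNICritical_lock, reads the state through is_active_internal() instead. A self-contained sketch of that accessor split (hypothetical names; std::mutex stands in for JNICritical_lock):

#include <cassert>
#include <mutex>

class GCLockerSketch {
  static bool _at_safepoint;        // stand-in for SafepointSynchronize::is_at_safepoint()
  static bool _needs_gc;
  static int  _jni_lock_count;
  static std::mutex _jni_critical;  // stand-in for JNICritical_lock

  static bool is_active_internal() { return _jni_lock_count > 0; }

public:
  // General-purpose accessor: the state is only stable once all Java
  // threads are stopped, hence the safepoint assert.
  static bool is_active() {
    assert(_at_safepoint && "only read at safepoint");
    return is_active_internal();
  }

  static void jni_lock() {
    std::lock_guard<std::mutex> g(_jni_critical);
    _jni_lock_count++;
  }

  static void jni_unlock() {
    std::lock_guard<std::mutex> g(_jni_critical);
    _jni_lock_count--;
    // Holding _jni_critical keeps the count stable even though no safepoint
    // is in progress, so the internal accessor is the correct one here.
    if (_needs_gc && !is_active_internal()) {
      // perform the GC that was deferred while the locker was held
    }
  }
};

bool GCLockerSketch::_at_safepoint   = false;
bool GCLockerSketch::_needs_gc       = false;
int  GCLockerSketch::_jni_lock_count = 0;
std::mutex GCLockerSketch::_jni_critical;

int main() {
  GCLockerSketch::jni_lock();
  GCLockerSketch::jni_unlock();
  return 0;
}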
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
*/ */
#include "precompiled.hpp" #include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp" #include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp" #include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp" #include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp" #include "memory/collectorPolicy.hpp"
...@@ -111,7 +112,7 @@ typedef class FreeList<Metachunk> ChunkList; ...@@ -111,7 +112,7 @@ typedef class FreeList<Metachunk> ChunkList;
// Has three lists of free chunks, and a total size and // Has three lists of free chunks, and a total size and
// count that includes all three // count that includes all three
class ChunkManager VALUE_OBJ_CLASS_SPEC { class ChunkManager : public CHeapObj<mtInternal> {
// Free list of chunks of different sizes. // Free list of chunks of different sizes.
// SpecializedChunk // SpecializedChunk
...@@ -158,7 +159,12 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC { ...@@ -158,7 +159,12 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC {
public: public:
ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {} ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
: _free_chunks_total(0), _free_chunks_count(0) {
_free_chunks[SpecializedIndex].set_size(specialized_size);
_free_chunks[SmallIndex].set_size(small_size);
_free_chunks[MediumIndex].set_size(medium_size);
}
// add or delete (return) a chunk to the global freelist. // add or delete (return) a chunk to the global freelist.
Metachunk* chunk_freelist_allocate(size_t word_size); Metachunk* chunk_freelist_allocate(size_t word_size);
...@@ -219,7 +225,7 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC { ...@@ -219,7 +225,7 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC {
void locked_print_free_chunks(outputStream* st); void locked_print_free_chunks(outputStream* st);
void locked_print_sum_free_chunks(outputStream* st); void locked_print_sum_free_chunks(outputStream* st);
void print_on(outputStream* st); void print_on(outputStream* st) const;
}; };
// Used to manage the free list of Metablocks (a block corresponds // Used to manage the free list of Metablocks (a block corresponds
...@@ -276,11 +282,6 @@ class VirtualSpaceNode : public CHeapObj<mtClass> { ...@@ -276,11 +282,6 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
// VirtualSpace // VirtualSpace
Metachunk* first_chunk() { return (Metachunk*) bottom(); } Metachunk* first_chunk() { return (Metachunk*) bottom(); }
void inc_container_count();
#ifdef ASSERT
uint container_count_slow();
#endif
public: public:
VirtualSpaceNode(size_t byte_size); VirtualSpaceNode(size_t byte_size);
...@@ -314,8 +315,10 @@ class VirtualSpaceNode : public CHeapObj<mtClass> { ...@@ -314,8 +315,10 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
void inc_top(size_t word_size) { _top += word_size; } void inc_top(size_t word_size) { _top += word_size; }
uintx container_count() { return _container_count; } uintx container_count() { return _container_count; }
void inc_container_count();
void dec_container_count(); void dec_container_count();
#ifdef ASSERT #ifdef ASSERT
uint container_count_slow();
void verify_container_count(); void verify_container_count();
#endif #endif
...@@ -421,8 +424,6 @@ class VirtualSpaceList : public CHeapObj<mtClass> { ...@@ -421,8 +424,6 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
VirtualSpaceNode* _virtual_space_list; VirtualSpaceNode* _virtual_space_list;
// virtual space currently being used for allocations // virtual space currently being used for allocations
VirtualSpaceNode* _current_virtual_space; VirtualSpaceNode* _current_virtual_space;
// Free chunk list for all other metadata
ChunkManager _chunk_manager;
// Can this virtual list allocate >1 spaces? Also, used to determine // Can this virtual list allocate >1 spaces? Also, used to determine
// whether to allocate unlimited small chunks in this virtual space // whether to allocate unlimited small chunks in this virtual space
...@@ -475,7 +476,6 @@ class VirtualSpaceList : public CHeapObj<mtClass> { ...@@ -475,7 +476,6 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
return _current_virtual_space; return _current_virtual_space;
} }
ChunkManager* chunk_manager() { return &_chunk_manager; }
bool is_class() const { return _is_class; } bool is_class() const { return _is_class; }
// Allocate the first virtualspace. // Allocate the first virtualspace.
...@@ -494,14 +494,7 @@ class VirtualSpaceList : public CHeapObj<mtClass> { ...@@ -494,14 +494,7 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
void dec_virtual_space_count(); void dec_virtual_space_count();
// Unlink empty VirtualSpaceNodes and free it. // Unlink empty VirtualSpaceNodes and free it.
void purge(); void purge(ChunkManager* chunk_manager);
// Used and capacity in the entire list of virtual spaces.
// These are global values shared by all Metaspaces
size_t capacity_words_sum();
size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
size_t used_words_sum();
size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
bool contains(const void *ptr); bool contains(const void *ptr);
...@@ -582,18 +575,12 @@ class SpaceManager : public CHeapObj<mtClass> { ...@@ -582,18 +575,12 @@ class SpaceManager : public CHeapObj<mtClass> {
// Type of metadata allocated. // Type of metadata allocated.
Metaspace::MetadataType _mdtype; Metaspace::MetadataType _mdtype;
// Chunk related size
size_t _medium_chunk_bunch;
// List of chunks in use by this SpaceManager. Allocations // List of chunks in use by this SpaceManager. Allocations
// are done from the current chunk. The list is used for deallocating // are done from the current chunk. The list is used for deallocating
// chunks when the SpaceManager is freed. // chunks when the SpaceManager is freed.
Metachunk* _chunks_in_use[NumberOfInUseLists]; Metachunk* _chunks_in_use[NumberOfInUseLists];
Metachunk* _current_chunk; Metachunk* _current_chunk;
// Virtual space where allocation comes from.
VirtualSpaceList* _vs_list;
// Number of small chunks to allocate to a manager // Number of small chunks to allocate to a manager
// If class space manager, small chunks are unlimited // If class space manager, small chunks are unlimited
static uint const _small_chunk_limit; static uint const _small_chunk_limit;
...@@ -626,7 +613,9 @@ class SpaceManager : public CHeapObj<mtClass> { ...@@ -626,7 +613,9 @@ class SpaceManager : public CHeapObj<mtClass> {
} }
Metaspace::MetadataType mdtype() { return _mdtype; } Metaspace::MetadataType mdtype() { return _mdtype; }
VirtualSpaceList* vs_list() const { return _vs_list; }
VirtualSpaceList* vs_list() const { return Metaspace::get_space_list(_mdtype); }
ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
Metachunk* current_chunk() const { return _current_chunk; } Metachunk* current_chunk() const { return _current_chunk; }
void set_current_chunk(Metachunk* v) { void set_current_chunk(Metachunk* v) {
...@@ -648,18 +637,19 @@ class SpaceManager : public CHeapObj<mtClass> { ...@@ -648,18 +637,19 @@ class SpaceManager : public CHeapObj<mtClass> {
public: public:
SpaceManager(Metaspace::MetadataType mdtype, SpaceManager(Metaspace::MetadataType mdtype,
Mutex* lock, Mutex* lock);
VirtualSpaceList* vs_list);
~SpaceManager(); ~SpaceManager();
enum ChunkMultiples { enum ChunkMultiples {
MediumChunkMultiple = 4 MediumChunkMultiple = 4
}; };
bool is_class() { return _mdtype == Metaspace::ClassType; }
// Accessors // Accessors
size_t specialized_chunk_size() { return SpecializedChunk; } size_t specialized_chunk_size() { return SpecializedChunk; }
size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; } size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; } size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; } size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
size_t allocated_blocks_words() const { return _allocated_blocks_words; } size_t allocated_blocks_words() const { return _allocated_blocks_words; }
...@@ -762,7 +752,7 @@ void VirtualSpaceNode::inc_container_count() { ...@@ -762,7 +752,7 @@ void VirtualSpaceNode::inc_container_count() {
_container_count++; _container_count++;
assert(_container_count == container_count_slow(), assert(_container_count == container_count_slow(),
err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
"container_count_slow() " SIZE_FORMAT, " container_count_slow() " SIZE_FORMAT,
_container_count, container_count_slow())); _container_count, container_count_slow()));
} }
...@@ -775,7 +765,7 @@ void VirtualSpaceNode::dec_container_count() { ...@@ -775,7 +765,7 @@ void VirtualSpaceNode::dec_container_count() {
void VirtualSpaceNode::verify_container_count() { void VirtualSpaceNode::verify_container_count() {
assert(_container_count == container_count_slow(), assert(_container_count == container_count_slow(),
err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
"container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow())); " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
} }
#endif #endif
...@@ -1020,7 +1010,7 @@ void ChunkManager::remove_chunk(Metachunk* chunk) { ...@@ -1020,7 +1010,7 @@ void ChunkManager::remove_chunk(Metachunk* chunk) {
// Walk the list of VirtualSpaceNodes and delete // Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count. Remove Metachunks in // nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists. // the node from their respective freelists.
void VirtualSpaceList::purge() { void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
assert_lock_strong(SpaceManager::expand_lock()); assert_lock_strong(SpaceManager::expand_lock());
// Don't use a VirtualSpaceListIterator because this // Don't use a VirtualSpaceListIterator because this
// list is being changed and a straightforward use of an iterator is not safe. // list is being changed and a straightforward use of an iterator is not safe.
...@@ -1042,7 +1032,7 @@ void VirtualSpaceList::purge() { ...@@ -1042,7 +1032,7 @@ void VirtualSpaceList::purge() {
prev_vsl->set_next(vsl->next()); prev_vsl->set_next(vsl->next());
} }
vsl->purge(chunk_manager()); vsl->purge(chunk_manager);
dec_reserved_words(vsl->reserved_words()); dec_reserved_words(vsl->reserved_words());
dec_committed_words(vsl->committed_words()); dec_committed_words(vsl->committed_words());
dec_virtual_space_count(); dec_virtual_space_count();
...@@ -1064,36 +1054,6 @@ void VirtualSpaceList::purge() { ...@@ -1064,36 +1054,6 @@ void VirtualSpaceList::purge() {
#endif #endif
} }
size_t VirtualSpaceList::used_words_sum() {
size_t allocated_by_vs = 0;
VirtualSpaceListIterator iter(virtual_space_list());
while (iter.repeat()) {
VirtualSpaceNode* vsl = iter.get_next();
// Sum used region [bottom, top) in each virtualspace
allocated_by_vs += vsl->used_words_in_vs();
}
assert(allocated_by_vs >= chunk_manager()->free_chunks_total_words(),
err_msg("Total in free chunks " SIZE_FORMAT
" greater than total from virtual_spaces " SIZE_FORMAT,
allocated_by_vs, chunk_manager()->free_chunks_total_words()));
size_t used =
allocated_by_vs - chunk_manager()->free_chunks_total_words();
return used;
}
// Space available in all MetadataVirtualspaces allocated
// for metadata. This is the upper limit on the capacity
// of chunks allocated out of all the MetadataVirtualspaces.
size_t VirtualSpaceList::capacity_words_sum() {
size_t capacity = 0;
VirtualSpaceListIterator iter(virtual_space_list());
while (iter.repeat()) {
VirtualSpaceNode* vsl = iter.get_next();
capacity += vsl->capacity_words_in_vs();
}
return capacity;
}
VirtualSpaceList::VirtualSpaceList(size_t word_size ) : VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
_is_class(false), _is_class(false),
_virtual_space_list(NULL), _virtual_space_list(NULL),
...@@ -1104,10 +1064,6 @@ VirtualSpaceList::VirtualSpaceList(size_t word_size ) : ...@@ -1104,10 +1064,6 @@ VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
MutexLockerEx cl(SpaceManager::expand_lock(), MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag); Mutex::_no_safepoint_check_flag);
bool initialization_succeeded = grow_vs(word_size); bool initialization_succeeded = grow_vs(word_size);
_chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
_chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
_chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
assert(initialization_succeeded, assert(initialization_succeeded,
" VirtualSpaceList initialization should not fail"); " VirtualSpaceList initialization should not fail");
} }
...@@ -1123,9 +1079,6 @@ VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : ...@@ -1123,9 +1079,6 @@ VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
Mutex::_no_safepoint_check_flag); Mutex::_no_safepoint_check_flag);
VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
bool succeeded = class_entry->initialize(); bool succeeded = class_entry->initialize();
_chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
_chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
_chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
assert(succeeded, " VirtualSpaceList initialization should not fail"); assert(succeeded, " VirtualSpaceList initialization should not fail");
link_vs(class_entry); link_vs(class_entry);
} }
...@@ -1142,7 +1095,7 @@ bool VirtualSpaceList::grow_vs(size_t vs_word_size) { ...@@ -1142,7 +1095,7 @@ bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
} }
// Reserve the space // Reserve the space
size_t vs_byte_size = vs_word_size * BytesPerWord; size_t vs_byte_size = vs_word_size * BytesPerWord;
assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned"); assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
// Allocate the meta virtual space and initialize it. // Allocate the meta virtual space and initialize it.
VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size); VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
...@@ -1195,15 +1148,8 @@ Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size, ...@@ -1195,15 +1148,8 @@ Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
size_t grow_chunks_by_words, size_t grow_chunks_by_words,
size_t medium_chunk_bunch) { size_t medium_chunk_bunch) {
// Get a chunk from the chunk freelist // Allocate a chunk out of the current virtual space.
Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words); Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
if (next != NULL) {
next->container()->inc_container_count();
} else {
// Allocate a chunk out of the current virtual space.
next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
}
if (next == NULL) { if (next == NULL) {
// Not enough room in current virtual space. Try to commit // Not enough room in current virtual space. Try to commit
...@@ -1221,12 +1167,14 @@ Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size, ...@@ -1221,12 +1167,14 @@ Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
// being used for CompressedHeaders, don't allocate a new virtualspace. // being used for CompressedHeaders, don't allocate a new virtualspace.
if (can_grow() && MetaspaceGC::should_expand(this, word_size)) { if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
// Get another virtual space. // Get another virtual space.
size_t grow_vs_words = size_t allocation_aligned_expand_words =
MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words); align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
size_t grow_vs_words =
MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
if (grow_vs(grow_vs_words)) { if (grow_vs(grow_vs_words)) {
// Got it. It's on the list now. Get a chunk from it. // Got it. It's on the list now. Get a chunk from it.
assert(current_virtual_space()->expanded_words() == 0, assert(current_virtual_space()->expanded_words() == 0,
"New virtuals space nodes should not have expanded"); "New virtual space nodes should not have expanded");
size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words, size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
page_size_words); page_size_words);
...@@ -1342,8 +1290,9 @@ bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { ...@@ -1342,8 +1290,9 @@ bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
// reserved space, because this is a larger space prereserved for compressed // reserved space, because this is a larger space prereserved for compressed
// class pointers. // class pointers.
if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) { if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
size_t real_allocated = Metaspace::space_list()->reserved_words() + size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType); size_t class_allocated = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
size_t real_allocated = nonclass_allocated + class_allocated;
if (real_allocated >= MaxMetaspaceSize) { if (real_allocated >= MaxMetaspaceSize) {
return false; return false;
} }
...@@ -1536,15 +1485,15 @@ void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm, ...@@ -1536,15 +1485,15 @@ void Metadebug::deallocate_chunk_a_lot(SpaceManager* sm,
if (dummy_chunk == NULL) { if (dummy_chunk == NULL) {
break; break;
} }
vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk); sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
if (TraceMetadataChunkAllocation && Verbose) { if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ", gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
sm->sum_count_in_chunks_in_use()); sm->sum_count_in_chunks_in_use());
dummy_chunk->print_on(gclog_or_tty); dummy_chunk->print_on(gclog_or_tty);
gclog_or_tty->print_cr(" Free chunks total %d count %d", gclog_or_tty->print_cr(" Free chunks total %d count %d",
vsl->chunk_manager()->free_chunks_total_words(), sm->chunk_manager()->free_chunks_total_words(),
vsl->chunk_manager()->free_chunks_count()); sm->chunk_manager()->free_chunks_count());
} }
} }
} else { } else {
...@@ -1796,6 +1745,8 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) { ...@@ -1796,6 +1745,8 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
// work. // work.
chunk->set_is_free(false); chunk->set_is_free(false);
#endif #endif
chunk->container()->inc_container_count();
slow_locked_verify(); slow_locked_verify();
return chunk; return chunk;
} }
...@@ -1830,9 +1781,9 @@ Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { ...@@ -1830,9 +1781,9 @@ Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
return chunk; return chunk;
} }
void ChunkManager::print_on(outputStream* out) { void ChunkManager::print_on(outputStream* out) const {
if (PrintFLSStatistics != 0) { if (PrintFLSStatistics != 0) {
humongous_dictionary()->report_statistics(); const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
} }
} }
...@@ -1979,8 +1930,8 @@ void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const { ...@@ -1979,8 +1930,8 @@ void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const {
} }
} }
vs_list()->chunk_manager()->locked_print_free_chunks(st); chunk_manager()->locked_print_free_chunks(st);
vs_list()->chunk_manager()->locked_print_sum_free_chunks(st); chunk_manager()->locked_print_sum_free_chunks(st);
} }
 size_t SpaceManager::calc_chunk_size(size_t word_size) {
@@ -2084,9 +2035,7 @@ void SpaceManager::print_on(outputStream* st) const {
 }
 
 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
-                           Mutex* lock,
-                           VirtualSpaceList* vs_list) :
-  _vs_list(vs_list),
+                           Mutex* lock) :
   _mdtype(mdtype),
   _allocated_blocks_words(0),
   _allocated_chunks_words(0),
@@ -2172,9 +2121,7 @@ SpaceManager::~SpaceManager() {
   MutexLockerEx fcl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
-  ChunkManager* chunk_manager = vs_list()->chunk_manager();
-  chunk_manager->slow_locked_verify();
+  chunk_manager()->slow_locked_verify();
 
   dec_total_from_size_metrics();
@@ -2188,8 +2135,8 @@ SpaceManager::~SpaceManager() {
   // Have to update before the chunks_in_use lists are emptied
   // below.
-  chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
-                                       sum_count_in_chunks_in_use());
+  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
+                                         sum_count_in_chunks_in_use());
 
   // Add all the chunks in use by this space manager
   // to the global list of free chunks.
@@ -2204,11 +2151,11 @@ SpaceManager::~SpaceManager() {
                              chunk_size_name(i));
     }
     Metachunk* chunks = chunks_in_use(i);
-    chunk_manager->return_chunks(i, chunks);
+    chunk_manager()->return_chunks(i, chunks);
     set_chunks_in_use(i, NULL);
     if (TraceMetadataChunkAllocation && Verbose) {
       gclog_or_tty->print_cr("updated freelist count %d %s",
-                             chunk_manager->free_chunks(i)->count(),
+                             chunk_manager()->free_chunks(i)->count(),
                              chunk_size_name(i));
     }
     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
@@ -2245,16 +2192,16 @@ SpaceManager::~SpaceManager() {
            humongous_chunks->word_size(), HumongousChunkGranularity));
     Metachunk* next_humongous_chunks = humongous_chunks->next();
     humongous_chunks->container()->dec_container_count();
-    chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
+    chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
     humongous_chunks = next_humongous_chunks;
   }
   if (TraceMetadataChunkAllocation && Verbose) {
     gclog_or_tty->print_cr("");
     gclog_or_tty->print_cr("updated dictionary count %d %s",
-                           chunk_manager->humongous_dictionary()->total_count(),
+                           chunk_manager()->humongous_dictionary()->total_count(),
                            chunk_size_name(HumongousIndex));
   }
-  chunk_manager->slow_locked_verify();
+  chunk_manager()->slow_locked_verify();
 }
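The destructor edits above are mechanical: each use of the locally cached `chunk_manager` pointer, formerly fetched through `vs_list()->chunk_manager()`, now goes through the new `chunk_manager()` accessor on SpaceManager. The underlying operation, splicing a whole linked list of same-sized chunks back onto a bucketed free list while the expand lock is held, can be sketched as follows (a minimal sketch with hypothetical simplified types, not the HotSpot classes):

// Minimal sketch of the "return chunks by bucket" idea, assuming a
// hypothetical Chunk with an intrusive next pointer. Not HotSpot code.
#include <cstddef>

struct Chunk {
  Chunk* next;
  size_t word_size;
};

class FreeListByIndex {
  static const int kNumIndices = 3;   // e.g. specialized/small/medium
  Chunk* _free[kNumIndices];
  size_t _count[kNumIndices];
 public:
  FreeListByIndex() {
    for (int i = 0; i < kNumIndices; i++) { _free[i] = NULL; _count[i] = 0; }
  }
  // Splice an entire singly-linked list of same-sized chunks onto bucket i.
  void return_chunks(int i, Chunk* chunks) {
    while (chunks != NULL) {
      Chunk* next = chunks->next;   // save before relinking
      chunks->next = _free[i];
      _free[i] = chunks;
      _count[i]++;
      chunks = next;
    }
  }
  size_t count(int i) const { return _count[i]; }
};

Because the list is intrusive, returning n chunks is O(n) pointer relinking with no allocation, which is why it is safe to do under the expand lock.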
 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
@@ -2343,9 +2290,7 @@ void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
                         sum_count_in_chunks_in_use());
     new_chunk->print_on(gclog_or_tty);
-    if (vs_list() != NULL) {
-      vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
-    }
+    chunk_manager()->locked_print_free_chunks(gclog_or_tty);
   }
 }
@@ -2361,10 +2306,14 @@ void SpaceManager::retire_current_chunk() {
 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
                                        size_t grow_chunks_by_words) {
-  Metachunk* next = vs_list()->get_new_chunk(word_size,
-                                             grow_chunks_by_words,
-                                             medium_chunk_bunch());
+  // Get a chunk from the chunk freelist
+  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
+
+  if (next == NULL) {
+    next = vs_list()->get_new_chunk(word_size,
+                                    grow_chunks_by_words,
+                                    medium_chunk_bunch());
+  }
 
   if (TraceMetadataHumongousAllocation && next != NULL &&
       SpaceManager::is_humongous(next->word_size())) {
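The new shape of get_new_chunk is the core of this change: satisfy the request from the global chunk free list first, and only fall back to carving a fresh chunk out of the virtual space list on a miss. A minimal sketch of that two-level policy, with invented helper names rather than the HotSpot API:

// Sketch of the freelist-first allocation policy. alloc_from_freelist()
// and grow_and_allocate() are invented stand-ins, stubbed out here.
#include <cstddef>

struct Chunk;                                               // opaque for this sketch
static Chunk* alloc_from_freelist(size_t) { return NULL; }  // stub: reuse a freed chunk
static Chunk* grow_and_allocate(size_t)   { return NULL; }  // stub: commit new space

Chunk* get_new_chunk(size_t word_size) {
  Chunk* c = alloc_from_freelist(word_size);  // hit: no new memory committed
  if (c == NULL) {
    c = grow_and_allocate(word_size);         // miss: expand the virtual space list
  }
  return c;                                   // may still be NULL when out of space
}

The payoff is that chunks freed by unloaded class loaders are recycled before any new address space is committed.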
@@ -2644,13 +2593,12 @@ size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
 
 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
-  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
-  if (list == NULL) {
+  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
+  if (chunk_manager == NULL) {
     return 0;
   }
-  ChunkManager* chunk = list->chunk_manager();
-  chunk->slow_verify();
-  return chunk->free_chunks_total_words();
+  chunk_manager->slow_verify();
+  return chunk_manager->free_chunks_total_words();
 }
 
 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
@@ -2801,9 +2749,9 @@ void MetaspaceAux::dump(outputStream* out) {
 }
 
 void MetaspaceAux::verify_free_chunks() {
-  Metaspace::space_list()->chunk_manager()->verify();
+  Metaspace::chunk_manager_metadata()->verify();
   if (Metaspace::using_class_space()) {
-    Metaspace::class_space_list()->chunk_manager()->verify();
+    Metaspace::chunk_manager_class()->verify();
   }
 }
@@ -2874,6 +2822,9 @@ Metaspace::~Metaspace() {
 VirtualSpaceList* Metaspace::_space_list = NULL;
 VirtualSpaceList* Metaspace::_class_space_list = NULL;
 
+ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
+ChunkManager* Metaspace::_chunk_manager_class = NULL;
+
 #define VIRTUALSPACEMULTIPLIER 2
 
 #ifdef _LP64
@@ -2981,6 +2932,7 @@ void Metaspace::initialize_class_space(ReservedSpace rs) {
          err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
   assert(using_class_space(), "Must be using class space");
   _class_space_list = new VirtualSpaceList(rs);
+  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
 }
 
 #endif
@@ -3006,6 +2958,7 @@ void Metaspace::global_initialize() {
       // remainder is the misc code and data chunks.
       cds_total = FileMapInfo::shared_spaces_size();
       _space_list = new VirtualSpaceList(cds_total/wordSize);
+      _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
 
 #ifdef _LP64
       // Set the compressed klass pointer base so that decoding of these pointers works
@@ -3073,15 +3026,30 @@ void Metaspace::global_initialize() {
     size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
     // Initialize the list of virtual spaces.
     _space_list = new VirtualSpaceList(word_size);
+    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
   }
 }
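Every initialization path now pairs each VirtualSpaceList with a ChunkManager built from the three non-humongous chunk sizes for that metadata type. How a requested size maps into one of those buckets can be sketched like this (the word sizes below are illustrative assumptions, not values quoted from the source):

// Sketch of mapping a request to one of three fixed chunk sizes, mirroring
// ChunkManager(specialized, small, medium). The 128/512/8192 word sizes
// are assumed example values, not quoted from HotSpot.
#include <cstddef>

enum ChunkIndex { SpecializedIndex, SmallIndex, MediumIndex, HumongousIndex };

ChunkIndex index_for(size_t word_size,
                     size_t specialized = 128,
                     size_t small = 512,
                     size_t medium = 8192) {
  if (word_size <= specialized) return SpecializedIndex;
  if (word_size <= small)       return SmallIndex;
  if (word_size <= medium)      return MediumIndex;
  return HumongousIndex;  // oversized requests go to the humongous dictionary
}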
+Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
+                                               size_t chunk_word_size,
+                                               size_t chunk_bunch) {
+  // Get a chunk from the chunk freelist
+  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
+  if (chunk != NULL) {
+    return chunk;
+  }
+
+  return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
+}
+
 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
   assert(space_list() != NULL,
          "Metadata VirtualSpaceList has not been initialized");
+  assert(chunk_manager_metadata() != NULL,
+         "Metadata ChunkManager has not been initialized");
 
-  _vsm = new SpaceManager(NonClassType, lock, space_list());
+  _vsm = new SpaceManager(NonClassType, lock);
   if (_vsm == NULL) {
     return;
   }
@@ -3090,11 +3058,13 @@ void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
   vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
 
   if (using_class_space()) {
     assert(class_space_list() != NULL,
            "Class VirtualSpaceList has not been initialized");
+    assert(chunk_manager_class() != NULL,
+           "Class ChunkManager has not been initialized");
 
     // Allocate SpaceManager for classes.
-    _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
+    _class_vsm = new SpaceManager(ClassType, lock);
     if (_class_vsm == NULL) {
       return;
     }
@@ -3103,9 +3073,9 @@ void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
 
   // Allocate chunk for metadata objects
-  Metachunk* new_chunk =
-     space_list()->get_initialization_chunk(word_size,
-                                            vsm()->medium_chunk_bunch());
+  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
+                                                  word_size,
+                                                  vsm()->medium_chunk_bunch());
   assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
   if (new_chunk != NULL) {
     // Add to this manager's list of chunks in use and current_chunk().
@@ -3114,9 +3084,9 @@ void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
   // Allocate chunk for class metadata objects
   if (using_class_space()) {
-    Metachunk* class_chunk =
-       class_space_list()->get_initialization_chunk(class_word_size,
-                                                    class_vsm()->medium_chunk_bunch());
+    Metachunk* class_chunk = get_initialization_chunk(ClassType,
+                                                      class_word_size,
+                                                      class_vsm()->medium_chunk_bunch());
     if (class_chunk != NULL) {
       class_vsm()->add_chunk(class_chunk, true);
     }
@@ -3333,12 +3303,16 @@ void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
   }
 }
 
+void Metaspace::purge(MetadataType mdtype) {
+  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
+}
+
 void Metaspace::purge() {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
-  space_list()->purge();
+  purge(NonClassType);
   if (using_class_space()) {
-    class_space_list()->purge();
+    purge(ClassType);
  }
 }
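The new one-argument purge(MetadataType) keeps the expand-lock scope in the zero-argument purge() and hands each VirtualSpaceList its matching ChunkManager, so empty virtual spaces return their chunks to the right free list. A self-contained sketch of that delegation shape, using std::mutex in place of HotSpot's expand lock and invented types throughout:

// Sketch of the "typed helper under one lock" pattern used by purge().
// All names and types here are illustrative, not HotSpot code.
#include <mutex>

struct ChunkMgr { /* bucketed free lists */ };
struct SpaceList {
  void purge(ChunkMgr* mgr) { (void)mgr; /* unmap empty spaces, return chunks */ }
};

std::mutex expand_lock;
SpaceList data_list, class_list;
ChunkMgr  data_mgr,  class_mgr;
bool      using_class_space = true;

void purge(SpaceList* list, ChunkMgr* mgr) { list->purge(mgr); }

void purge_all() {
  std::lock_guard<std::mutex> guard(expand_lock);  // one lock scope for both types
  purge(&data_list, &data_mgr);
  if (using_class_space) {
    purge(&class_list, &class_mgr);
  }
}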
@@ -3385,7 +3359,7 @@ void Metaspace::dump(outputStream* const out) const {
 
 #ifndef PRODUCT
 
-class MetaspaceAuxTest : AllStatic {
+class TestMetaspaceAuxTest : AllStatic {
  public:
   static void test_reserved() {
     size_t reserved = MetaspaceAux::reserved_bytes();
@@ -3425,14 +3399,25 @@ class MetaspaceAuxTest : AllStatic {
     }
   }
 
+  static void test_virtual_space_list_large_chunk() {
+    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
+    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+    // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
+    // vm_allocation_granularity aligned on Windows.
+    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
+    large_size += (os::vm_page_size()/BytesPerWord);
+    vs_list->get_new_chunk(large_size, large_size, 0);
+  }
+
   static void test() {
     test_reserved();
     test_committed();
+    test_virtual_space_list_large_chunk();
   }
 };
 
-void MetaspaceAux_test() {
-  MetaspaceAuxTest::test();
+void TestMetaspaceAux_test() {
+  TestMetaspaceAuxTest::test();
 }
 
 #endif
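The renamed test class also gains test_virtual_space_list_large_chunk(), which requests a chunk big enough to force a fresh reservation and deliberately misaligned with respect to os::vm_allocation_granularity(), so the list must round the reservation up. A worked sketch of that rounding, assuming a 4 KB page, 64 KB allocation granularity (typical Windows values), and 8-byte words; all three constants are assumptions here:

// Worked example of the alignment the test exercises. Page size,
// granularity, and word size are assumed values, not from the source.
#include <cassert>
#include <cstddef>
#include <cstdio>

size_t align_up(size_t size, size_t alignment) {
  // alignment must be a power of two for this mask trick
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t K = 1024;
  const size_t bytes_per_word = 8;
  const size_t granularity_words = (64 * K) / bytes_per_word;   // 8192 words
  size_t large_size = 2 * 256 * K + (4 * K) / bytes_per_word;   // 524800 words
  assert(large_size % granularity_words != 0);                  // deliberately unaligned
  printf("rounded up: %zu words\n", align_up(large_size, granularity_words));
  return 0;
}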
......
@@ -56,12 +56,15 @@
 //    +-------------------+
 //
 
+class ChunkManager;
 class ClassLoaderData;
 class Metablock;
+class Metachunk;
 class MetaWord;
 class Mutex;
 class outputStream;
 class SpaceManager;
+class VirtualSpaceList;
 
 // Metaspaces each have a SpaceManager and allocations
 // are done by the SpaceManager. Allocations are done
@@ -76,8 +79,6 @@ class SpaceManager;
 //   allocate() method returns a block for use as a
 //   quantum of metadata.
 
-class VirtualSpaceList;
-
 class Metaspace : public CHeapObj<mtClass> {
   friend class VMStructs;
   friend class SpaceManager;
@@ -102,6 +103,10 @@ class Metaspace : public CHeapObj<mtClass> {
  private:
   void initialize(Mutex* lock, MetaspaceType type);
 
+  Metachunk* get_initialization_chunk(MetadataType mdtype,
+                                      size_t chunk_word_size,
+                                      size_t chunk_bunch);
+
   // Align up the word size to the allocation word size
   static size_t align_word_size_up(size_t);
@@ -134,6 +139,10 @@ class Metaspace : public CHeapObj<mtClass> {
   static VirtualSpaceList* _space_list;
   static VirtualSpaceList* _class_space_list;
 
+  static ChunkManager* _chunk_manager_metadata;
+  static ChunkManager* _chunk_manager_class;
+
+ public:
   static VirtualSpaceList* space_list()       { return _space_list; }
   static VirtualSpaceList* class_space_list() { return _class_space_list; }
   static VirtualSpaceList* get_space_list(MetadataType mdtype) {
@@ -141,6 +150,14 @@ class Metaspace : public CHeapObj<mtClass> {
     return mdtype == ClassType ? class_space_list() : space_list();
   }
 
+  static ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; }
+  static ChunkManager* chunk_manager_class()    { return _chunk_manager_class; }
+
+  static ChunkManager* get_chunk_manager(MetadataType mdtype) {
+    assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype");
+    return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
+  }
+
+ private:
   // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
   // maintain a single list for now.
   void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
@@ -199,6 +216,7 @@ class Metaspace : public CHeapObj<mtClass> {
   void dump(outputStream* const out) const;
 
   // Free empty virtualspaces
+  static void purge(MetadataType mdtype);
   static void purge();
 
   void print_on(outputStream* st) const;
......
@@ -5046,7 +5046,10 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
 void TestReservedSpace_test();
 void TestReserveMemorySpecial_test();
 void TestVirtualSpace_test();
-void MetaspaceAux_test();
+void TestMetaspaceAux_test();
+#if INCLUDE_ALL_GCS
+void TestG1BiasedArray_test();
+#endif
 
 void execute_internal_vm_tests() {
   if (ExecuteInternalVMTests) {
@@ -5054,7 +5057,7 @@ void execute_internal_vm_tests() {
     run_unit_test(TestReservedSpace_test());
     run_unit_test(TestReserveMemorySpecial_test());
     run_unit_test(TestVirtualSpace_test());
-    run_unit_test(MetaspaceAux_test());
+    run_unit_test(TestMetaspaceAux_test());
     run_unit_test(GlobalDefinitions::test_globals());
     run_unit_test(GCTimerAllTest::all());
     run_unit_test(arrayOopDesc::test_max_array_length());
@@ -5066,6 +5069,7 @@ void execute_internal_vm_tests() {
     run_unit_test(VMStructs::test());
 #endif
 #if INCLUDE_ALL_GCS
+    run_unit_test(TestG1BiasedArray_test());
     run_unit_test(HeapRegionRemSet::test_prt());
 #endif
     tty->print_cr("All internal VM tests passed");
......
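Renaming MetaspaceAux_test to TestMetaspaceAux_test keeps the forward declaration, the run_unit_test registration, and the Test*_test naming convention of the neighboring entries in step. A sketch of how such a stringizing test-runner macro can log and invoke each test; the macro body below is an assumed shape, not quoted from jni.cpp:

// Sketch of a stringizing test-runner macro in the style of run_unit_test.
// The macro body is an assumption, not the actual jni.cpp definition.
#include <cstdio>

#define run_unit_test(call)              \
  do {                                   \
    printf("Running test: %s\n", #call); \
    call;                                \
  } while (0)

void TestMetaspaceAux_test_stub() { printf("  (stub body)\n"); }

int main() {
  run_unit_test(TestMetaspaceAux_test_stub());
  return 0;
}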