提交 1beac383 编写于 作者: Y ysr

7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot...

7099817: CMS: +FLSVerifyLists +FLSVerifyIndexTable asserts: odd slot non-empty, chunk not on free list
Summary: Suitably weaken asserts that were in each case a tad too strong; fix up some loose uses of parameters in code related to size-indexed free list table.
Reviewed-by: jmasa, brutisso, stefank
上级 15519f74
...@@ -62,7 +62,7 @@ void CompactibleFreeListSpace::set_cms_values() { ...@@ -62,7 +62,7 @@ void CompactibleFreeListSpace::set_cms_values() {
MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment; MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
assert(IndexSetStart == 0 && IndexSetStride == 0, "already set"); assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
IndexSetStart = MinObjAlignment; IndexSetStart = (int) MinChunkSize;
IndexSetStride = MinObjAlignment; IndexSetStride = MinObjAlignment;
} }
...@@ -138,7 +138,7 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, ...@@ -138,7 +138,7 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
} else { } else {
_fitStrategy = FreeBlockStrategyNone; _fitStrategy = FreeBlockStrategyNone;
} }
checkFreeListConsistency(); check_free_list_consistency();
// Initialize locks for parallel case. // Initialize locks for parallel case.
...@@ -1358,17 +1358,29 @@ FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) { ...@@ -1358,17 +1358,29 @@ FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
ShouldNotReachHere(); ShouldNotReachHere();
} }
bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
const {
assert(fc->size() < IndexSetSize, "Size of chunk is too large"); assert(fc->size() < IndexSetSize, "Size of chunk is too large");
return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc); return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
} }
bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
(_smallLinearAllocBlock._word_size == fc->size()),
"Linear allocation block shows incorrect size");
return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
(_smallLinearAllocBlock._word_size == fc->size()));
}
// Check if the purported free chunk is present either as a linear
// allocation block, the size-indexed table of (smaller) free blocks,
// or the larger free blocks kept in the binary tree dictionary.
bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const { bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
if (fc->size() >= IndexSetSize) { if (verify_chunk_is_linear_alloc_block(fc)) {
return dictionary()->verifyChunkInFreeLists(fc); return true;
} else { } else if (fc->size() < IndexSetSize) {
return verifyChunkInIndexedFreeLists(fc); return verifyChunkInIndexedFreeLists(fc);
} else {
return dictionary()->verifyChunkInFreeLists(fc);
} }
} }
...@@ -2495,7 +2507,8 @@ void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const { ...@@ -2495,7 +2507,8 @@ void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
FreeChunk* tail = _indexedFreeList[size].tail(); FreeChunk* tail = _indexedFreeList[size].tail();
size_t num = _indexedFreeList[size].count(); size_t num = _indexedFreeList[size].count();
size_t n = 0; size_t n = 0;
guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty"); guarantee(((size >= MinChunkSize) && (size % IndexSetStride == 0)) || fc == NULL,
"Slot should have been empty");
for (; fc != NULL; fc = fc->next(), n++) { for (; fc != NULL; fc = fc->next(), n++) {
guarantee(fc->size() == size, "Size inconsistency"); guarantee(fc->size() == size, "Size inconsistency");
guarantee(fc->isFree(), "!free?"); guarantee(fc->isFree(), "!free?");
...@@ -2506,14 +2519,14 @@ void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const { ...@@ -2506,14 +2519,14 @@ void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
} }
#ifndef PRODUCT #ifndef PRODUCT
void CompactibleFreeListSpace::checkFreeListConsistency() const { void CompactibleFreeListSpace::check_free_list_consistency() const {
assert(_dictionary->minSize() <= IndexSetSize, assert(_dictionary->minSize() <= IndexSetSize,
"Some sizes can't be allocated without recourse to" "Some sizes can't be allocated without recourse to"
" linear allocation buffers"); " linear allocation buffers");
assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk), assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
"else MIN_TREE_CHUNK_SIZE is wrong"); "else MIN_TREE_CHUNK_SIZE is wrong");
assert((IndexSetStride == 2 && IndexSetStart == 2) || assert((IndexSetStride == 2 && IndexSetStart == 4) || // 32-bit
(IndexSetStride == 1 && IndexSetStart == 1), "just checking"); (IndexSetStride == 1 && IndexSetStart == 3), "just checking"); // 64-bit
assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0), assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
"Some for-loops may be incorrectly initialized"); "Some for-loops may be incorrectly initialized");
assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1), assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
...@@ -2688,33 +2701,27 @@ void CFLS_LAB::compute_desired_plab_size() { ...@@ -2688,33 +2701,27 @@ void CFLS_LAB::compute_desired_plab_size() {
} }
} }
// If this is changed in the future to allow parallel
// access, one would need to take the FL locks and,
// depending on how it is used, stagger access from
// parallel threads to reduce contention.
void CFLS_LAB::retire(int tid) { void CFLS_LAB::retire(int tid) {
// We run this single threaded with the world stopped; // We run this single threaded with the world stopped;
// so no need for locks and such. // so no need for locks and such.
#define CFLS_LAB_PARALLEL_ACCESS 0
NOT_PRODUCT(Thread* t = Thread::current();) NOT_PRODUCT(Thread* t = Thread::current();)
assert(Thread::current()->is_VM_thread(), "Error"); assert(Thread::current()->is_VM_thread(), "Error");
assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
"Will access to uninitialized slot below");
#if CFLS_LAB_PARALLEL_ACCESS
for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
i > 0;
i -= CompactibleFreeListSpace::IndexSetStride) {
#else // CFLS_LAB_PARALLEL_ACCESS
for (size_t i = CompactibleFreeListSpace::IndexSetStart; for (size_t i = CompactibleFreeListSpace::IndexSetStart;
i < CompactibleFreeListSpace::IndexSetSize; i < CompactibleFreeListSpace::IndexSetSize;
i += CompactibleFreeListSpace::IndexSetStride) { i += CompactibleFreeListSpace::IndexSetStride) {
#endif // !CFLS_LAB_PARALLEL_ACCESS
assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(), assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
"Can't retire more than what we obtained"); "Can't retire more than what we obtained");
if (_num_blocks[i] > 0) { if (_num_blocks[i] > 0) {
size_t num_retire = _indexedFreeList[i].count(); size_t num_retire = _indexedFreeList[i].count();
assert(_num_blocks[i] > num_retire, "Should have used at least one"); assert(_num_blocks[i] > num_retire, "Should have used at least one");
{ {
#if CFLS_LAB_PARALLEL_ACCESS // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
MutexLockerEx x(_cfls->_indexedFreeListParLocks[i], // Mutex::_no_safepoint_check_flag);
Mutex::_no_safepoint_check_flag);
#endif // CFLS_LAB_PARALLEL_ACCESS
// Update globals stats for num_blocks used // Update globals stats for num_blocks used
_global_num_blocks[i] += (_num_blocks[i] - num_retire); _global_num_blocks[i] += (_num_blocks[i] - num_retire);
_global_num_workers[i]++; _global_num_workers[i]++;
......
...@@ -502,10 +502,14 @@ class CompactibleFreeListSpace: public CompactibleSpace { ...@@ -502,10 +502,14 @@ class CompactibleFreeListSpace: public CompactibleSpace {
void verifyFreeLists() const PRODUCT_RETURN; void verifyFreeLists() const PRODUCT_RETURN;
void verifyIndexedFreeLists() const; void verifyIndexedFreeLists() const;
void verifyIndexedFreeList(size_t size) const; void verifyIndexedFreeList(size_t size) const;
// verify that the given chunk is in the free lists. // Verify that the given chunk is in the free lists:
// i.e. either the binary tree dictionary, the indexed free lists
// or the linear allocation block.
bool verifyChunkInFreeLists(FreeChunk* fc) const; bool verifyChunkInFreeLists(FreeChunk* fc) const;
// Verify that the given chunk is the linear allocation block
bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
// Do some basic checks on the free lists. // Do some basic checks on the free lists.
void checkFreeListConsistency() const PRODUCT_RETURN; void check_free_list_consistency() const PRODUCT_RETURN;
// Printing support // Printing support
void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st); void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册