Commit 78e9ba4b authored by: tonyp

Merge

/*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -402,6 +402,29 @@ size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   return res;
 }
void LinearAllocBlock::print_on(outputStream* st) const {
st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
_ptr, _word_size, _refillSize, _allocation_size_limit);
}
void CompactibleFreeListSpace::print_on(outputStream* st) const {
st->print_cr("COMPACTIBLE FREELIST SPACE");
st->print_cr(" Space:");
Space::print_on(st);
st->print_cr("promoInfo:");
_promoInfo.print_on(st);
st->print_cr("_smallLinearAllocBlock");
_smallLinearAllocBlock.print_on(st);
// dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
_fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
}
 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 const {
   reportIndexedFreeListStatistics();
@@ -557,13 +580,15 @@ size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   HeapWord* prevEnd = end();
   assert(prevEnd != value, "unnecessary set_end call");
-  assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
+  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
+         "New end is below unallocated block");
   _end = value;
   if (prevEnd != NULL) {
     // Resize the underlying block offset table.
     _bt.resize(pointer_delta(value, bottom()));
     if (value <= prevEnd) {
-      assert(value >= unallocated_block(), "New end is below unallocated block");
+      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
+             "New end is below unallocated block");
     } else {
       // Now, take this new chunk and add it to the free blocks.
       // Note that the BOT has not yet been updated for this block.
@@ -938,7 +963,6 @@ HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   NOT_PRODUCT(verify_objects_initialized());
-  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
   // This must be volatile, or else there is a danger that the compiler
   // will compile the code below into a sometimes-infinite loop, by keeping
   // the value read the first time in a register.
@@ -957,7 +981,7 @@ size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   // must read from what 'p' points to in each loop.
   klassOop k = ((volatile oopDesc*)p)->klass_or_null();
   if (k != NULL) {
-    assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
+    assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
     oop o = (oop)p;
     assert(o->is_parsable(), "Should be parsable");
     assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
@@ -1231,7 +1255,6 @@ HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
     // satisfy the request.  This is different that
     // evm.
     // Don't record chunk off a LinAB?  smallSplitBirth(size);
   } else {
     // Raid the exact free lists larger than size, even if they are not
     // overpopulated.
@@ -1449,6 +1472,7 @@ CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
     // Update BOT last so that other (parallel) GC threads see a consistent
     // view of the BOT and free blocks.
     // Above must occur before BOT is updated below.
+    OrderAccess::storestore();
     _bt.split_block(res, blk_size, size);  // adjust block offset table
   }
   return res;
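This hunk (and several below) inserts OrderAccess::storestore() between the stores that mark a chunk as free and the block offset table update. As an illustration only, here is a minimal C++11 analogue of that publication pattern, using a release fence in place of HotSpot's OrderAccess API; the types and names below are hypothetical, not HotSpot code.

#include <atomic>
#include <cstddef>

// Hypothetical sketch: a writer initializes a block's header fields, then
// publishes it through a table entry. The release fence plays the role that
// OrderAccess::storestore() plays in the patch: it keeps the header stores
// from being reordered after the publishing store, so a concurrent reader
// that observes the table entry also observes a consistent header.
struct BlockHeader {
  size_t       size;
  BlockHeader* prev;   // NULL marks the block as free in this sketch
};

std::atomic<BlockHeader*> g_table_entry{nullptr};

void publish_free_block(BlockHeader* blk, size_t size) {
  blk->size = size;
  blk->prev = nullptr;                                  // mark as free
  std::atomic_thread_fence(std::memory_order_release);  // ~ storestore()
  g_table_entry.store(blk, std::memory_order_relaxed);  // ~ BOT update
}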
@@ -1477,6 +1501,7 @@ HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
     // Update BOT last so that other (parallel) GC threads see a consistent
     // view of the BOT and free blocks.
     // Above must occur before BOT is updated below.
+    OrderAccess::storestore();
     _bt.split_block(res, blk_size, size);  // adjust block offset table
     _bt.allocated(res, size);
   }
@@ -1856,6 +1881,8 @@ CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
   // Above must occur before BOT is updated below.
   // adjust block offset table
+  OrderAccess::storestore();
+  assert(chunk->isFree() && ffc->isFree(), "Error");
   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
   if (rem_size < SmallForDictionary) {
     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
@@ -1911,8 +1938,7 @@ void CompactibleFreeListSpace::save_marks() {
   // mark the "end" of the used space at the time of this call;
   // note, however, that promoted objects from this point
   // on are tracked in the _promoInfo below.
-  set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
-                      unallocated_block() : end());
+  set_saved_mark_word(unallocated_block());
   // inform allocator that promotions should be tracked.
   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
   _promoInfo.startTrackingPromotions();
@@ -2238,8 +2264,7 @@ void CompactibleFreeListSpace::split(size_t from, size_t to1) {
 }

 void CompactibleFreeListSpace::print() const {
-  tty->print(" CompactibleFreeListSpace");
-  Space::print();
+  Space::print_on(tty);
 }

 void CompactibleFreeListSpace::prepare_for_verify() {
@@ -2253,18 +2278,28 @@ class VerifyAllBlksClosure: public BlkClosure {
  private:
   const CompactibleFreeListSpace* _sp;
   const MemRegion                 _span;
+  HeapWord*                       _last_addr;
+  size_t                          _last_size;
+  bool                            _last_was_obj;
+  bool                            _last_was_live;
  public:
   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
-                       MemRegion span) :  _sp(sp), _span(span) { }
+                       MemRegion span) :  _sp(sp), _span(span),
+                                          _last_addr(NULL), _last_size(0),
+                                          _last_was_obj(false), _last_was_live(false) { }
   virtual size_t do_blk(HeapWord* addr) {
     size_t res;
+    bool   was_obj  = false;
+    bool   was_live = false;
     if (_sp->block_is_obj(addr)) {
+      was_obj = true;
       oop p = oop(addr);
       guarantee(p->is_oop(), "Should be an oop");
       res = _sp->adjustObjectSize(p->size());
       if (_sp->obj_is_alive(addr)) {
+        was_live = true;
         p->verify();
       }
     } else {
@@ -2275,7 +2310,20 @@ class VerifyAllBlksClosure: public BlkClosure {
                   "Chunk should be on a free list");
       }
     }
-    guarantee(res != 0, "Livelock: no rank reduction!");
+    if (res == 0) {
+      gclog_or_tty->print_cr("Livelock: no rank reduction!");
+      gclog_or_tty->print_cr(
+        " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
+        " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
+        addr,       res,        was_obj       ? "true" : "false", was_live       ? "true" : "false",
+        _last_addr, _last_size, _last_was_obj ? "true" : "false", _last_was_live ? "true" : "false");
+      _sp->print_on(gclog_or_tty);
+      guarantee(false, "Seppuku!");
+    }
+    _last_addr = addr;
+    _last_size = res;
+    _last_was_obj  = was_obj;
+    _last_was_live = was_live;
     return res;
   }
 };
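The reworked closure above records the previous block so a failed progress check can be reported with context before aborting. A toy sketch of the same pattern, with made-up types (not HotSpot code):

#include <cstddef>
#include <cstdio>

// Remember the previous step of a heap walk so that, if the walk ever fails
// to make progress (a zero-sized step), both the current and previous blocks
// can be reported instead of failing with no context.
struct WalkState {
  size_t last_addr = 0;
  size_t last_size = 0;
};

size_t visit_block(WalkState* st, size_t addr, size_t computed_size) {
  if (computed_size == 0) {
    std::fprintf(stderr,
                 "Livelock: no rank reduction! current = %zu, "
                 "previous = %zu (size %zu)\n",
                 addr, st->last_addr, st->last_size);
    // a real verifier would abort here (guarantee(false, ...))
  }
  st->last_addr = addr;
  st->last_size = computed_size;
  return computed_size;  // caller advances by this many words
}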
@@ -2521,7 +2569,7 @@ void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
   FreeChunk* res;
-  word_sz = _cfls->adjustObjectSize(word_sz);
+  guarantee(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
     // This locking manages sync with other large object allocations.
     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
@@ -2667,12 +2715,12 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
        (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
        (CMSSplitIndexedFreeListBlocks || k <= 1);
        k++, cur_sz = k * word_sz) {
-    FreeList* gfl = &_indexedFreeList[cur_sz];
     FreeList fl_for_cur_sz;  // Empty.
     fl_for_cur_sz.set_size(cur_sz);
     {
       MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                       Mutex::_no_safepoint_check_flag);
+      FreeList* gfl = &_indexedFreeList[cur_sz];
       if (gfl->count() != 0) {
         // nn is the number of chunks of size cur_sz that
         // we'd need to split k-ways each, in order to create
@@ -2685,9 +2733,9 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
         // we increment the split death count by the number of blocks
         // we just took from the cur_sz-size blocks list and which
         // we will be splitting below.
-        ssize_t deaths = _indexedFreeList[cur_sz].splitDeaths() +
+        ssize_t deaths = gfl->splitDeaths() +
                          fl_for_cur_sz.count();
-        _indexedFreeList[cur_sz].set_splitDeaths(deaths);
+        gfl->set_splitDeaths(deaths);
       }
     }
   }
@@ -2703,18 +2751,25 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
       // access the main chunk sees it as a single free block until we
       // change it.
       size_t fc_size = fc->size();
+      assert(fc->isFree(), "Error");
       for (int i = k-1; i >= 0; i--) {
         FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
+        assert((i != 0) ||
+               ((fc == ffc) && ffc->isFree() &&
+                (ffc->size() == k*word_sz) && (fc_size == word_sz)),
+               "Counting error");
         ffc->setSize(word_sz);
-        ffc->linkNext(NULL);
         ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
+        ffc->linkNext(NULL);
         // Above must occur before BOT is updated below.
-        // splitting from the right, fc_size == (k - i + 1) * wordsize
-        _bt.mark_block((HeapWord*)ffc, word_sz);
+        OrderAccess::storestore();
+        // splitting from the right, fc_size == i * word_sz
+        _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
         fc_size -= word_sz;
-        _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
+        assert(fc_size == i*word_sz, "Error");
+        _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
         _bt.verify_single_block((HeapWord*)fc, fc_size);
-        _bt.verify_single_block((HeapWord*)ffc, ffc->size());
+        _bt.verify_single_block((HeapWord*)ffc, word_sz);
         // Push this on "fl".
         fl->returnChunkAtHead(ffc);
       }
@@ -2744,7 +2799,7 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
                                   _dictionary->minSize()),
                                   FreeBlockDictionary::atLeast);
       if (fc != NULL) {
-        _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
+        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
         dictionary()->dictCensusUpdate(fc->size(),
                                        true /*split*/,
                                        false /*birth*/);
@@ -2754,8 +2809,10 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
       }
     }
   if (fc == NULL) return;
-  assert((ssize_t)n >= 1, "Control point invariant");
   // Otherwise, split up that block.
+  assert((ssize_t)n >= 1, "Control point invariant");
+  assert(fc->isFree(), "Error: should be a free block");
+  _bt.verify_single_block((HeapWord*)fc, fc->size());
   const size_t nn = fc->size() / word_sz;
   n = MIN2(nn, n);
   assert((ssize_t)n >= 1, "Control point invariant");
@@ -2773,6 +2830,7 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
   // dictionary and return, leaving "fl" empty.
   if (n == 0) {
     returnChunkToDictionary(fc);
+    assert(fl->count() == 0, "We never allocated any blocks");
     return;
   }
@@ -2785,11 +2843,14 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
     size_t prefix_size = n * word_sz;
     rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
     rem_fc->setSize(rem);
-    rem_fc->linkNext(NULL);
     rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
+    rem_fc->linkNext(NULL);
     // Above must occur before BOT is updated below.
     assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
+    OrderAccess::storestore();
     _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
+    assert(fc->isFree(), "Error");
+    fc->setSize(prefix_size);
     if (rem >= IndexSetSize) {
       returnChunkToDictionary(rem_fc);
       dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
@@ -2815,11 +2876,12 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
   for (ssize_t i = n-1; i > 0; i--) {
     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
     ffc->setSize(word_sz);
-    ffc->linkNext(NULL);
     ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
+    ffc->linkNext(NULL);
     // Above must occur before BOT is updated below.
+    OrderAccess::storestore();
     // splitting from the right, fc_size == (n - i + 1) * wordsize
-    _bt.mark_block((HeapWord*)ffc, word_sz);
+    _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
     fc_size -= word_sz;
     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
@@ -2828,9 +2890,11 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
     fl->returnChunkAtHead(ffc);
   }
   // First chunk
+  assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
+  // The blocks above should show their new sizes before the first block below
   fc->setSize(word_sz);
+  fc->linkPrev(NULL);    // idempotent wrt free-ness, see assert above
   fc->linkNext(NULL);
-  fc->linkPrev(NULL);
   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   _bt.verify_single_block((HeapWord*)fc, fc->size());
   fl->returnChunkAtHead(fc);
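The split loops above carve the large chunk from its right end and only shrink the prefix afterwards. A small hedged sketch of why splitting from the right keeps the untouched prefix exact at every step (illustrative types, not HotSpot code):

#include <cassert>
#include <cstddef>
#include <vector>

// Carve n equal pieces off the *tail* of a large range. After every step the
// untouched prefix still starts at the original address and its size is
// exact; that is the invariant the right-to-left split loop maintains so a
// concurrent reader never sees a half-updated block.
struct Range { size_t start; size_t words; };

std::vector<Range> carve_from_right(Range big, size_t piece, size_t n) {
  assert(n * piece <= big.words);
  std::vector<Range> pieces;
  for (size_t i = n; i > 0; i--) {
    pieces.push_back(Range{ big.start + (i - 1) * piece, piece });
    big.words -= piece;                    // shrink the prefix from the right
    assert(big.words >= (i - 1) * piece);  // prefix still covers the unsplit part
  }
  return pieces;
}

// Example: a 64-word block split into 4 pieces of 8 words each; the first
// 32 words remain one contiguous prefix throughout:
//   carve_from_right(Range{0, 64}, 8, 4);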
......
/*
- * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,8 @@ class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
   size_t _word_size;
   size_t _refillSize;
   size_t _allocation_size_limit;  // largest size that will be allocated
+
+  void print_on(outputStream* st) const;
 };
 // Concrete subclass of CompactibleSpace that implements
@@ -249,10 +251,14 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   size_t numFreeBlocksInIndexedFreeLists() const;
   // Accessor
   HeapWord* unallocated_block() const {
-    HeapWord* ub = _bt.unallocated_block();
-    assert(ub >= bottom() &&
-           ub <= end(), "space invariant");
-    return ub;
+    if (BlockOffsetArrayUseUnallocatedBlock) {
+      HeapWord* ub = _bt.unallocated_block();
+      assert(ub >= bottom() &&
+             ub <= end(), "space invariant");
+      return ub;
+    } else {
+      return end();
+    }
   }
   void freed(HeapWord* start, size_t size) {
     _bt.freed(start, size);
@@ -476,6 +482,7 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // Debugging support
   void print()                            const;
+  void print_on(outputStream* st)         const;
   void prepare_for_verify();
   void verify(bool allow_dirty)           const;
   void verifyFreeLists()                  const PRODUCT_RETURN;
......
@@ -1019,7 +1019,7 @@ HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
 }

 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
-                                                  bool tlab) {
+                                                  bool tlab /* ignored */) {
   assert_lock_strong(freelistLock());
   size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
   HeapWord* res = cmsSpace()->allocate(adjustedSize);
@@ -1032,6 +1032,11 @@ HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
   // allowing the object to be blackened (and its references scanned)
   // either during a preclean phase or at the final checkpoint.
   if (res != NULL) {
+    // We may block here with an uninitialized object with
+    // its mark-bit or P-bits not yet set. Such objects need
+    // to be safely navigable by block_start().
+    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
+    assert(!((FreeChunk*)res)->isFree(), "Error, block will look free but show wrong size");
     collector()->direct_allocated(res, adjustedSize);
     _direct_allocated_words += adjustedSize;
     // allocation counters
@@ -1061,8 +1066,14 @@ void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
   // [see comments preceding SweepClosure::do_blk() below for details]
   // 1. need to mark the object as live so it isn't collected
   // 2. need to mark the 2nd bit to indicate the object may be uninitialized
-  // 3. need to mark the end of the object so sweeper can skip over it
-  //    if it's uninitialized when the sweeper reaches it.
+  // 3. need to mark the end of the object so marking, precleaning or sweeping
+  //    can skip over uninitialized or unparsable objects. An allocated
+  //    object is considered uninitialized for our purposes as long as
+  //    its klass word is NULL.  (Unparsable objects are those which are
+  //    initialized in the sense just described, but whose sizes can still
+  //    not be correctly determined. Note that the class of unparsable objects
+  //    can only occur in the perm gen. All old gen objects are parsable
+  //    as soon as they are initialized.)
   _markBitMap.mark(start);          // object is live
   _markBitMap.mark(start + 1);      // object is potentially uninitialized?
   _markBitMap.mark(start + size - 1);
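The expanded comment above describes three bitmap marks per directly allocated object: start (live), start+1 (possibly uninitialized), and start+size-1 (end of object). A toy sketch of how a scanner could skip such an object using only those marks; the bitmap and helper names are assumptions, not HotSpot APIs.

#include <bitset>
#include <cstddef>

// One mark bit per heap word; the marking scheme follows the comment above.
constexpr size_t kHeapWords = 1024;
std::bitset<kHeapWords> mark_bits;

void mark_direct_allocation(size_t start, size_t size) {
  mark_bits.set(start);             // object is live
  mark_bits.set(start + 1);         // object may still be uninitialized
  mark_bits.set(start + size - 1);  // last word of the object
}

// Skip an object whose klass word is not yet installed (blocks of more than
// three words): walk forward from start+2 to the next set bit, which is the
// object's last word, and return the address one past the end of the block.
size_t skip_uninitialized(size_t start) {
  size_t w = start + 2;
  while (w < kHeapWords && !mark_bits.test(w)) {
    w++;
  }
  return w + 1;
}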
@@ -1088,7 +1099,13 @@ void CMSCollector::promoted(bool par, HeapWord* start,
   // We don't need to mark the object as uninitialized (as
   // in direct_allocated above) because this is being done with the
   // world stopped and the object will be initialized by the
-  // time the sweeper gets to look at it.
+  // time the marking, precleaning or sweeping get to look at it.
+  // But see the code for copying objects into the CMS generation,
+  // where we need to ensure that concurrent readers of the
+  // block offset table are able to safely navigate a block that
+  // is in flux from being free to being allocated (and in
+  // transition while being copied into) and subsequently
+  // becoming a bona-fide object when the copy/promotion is complete.
   assert(SafepointSynchronize::is_at_safepoint(),
          "expect promotion only at safepoints");
@@ -1304,6 +1321,48 @@ ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
   return collector()->allocation_limit_reached(space, top, word_sz);
 }
// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION: (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE: klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
// obj->size() computes correct size
// [Perm Gen objects needs to be "parsable" before they can be navigated]
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE: mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT: klass_word installed; klass_word != 0;
// obj->size() computes correct size
// [Perm Gen comment above continues to hold]
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
// mut / parnew mut / parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
// ^ |
// |------------------------ DEAD <------------------------------------|
// sweep mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.
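As a reading aid for the state table above, here is a hedged sketch of the FREE / OBJECT / TRANSIENT test for the 32-bit (and 64-bit without compressed oops) layout; the struct and enum are illustrative only, not HotSpot types.

#include <cstdint>

// FREE blocks keep their size in the mark word and tag the klass word's low
// bit; TRANSIENT blocks have no klass word yet, so their size cannot be
// determined; only OBJECT blocks may be sized via the installed klass.
enum class BlockState { FREE, OBJECT, TRANSIENT };

struct BlockHeader {
  uintptr_t mark_word;   // for FREE blocks: the block size in words
  uintptr_t klass_word;  // low bit set => FREE; zero => TRANSIENT
};

BlockState classify(const BlockHeader* h) {
  if (h->klass_word & 1) {
    return BlockState::FREE;       // size is h->mark_word
  } else if (h->klass_word != 0) {
    return BlockState::OBJECT;     // size comes from the installed klass
  } else {
    return BlockState::TRANSIENT;  // size indeterminate; retry or stall
  }
}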
 // Things to support parallel young-gen collection.
 oop
 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
@@ -1331,33 +1390,39 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
     }
   }
   assert(promoInfo->has_spooling_space(), "Control point invariant");
-  HeapWord* obj_ptr = ps->lab.alloc(word_sz);
+  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
+  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
   if (obj_ptr == NULL) {
-    obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
+    obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
     if (obj_ptr == NULL) {
       return NULL;
     }
   }
   oop obj = oop(obj_ptr);
+  OrderAccess::storestore();
   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
+  assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
+  // IMPORTANT: See note on object initialization for CMS above.
   // Otherwise, copy the object.  Here we must be careful to insert the
   // klass pointer last, since this marks the block as an allocated object.
   // Except with compressed oops it's the mark word.
   HeapWord* old_ptr = (HeapWord*)old;
+  // Restore the mark word copied above.
+  obj->set_mark(m);
+  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
+  assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
+  OrderAccess::storestore();
+  if (UseCompressedOops) {
+    // Copy gap missed by (aligned) header size calculation below
+    obj->set_klass_gap(old->klass_gap());
+  }
   if (word_sz > (size_t)oopDesc::header_size()) {
     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                  obj_ptr + oopDesc::header_size(),
                                  word_sz - oopDesc::header_size());
   }
-  if (UseCompressedOops) {
-    // Copy gap missed by (aligned) header size calculation above
-    obj->set_klass_gap(old->klass_gap());
-  }
-  // Restore the mark word copied above.
-  obj->set_mark(m);
   // Now we can track the promoted object, if necessary.  We take care
   // to delay the transition from uninitialized to full object
   // (i.e., insertion of klass pointer) until after, so that it
@@ -1365,18 +1430,22 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
   if (promoInfo->tracking()) {
     promoInfo->track((PromotedObject*)obj, old->klass());
   }
+  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
+  assert(!((FreeChunk*)obj_ptr)->isFree(), "Error, block will look free but show wrong size");
+  assert(old->is_oop(), "Will use and dereference old klass ptr below");
   // Finally, install the klass pointer (this should be volatile).
+  OrderAccess::storestore();
   obj->set_klass(old->klass());
+  // We should now be able to calculate the right size for this object
+  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
-  assert(old->is_oop(), "Will dereference klass ptr below");
   collector()->promoted(true,          // parallel
                         obj_ptr, old->is_objArray(), word_sz);
   NOT_PRODUCT(
-    Atomic::inc(&_numObjectsPromoted);
-    Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
-                &_numWordsPromoted);
+    Atomic::inc_ptr(&_numObjectsPromoted);
+    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
   )
   return obj;
@@ -7868,14 +7937,20 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
   FreeChunk* fc = (FreeChunk*)addr;
   size_t res;
-  // check if we are done sweepinrg
-  if (addr == _limit) { // we have swept up to the limit, do nothing more
+  // Check if we are done sweeping. Below we check "addr >= _limit" rather
+  // than "addr == _limit" because although _limit was a block boundary when
+  // we started the sweep, it may no longer be one because heap expansion
+  // may have caused us to coalesce the block ending at the address _limit
+  // with a newly expanded chunk (this happens when _limit was set to the
+  // previous _end of the space), so we may have stepped past _limit; see CR 6977970.
+  if (addr >= _limit) { // we have swept up to or past the limit, do nothing more
     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
            "sweep _limit out of bounds");
+    assert(addr < _sp->end(), "addr out of bounds");
     // help the closure application finish
-    return pointer_delta(_sp->end(), _limit);
+    return pointer_delta(_sp->end(), addr);
   }
-  assert(addr <= _limit, "sweep invariant");
+  assert(addr < _limit, "sweep invariant");
   // check if we should yield
   do_yield_check(addr);
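The rewritten limit check above tolerates a sweep cursor that has stepped past _limit after coalescing with newly expanded space. A minimal sketch of that invariant with plain integers (not HotSpot code):

#include <cassert>
#include <cstddef>

// The sweep records a limit when it starts, but coalescing the final block
// with newly expanded space can carry the cursor past that limit. The
// "remaining" distance must therefore be measured from the cursor, not from
// the stale limit, and the termination test must be >= rather than ==.
size_t sweep_step(size_t cursor, size_t limit, size_t space_end,
                  size_t coalesced_block_words) {
  if (cursor >= limit) {           // swept up to or past the limit: finish up
    assert(cursor < space_end);
    return space_end - cursor;     // help the walk terminate at space_end
  }
  assert(cursor < limit);          // still strictly inside the sweep range
  return coalesced_block_words;    // advance over the (possibly merged) block
}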
......
@@ -1010,10 +1010,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   // Non-product stat counters
   NOT_PRODUCT(
-    int _numObjectsPromoted;
-    int _numWordsPromoted;
-    int _numObjectsAllocated;
-    int _numWordsAllocated;
+    size_t _numObjectsPromoted;
+    size_t _numWordsPromoted;
+    size_t _numObjectsAllocated;
+    size_t _numWordsAllocated;
   )

   // Used for sizing decisions
......
/*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -110,15 +110,21 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
   }
   void linkNext(FreeChunk* ptr) { _next = ptr; }
   void linkPrev(FreeChunk* ptr) {
     LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
     _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
   }
   void clearPrev()              { _prev = NULL; }
   void clearNext()              { _next = NULL; }
   void markNotFree() {
-    LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::prototype());)
-    // Also set _prev to null
-    _prev = NULL;
+    // Set _prev (klass) to null before (if) clearing the mark word below
+    _prev = NULL;
+#ifdef _LP64
+    if (UseCompressedOops) {
+      OrderAccess::storestore();
+      set_mark(markOopDesc::prototype());
+    }
+#endif
+    assert(!isFree(), "Error");
   }

   // Return the address past the end of this chunk
......
@@ -330,7 +330,7 @@ void PromotionInfo::verify() const {
 void PromotionInfo::print_on(outputStream* st) const {
   SpoolBlock* curSpool = NULL;
   size_t i = 0;
-  st->print_cr("start & end indices: [" SIZE_FORMAT ", " SIZE_FORMAT ")",
+  st->print_cr(" start & end indices: [" SIZE_FORMAT ", " SIZE_FORMAT ")",
                _firstIndex, _nextIndex);
   for (curSpool = _spoolHead; curSpool != _spoolTail && curSpool != NULL;
        curSpool = curSpool->nextSpoolBlock) {
@@ -350,7 +350,7 @@ void PromotionInfo::print_on(outputStream* st) const {
     st->print_cr(" free ");
     i++;
   }
-  st->print_cr(SIZE_FORMAT " header spooling blocks", i);
+  st->print_cr(" " SIZE_FORMAT " header spooling blocks", i);
 }

 void SpoolBlock::print_on(outputStream* st) const {
......
@@ -2586,9 +2586,6 @@ void ConcurrentMark::complete_marking_in_collection_set() {
   double end_time = os::elapsedTime();
   double elapsed_time_ms = (end_time - start) * 1000.0;
   g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms);
-  if (PrintGCDetails) {
-    gclog_or_tty->print_cr("Mark closure took %5.2f ms.", elapsed_time_ms);
-  }

   ClearMarksInHRClosure clr(nextMarkBitMap());
   g1h->collection_set_iterate(&clr);
......
...@@ -1044,29 +1044,56 @@ resize_if_necessary_after_full_collection(size_t word_size) { ...@@ -1044,29 +1044,56 @@ resize_if_necessary_after_full_collection(size_t word_size) {
const size_t capacity_after_gc = capacity(); const size_t capacity_after_gc = capacity();
const size_t free_after_gc = capacity_after_gc - used_after_gc; const size_t free_after_gc = capacity_after_gc - used_after_gc;
// This is enforced in arguments.cpp.
assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
"otherwise the code below doesn't make sense");
// We don't have floating point command-line arguments // We don't have floating point command-line arguments
const double minimum_free_percentage = (double) MinHeapFreeRatio / 100; const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
const double maximum_used_percentage = 1.0 - minimum_free_percentage; const double maximum_used_percentage = 1.0 - minimum_free_percentage;
const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100; const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
const double minimum_used_percentage = 1.0 - maximum_free_percentage; const double minimum_used_percentage = 1.0 - maximum_free_percentage;
size_t minimum_desired_capacity = (size_t) (used_after_gc / maximum_used_percentage); const size_t min_heap_size = collector_policy()->min_heap_byte_size();
size_t maximum_desired_capacity = (size_t) (used_after_gc / minimum_used_percentage); const size_t max_heap_size = collector_policy()->max_heap_byte_size();
// Don't shrink less than the initial size. // We have to be careful here as these two calculations can overflow
minimum_desired_capacity = // 32-bit size_t's.
MAX2(minimum_desired_capacity, double used_after_gc_d = (double) used_after_gc;
collector_policy()->initial_heap_byte_size()); double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
maximum_desired_capacity = double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
MAX2(maximum_desired_capacity,
collector_policy()->initial_heap_byte_size()); // Let's make sure that they are both under the max heap size, which
// by default will make them fit into a size_t.
// We are failing here because minimum_desired_capacity is double desired_capacity_upper_bound = (double) max_heap_size;
assert(used_after_gc <= minimum_desired_capacity, "sanity check"); minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
assert(minimum_desired_capacity <= maximum_desired_capacity, "sanity check"); desired_capacity_upper_bound);
maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
desired_capacity_upper_bound);
// We can now safely turn them into size_t's.
size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
// This assert only makes sense here, before we adjust them
// with respect to the min and max heap size.
assert(minimum_desired_capacity <= maximum_desired_capacity,
err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
"maximum_desired_capacity = "SIZE_FORMAT,
minimum_desired_capacity, maximum_desired_capacity));
// Should not be greater than the heap max size. No need to adjust
// it with respect to the heap min size as it's a lower bound (i.e.,
// we'll try to make the capacity larger than it, not smaller).
minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
// Should not be less than the heap min size. No need to adjust it
// with respect to the heap max size as it's an upper bound (i.e.,
// we'll try to make the capacity smaller than it, not greater).
maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
if (PrintGC && Verbose) { if (PrintGC && Verbose) {
const double free_percentage = ((double)free_after_gc) / capacity(); const double free_percentage =
(double) free_after_gc / (double) capacity_after_gc;
gclog_or_tty->print_cr("Computing new size after full GC "); gclog_or_tty->print_cr("Computing new size after full GC ");
gclog_or_tty->print_cr(" " gclog_or_tty->print_cr(" "
" minimum_free_percentage: %6.2f", " minimum_free_percentage: %6.2f",
...@@ -1078,45 +1105,47 @@ resize_if_necessary_after_full_collection(size_t word_size) { ...@@ -1078,45 +1105,47 @@ resize_if_necessary_after_full_collection(size_t word_size) {
" capacity: %6.1fK" " capacity: %6.1fK"
" minimum_desired_capacity: %6.1fK" " minimum_desired_capacity: %6.1fK"
" maximum_desired_capacity: %6.1fK", " maximum_desired_capacity: %6.1fK",
capacity() / (double) K, (double) capacity_after_gc / (double) K,
minimum_desired_capacity / (double) K, (double) minimum_desired_capacity / (double) K,
maximum_desired_capacity / (double) K); (double) maximum_desired_capacity / (double) K);
gclog_or_tty->print_cr(" " gclog_or_tty->print_cr(" "
" free_after_gc : %6.1fK" " free_after_gc: %6.1fK"
" used_after_gc : %6.1fK", " used_after_gc: %6.1fK",
free_after_gc / (double) K, (double) free_after_gc / (double) K,
used_after_gc / (double) K); (double) used_after_gc / (double) K);
gclog_or_tty->print_cr(" " gclog_or_tty->print_cr(" "
" free_percentage: %6.2f", " free_percentage: %6.2f",
free_percentage); free_percentage);
} }
if (capacity() < minimum_desired_capacity) { if (capacity_after_gc < minimum_desired_capacity) {
// Don't expand unless it's significant // Don't expand unless it's significant
size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
expand(expand_bytes); expand(expand_bytes);
if (PrintGC && Verbose) { if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" expanding:" gclog_or_tty->print_cr(" "
" expanding:"
" max_heap_size: %6.1fK"
" minimum_desired_capacity: %6.1fK" " minimum_desired_capacity: %6.1fK"
" expand_bytes: %6.1fK", " expand_bytes: %6.1fK",
minimum_desired_capacity / (double) K, (double) max_heap_size / (double) K,
expand_bytes / (double) K); (double) minimum_desired_capacity / (double) K,
(double) expand_bytes / (double) K);
} }
// No expansion, now see if we want to shrink // No expansion, now see if we want to shrink
} else if (capacity() > maximum_desired_capacity) { } else if (capacity_after_gc > maximum_desired_capacity) {
// Capacity too large, compute shrinking size // Capacity too large, compute shrinking size
size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
shrink(shrink_bytes); shrink(shrink_bytes);
if (PrintGC && Verbose) { if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" " gclog_or_tty->print_cr(" "
" shrinking:" " shrinking:"
" initSize: %.1fK" " min_heap_size: %6.1fK"
" maximum_desired_capacity: %.1fK", " maximum_desired_capacity: %6.1fK"
collector_policy()->initial_heap_byte_size() / (double) K, " shrink_bytes: %6.1fK",
maximum_desired_capacity / (double) K); (double) min_heap_size / (double) K,
gclog_or_tty->print_cr(" " (double) maximum_desired_capacity / (double) K,
" shrink_bytes: %.1fK", (double) shrink_bytes / (double) K);
shrink_bytes / (double) K);
} }
} }
} }
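The resize hunk above switches the capacity computations to double precision and clamps them to the maximum heap size before converting back to size_t. A hedged sketch of that pattern with illustrative values (not G1 code):

#include <algorithm>
#include <cstddef>

// On a 32-bit VM, used_after_gc / maximum_used_percentage can exceed SIZE_MAX
// if computed in size_t, so the division is done in double and the result is
// clamped to the maximum heap size before converting back.
size_t desired_capacity(size_t used_after_gc,
                        double maximum_used_percentage,  // e.g. 1.0 - MinHeapFreeRatio/100.0
                        size_t max_heap_size) {
  double desired_d = (double) used_after_gc / maximum_used_percentage;
  desired_d = std::min(desired_d, (double) max_heap_size);  // now fits in size_t
  return (size_t) desired_d;
}

// Example: used_after_gc of ~3.8 GB with maximum_used_percentage = 0.05 would
// overflow a 32-bit size_t (~76 GB), but the double result is clamped to the
// configured maximum heap size first.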
@@ -2165,9 +2194,12 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
   }
 }

-HeapWord* G1CollectedHeap::allocate_new_tlab(size_t size) {
+HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
+  assert(!isHumongous(word_size),
+         err_msg("a TLAB should not be of humongous size, "
+                 "word_size = "SIZE_FORMAT, word_size));
   bool dummy;
-  return G1CollectedHeap::mem_allocate(size, false, true, &dummy);
+  return G1CollectedHeap::mem_allocate(word_size, false, true, &dummy);
 }
 bool G1CollectedHeap::allocs_are_zero_filled() {
@@ -3576,7 +3608,7 @@ void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
   if (!r->evacuation_failed()) {
     r->set_evacuation_failed(true);
     if (G1PrintHeapRegions) {
-      gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
+      gclog_or_tty->print("overflow in heap region "PTR_FORMAT" "
                           "["PTR_FORMAT","PTR_FORMAT")\n",
                           r, r->bottom(), r->end());
     }
@@ -3610,6 +3642,10 @@ void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
                                                   size_t word_size) {
+  assert(!isHumongous(word_size),
+         err_msg("we should not be seeing humongous allocation requests "
+                 "during GC, word_size = "SIZE_FORMAT, word_size));
+
   HeapRegion* alloc_region = _gc_alloc_regions[purpose];
   // let the caller handle alloc failure
   if (alloc_region == NULL) return NULL;
@@ -3642,6 +3678,10 @@ G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
                                          HeapRegion* alloc_region,
                                          bool        par,
                                          size_t      word_size) {
+  assert(!isHumongous(word_size),
+         err_msg("we should not be seeing humongous allocation requests "
+                 "during GC, word_size = "SIZE_FORMAT, word_size));
+
   HeapWord* block = NULL;
   // In the parallel case, a previous thread to obtain the lock may have
   // already assigned a new gc_alloc_region.
@@ -4281,7 +4321,7 @@ void G1CollectedHeap::evacuate_collection_set() {
   if (evacuation_failed()) {
     remove_self_forwarding_pointers();
     if (PrintGCDetails) {
-      gclog_or_tty->print(" (evacuation failed)");
+      gclog_or_tty->print(" (to-space overflow)");
     } else if (PrintGC) {
       gclog_or_tty->print("--");
     }
......
@@ -1032,7 +1032,7 @@ public:
   virtual bool supports_tlab_allocation() const;
   virtual size_t tlab_capacity(Thread* thr) const;
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
-  virtual HeapWord* allocate_new_tlab(size_t size);
+  virtual HeapWord* allocate_new_tlab(size_t word_size);

   // Can a compiler initialize a new object without store barriers?
   // This permission only extends from the creation of a new object
......
@@ -57,8 +57,9 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
   assert( SafepointSynchronize::is_at_safepoint() ||
           Heap_lock->owned_by_self(), "pre-condition of the call" );

-  if (_cur_alloc_region != NULL) {
+  // All humongous allocation requests should go through the slow path in
+  // attempt_allocation_slow().
+  if (!isHumongous(word_size) && _cur_alloc_region != NULL) {
     // If this allocation causes a region to become non empty,
     // then we need to update our free_regions count.
@@ -69,13 +70,14 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
     } else {
       res = _cur_alloc_region->allocate(word_size);
     }
-  }
     if (res != NULL) {
       if (!SafepointSynchronize::is_at_safepoint()) {
         assert( Heap_lock->owned_by_self(), "invariant" );
         Heap_lock->unlock();
+      }
+      return res;
     }
-    return res;
   }
   // attempt_allocation_slow will also unlock the heap lock when appropriate.
   return attempt_allocation_slow(word_size, permit_collection_pause);
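The inline fast path above now refuses humongous requests and leaves them to attempt_allocation_slow(). A toy sketch of that routing rule under the usual G1 convention that an object of at least half a region is humongous (illustrative types, not G1 code):

#include <cstddef>

// Bump-pointer fast path for a single allocation region; 0 means failure.
struct Region {
  size_t top;
  size_t end;
  size_t allocate(size_t words) {
    if (top + words > end) return 0;
    size_t result = top;
    top += words;
    return result;
  }
};

const size_t kRegionWords = 1024;             // illustrative region size

bool is_humongous(size_t word_size) {
  return word_size >= kRegionWords / 2;       // "half a region" rule
}

size_t attempt_allocation(Region* cur, size_t word_size,
                          size_t (*slow_path)(size_t)) {
  if (!is_humongous(word_size) && cur != NULL) {
    size_t res = cur->allocate(word_size);
    if (res != 0) return res;                 // fast path succeeded
  }
  return slow_path(word_size);                // humongous or fast path failed
}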
......
@@ -790,8 +790,18 @@ void HeapRegion::verify(bool allow_dirty,
   int objs = 0;
   int blocks = 0;
   VerifyLiveClosure vl_cl(g1, use_prev_marking);
+  bool is_humongous = isHumongous();
+  size_t object_num = 0;
   while (p < top()) {
     size_t size = oop(p)->size();
+    if (is_humongous != g1->isHumongous(size)) {
+      gclog_or_tty->print_cr("obj "PTR_FORMAT" is of %shumongous size ("
+                             SIZE_FORMAT" words) in a %shumongous region",
+                             p, g1->isHumongous(size) ? "" : "non-",
+                             size, is_humongous ? "" : "non-");
+      *failures = true;
+    }
+    object_num += 1;
     if (blocks == BLOCK_SAMPLE_INTERVAL) {
       HeapWord* res = block_start_const(p + (size/2));
       if (p != res) {
@@ -857,6 +867,13 @@ void HeapRegion::verify(bool allow_dirty,
     }
   }

+  if (is_humongous && object_num > 1) {
+    gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous "
+                           "but has "SIZE_FORMAT", objects",
+                           bottom(), end(), object_num);
+    *failures = true;
+  }
+
   if (p != top()) {
     gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
                            "does not match top "PTR_FORMAT, p, top());
......
// //
// Copyright (c) 2004, 2009, Oracle and/or its affiliates. All rights reserved. // Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// //
// This code is free software; you can redistribute it and/or modify it // This code is free software; you can redistribute it and/or modify it
...@@ -34,6 +34,8 @@ binaryTreeDictionary.cpp spaceDecorator.hpp ...@@ -34,6 +34,8 @@ binaryTreeDictionary.cpp spaceDecorator.hpp
binaryTreeDictionary.hpp freeBlockDictionary.hpp binaryTreeDictionary.hpp freeBlockDictionary.hpp
binaryTreeDictionary.hpp freeList.hpp binaryTreeDictionary.hpp freeList.hpp
blockOffsetTable.inline.hpp concurrentMarkSweepGeneration.hpp
cmsAdaptiveSizePolicy.cpp cmsAdaptiveSizePolicy.hpp cmsAdaptiveSizePolicy.cpp cmsAdaptiveSizePolicy.hpp
cmsAdaptiveSizePolicy.cpp defNewGeneration.hpp cmsAdaptiveSizePolicy.cpp defNewGeneration.hpp
cmsAdaptiveSizePolicy.cpp gcStats.hpp cmsAdaptiveSizePolicy.cpp gcStats.hpp
...@@ -85,7 +87,7 @@ cmsOopClosures.hpp genOopClosures.hpp ...@@ -85,7 +87,7 @@ cmsOopClosures.hpp genOopClosures.hpp
cmsOopClosures.inline.hpp cmsOopClosures.hpp cmsOopClosures.inline.hpp cmsOopClosures.hpp
cmsOopClosures.inline.hpp concurrentMarkSweepGeneration.hpp cmsOopClosures.inline.hpp concurrentMarkSweepGeneration.hpp
cmsPermGen.cpp blockOffsetTable.hpp cmsPermGen.cpp blockOffsetTable.inline.hpp
cmsPermGen.cpp cSpaceCounters.hpp cmsPermGen.cpp cSpaceCounters.hpp
cmsPermGen.cpp cmsPermGen.hpp cmsPermGen.cpp cmsPermGen.hpp
cmsPermGen.cpp collectedHeap.inline.hpp cmsPermGen.cpp collectedHeap.inline.hpp
...@@ -121,6 +123,7 @@ compactibleFreeListSpace.cpp universe.inline.hpp ...@@ -121,6 +123,7 @@ compactibleFreeListSpace.cpp universe.inline.hpp
compactibleFreeListSpace.cpp vmThread.hpp compactibleFreeListSpace.cpp vmThread.hpp
compactibleFreeListSpace.hpp binaryTreeDictionary.hpp compactibleFreeListSpace.hpp binaryTreeDictionary.hpp
compactibleFreeListSpace.hpp blockOffsetTable.inline.hpp
compactibleFreeListSpace.hpp freeList.hpp compactibleFreeListSpace.hpp freeList.hpp
compactibleFreeListSpace.hpp promotionInfo.hpp compactibleFreeListSpace.hpp promotionInfo.hpp
compactibleFreeListSpace.hpp space.hpp compactibleFreeListSpace.hpp space.hpp
......
...@@ -225,7 +225,6 @@ arrayOop.cpp oop.inline.hpp ...@@ -225,7 +225,6 @@ arrayOop.cpp oop.inline.hpp
arrayOop.cpp symbolOop.hpp arrayOop.cpp symbolOop.hpp
arrayOop.hpp oop.hpp arrayOop.hpp oop.hpp
arrayOop.hpp universe.hpp
arrayOop.hpp universe.inline.hpp arrayOop.hpp universe.inline.hpp
assembler.cpp assembler.hpp assembler.cpp assembler.hpp
...@@ -236,7 +235,6 @@ assembler.cpp icache.hpp ...@@ -236,7 +235,6 @@ assembler.cpp icache.hpp
assembler.cpp os.hpp assembler.cpp os.hpp
assembler.hpp allocation.hpp assembler.hpp allocation.hpp
assembler.hpp allocation.inline.hpp
assembler.hpp debug.hpp assembler.hpp debug.hpp
assembler.hpp growableArray.hpp assembler.hpp growableArray.hpp
assembler.hpp oopRecorder.hpp assembler.hpp oopRecorder.hpp
...@@ -330,7 +328,7 @@ blockOffsetTable.cpp collectedHeap.inline.hpp ...@@ -330,7 +328,7 @@ blockOffsetTable.cpp collectedHeap.inline.hpp
blockOffsetTable.cpp iterator.hpp blockOffsetTable.cpp iterator.hpp
blockOffsetTable.cpp java.hpp blockOffsetTable.cpp java.hpp
blockOffsetTable.cpp oop.inline.hpp blockOffsetTable.cpp oop.inline.hpp
blockOffsetTable.cpp space.hpp blockOffsetTable.cpp space.inline.hpp
blockOffsetTable.cpp universe.hpp blockOffsetTable.cpp universe.hpp
blockOffsetTable.hpp globalDefinitions.hpp blockOffsetTable.hpp globalDefinitions.hpp
...@@ -338,6 +336,7 @@ blockOffsetTable.hpp memRegion.hpp ...@@ -338,6 +336,7 @@ blockOffsetTable.hpp memRegion.hpp
blockOffsetTable.hpp virtualspace.hpp blockOffsetTable.hpp virtualspace.hpp
blockOffsetTable.inline.hpp blockOffsetTable.hpp blockOffsetTable.inline.hpp blockOffsetTable.hpp
blockOffsetTable.inline.hpp safepoint.hpp
blockOffsetTable.inline.hpp space.hpp blockOffsetTable.inline.hpp space.hpp
bytecode.cpp bytecode.hpp bytecode.cpp bytecode.hpp
...@@ -1807,7 +1806,7 @@ generateOopMap.hpp signature.hpp ...@@ -1807,7 +1806,7 @@ generateOopMap.hpp signature.hpp
generateOopMap.hpp universe.inline.hpp generateOopMap.hpp universe.inline.hpp
generation.cpp allocation.inline.hpp generation.cpp allocation.inline.hpp
generation.cpp blockOffsetTable.hpp generation.cpp blockOffsetTable.inline.hpp
generation.cpp cardTableRS.hpp generation.cpp cardTableRS.hpp
generation.cpp collectedHeap.inline.hpp generation.cpp collectedHeap.inline.hpp
generation.cpp copy.hpp generation.cpp copy.hpp
...@@ -3436,7 +3435,7 @@ perfMemory_<os_family>.cpp perfMemory.hpp ...@@ -3436,7 +3435,7 @@ perfMemory_<os_family>.cpp perfMemory.hpp
perfMemory_<os_family>.cpp resourceArea.hpp perfMemory_<os_family>.cpp resourceArea.hpp
perfMemory_<os_family>.cpp vmSymbols.hpp perfMemory_<os_family>.cpp vmSymbols.hpp
permGen.cpp blockOffsetTable.hpp permGen.cpp blockOffsetTable.inline.hpp
permGen.cpp cSpaceCounters.hpp permGen.cpp cSpaceCounters.hpp
permGen.cpp collectedHeap.inline.hpp permGen.cpp collectedHeap.inline.hpp
permGen.cpp compactPermGen.hpp permGen.cpp compactPermGen.hpp
...@@ -3805,7 +3804,7 @@ sizes.cpp sizes.hpp ...@@ -3805,7 +3804,7 @@ sizes.cpp sizes.hpp
sizes.hpp allocation.hpp sizes.hpp allocation.hpp
sizes.hpp globalDefinitions.hpp sizes.hpp globalDefinitions.hpp
space.cpp blockOffsetTable.hpp space.cpp blockOffsetTable.inline.hpp
space.cpp copy.hpp space.cpp copy.hpp
space.cpp defNewGeneration.hpp space.cpp defNewGeneration.hpp
space.cpp genCollectedHeap.hpp space.cpp genCollectedHeap.hpp
...@@ -3835,7 +3834,6 @@ space.hpp prefetch.hpp ...@@ -3835,7 +3834,6 @@ space.hpp prefetch.hpp
space.hpp watermark.hpp space.hpp watermark.hpp
space.hpp workgroup.hpp space.hpp workgroup.hpp
space.inline.hpp blockOffsetTable.inline.hpp
space.inline.hpp collectedHeap.hpp space.inline.hpp collectedHeap.hpp
space.inline.hpp safepoint.hpp space.inline.hpp safepoint.hpp
space.inline.hpp space.hpp space.inline.hpp space.hpp
......
...@@ -58,7 +58,7 @@ void* ResourceObj::operator new(size_t size, allocation_type type) { ...@@ -58,7 +58,7 @@ void* ResourceObj::operator new(size_t size, allocation_type type) {
void ResourceObj::operator delete(void* p) { void ResourceObj::operator delete(void* p) {
assert(((ResourceObj *)p)->allocated_on_C_heap(), assert(((ResourceObj *)p)->allocated_on_C_heap(),
"delete only allowed for C_HEAP objects"); "delete only allowed for C_HEAP objects");
DEBUG_ONLY(((ResourceObj *)p)->_allocation = badHeapOopVal;) DEBUG_ONLY(((ResourceObj *)p)->_allocation = (uintptr_t) badHeapOopVal;)
FreeHeap(p); FreeHeap(p);
} }
...@@ -104,7 +104,7 @@ ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assi ...@@ -104,7 +104,7 @@ ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assi
ResourceObj::~ResourceObj() { ResourceObj::~ResourceObj() {
// allocated_on_C_heap() also checks that encoded (in _allocation) address == this. // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
if (!allocated_on_C_heap()) { // ResourceObj::delete() zaps _allocation for C_heap. if (!allocated_on_C_heap()) { // ResourceObj::delete() zaps _allocation for C_heap.
_allocation = badHeapOopVal; // zap type _allocation = (uintptr_t) badHeapOopVal; // zap type
} }
} }
#endif // ASSERT #endif // ASSERT
......
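The only change in this allocation.cpp hunk is the explicit (uintptr_t) cast when _allocation is zapped with badHeapOopVal; presumably the sentinel is a signed constant and the cast makes the signed-to-unsigned conversion explicit so the assignment stays warning-free on stricter compilers. A tiny illustration of the pattern follows; kBadVal and ResourceObjLike are invented stand-ins, not HotSpot's definitions.

#include <cstdint>

// Hypothetical sentinel in the spirit of badHeapOopVal: a bit pattern that
// should never look like a live value.
static const intptr_t kBadVal = (intptr_t) 0xBAADBABE;

struct ResourceObjLike {
  uintptr_t _allocation;
  ~ResourceObjLike() {
    // Explicit cast: we deliberately store the sentinel's bit pattern into
    // the unsigned field instead of relying on an implicit conversion.
    _allocation = (uintptr_t) kBadVal;
  }
};

int main() { ResourceObjLike r; r._allocation = 0; return 0; }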
/* /*
* Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -103,13 +103,13 @@ void BlockOffsetSharedArray::serialize(SerializeOopClosure* soc, ...@@ -103,13 +103,13 @@ void BlockOffsetSharedArray::serialize(SerializeOopClosure* soc,
////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////
BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array, BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array,
MemRegion mr, bool init_to_zero) : MemRegion mr, bool init_to_zero_) :
BlockOffsetTable(mr.start(), mr.end()), BlockOffsetTable(mr.start(), mr.end()),
_array(array), _array(array)
_init_to_zero(init_to_zero)
{ {
assert(_bottom <= _end, "arguments out of order"); assert(_bottom <= _end, "arguments out of order");
if (!_init_to_zero) { set_init_to_zero(init_to_zero_);
if (!init_to_zero_) {
// initialize cards to point back to mr.start() // initialize cards to point back to mr.start()
set_remainder_to_point_to_start(mr.start() + N_words, mr.end()); set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
_array->set_offset_array(0, 0); // set first card to 0 _array->set_offset_array(0, 0); // set first card to 0
...@@ -121,8 +121,9 @@ BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array, ...@@ -121,8 +121,9 @@ BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array,
// a right-open interval: [start, end) // a right-open interval: [start, end)
void void
BlockOffsetArray:: BlockOffsetArray::
set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) { set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing) {
check_reducing_assertion(reducing);
if (start >= end) { if (start >= end) {
// The start address is equal to the end address (or to // The start address is equal to the end address (or to
// the right of the end address) so there are not cards // the right of the end address) so there are not cards
...@@ -167,7 +168,7 @@ set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) { ...@@ -167,7 +168,7 @@ set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {
size_t end_card = _array->index_for(end-1); size_t end_card = _array->index_for(end-1);
assert(start ==_array->address_for_index(start_card), "Precondition"); assert(start ==_array->address_for_index(start_card), "Precondition");
assert(end ==_array->address_for_index(end_card)+N_words, "Precondition"); assert(end ==_array->address_for_index(end_card)+N_words, "Precondition");
set_remainder_to_point_to_start_incl(start_card, end_card); // closed interval set_remainder_to_point_to_start_incl(start_card, end_card, reducing); // closed interval
} }
...@@ -175,7 +176,9 @@ set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) { ...@@ -175,7 +176,9 @@ set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start() // a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above. // above.
void void
BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) { BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card, bool reducing) {
check_reducing_assertion(reducing);
if (start_card > end_card) { if (start_card > end_card) {
return; return;
} }
...@@ -191,11 +194,11 @@ BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t ...@@ -191,11 +194,11 @@ BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t
size_t reach = start_card - 1 + (power_to_cards_back(i+1) - 1); size_t reach = start_card - 1 + (power_to_cards_back(i+1) - 1);
offset = N_words + i; offset = N_words + i;
if (reach >= end_card) { if (reach >= end_card) {
_array->set_offset_array(start_card_for_region, end_card, offset); _array->set_offset_array(start_card_for_region, end_card, offset, reducing);
start_card_for_region = reach + 1; start_card_for_region = reach + 1;
break; break;
} }
_array->set_offset_array(start_card_for_region, reach, offset); _array->set_offset_array(start_card_for_region, reach, offset, reducing);
start_card_for_region = reach + 1; start_card_for_region = reach + 1;
} }
assert(start_card_for_region > end_card, "Sanity check"); assert(start_card_for_region > end_card, "Sanity check");
...@@ -211,8 +214,10 @@ void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const ...@@ -211,8 +214,10 @@ void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const
return; return;
} }
guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card"); guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
u_char last_entry = N_words;
for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) { for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
u_char entry = _array->offset_array(c); u_char entry = _array->offset_array(c);
guarantee(entry >= last_entry, "Monotonicity");
if (c - start_card > power_to_cards_back(1)) { if (c - start_card > power_to_cards_back(1)) {
guarantee(entry > N_words, "Should be in logarithmic region"); guarantee(entry > N_words, "Should be in logarithmic region");
} }
...@@ -220,11 +225,13 @@ void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const ...@@ -220,11 +225,13 @@ void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const
size_t landing_card = c - backskip; size_t landing_card = c - backskip;
guarantee(landing_card >= (start_card - 1), "Inv"); guarantee(landing_card >= (start_card - 1), "Inv");
if (landing_card >= start_card) { if (landing_card >= start_card) {
guarantee(_array->offset_array(landing_card) <= entry, "monotonicity"); guarantee(_array->offset_array(landing_card) <= entry, "Monotonicity");
} else { } else {
guarantee(landing_card == start_card - 1, "Tautology"); guarantee(landing_card == (start_card - 1), "Tautology");
// Note that N_words is the maximum offset value
guarantee(_array->offset_array(landing_card) <= N_words, "Offset value"); guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
} }
last_entry = entry; // remember for monotonicity test
} }
} }
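The loop in set_remainder_to_point_to_start_incl() hands out BOT code words in runs: the first run of cards after the offset card gets N_words + 0, the next, exponentially longer run gets N_words + 1, and so on, so a reader can skip back toward the block start in large strides. The guarantee added to check_all_cards() pins down the property this relies on: within one block, the entries never decrease as you move away from the start. Here is a self-contained model of the filling loop with that monotonicity check at the end; N_words = 64 and the base-16 stride are assumptions standing in for the real constants.

#include <cassert>
#include <cstdio>
#include <vector>

static const unsigned char N_words = 64;          // assumed maximum direct offset
static size_t power_to_cards_back(size_t i) {
  return (size_t)1 << (4 * i);                    // assumed base-16 stride
}

// Fill the closed card interval [start_card, end_card] with logarithmic
// back-skip entries, mirroring set_remainder_to_point_to_start_incl().
static void fill_remainder(std::vector<unsigned char>& bot,
                           size_t start_card, size_t end_card) {
  if (start_card > end_card) return;
  size_t start_card_for_region = start_card;
  for (size_t i = 0; ; i++) {
    size_t reach = start_card - 1 + (power_to_cards_back(i + 1) - 1);
    unsigned char offset = (unsigned char)(N_words + i);
    size_t limit = (reach < end_card) ? reach : end_card;
    for (size_t c = start_card_for_region; c <= limit; c++) {
      bot[c] = offset;                            // one run of identical code words
    }
    start_card_for_region = reach + 1;
    if (reach >= end_card) break;
  }
  assert(start_card_for_region > end_card && "Sanity check");
}

int main() {
  std::vector<unsigned char> bot(1024, 0);
  fill_remainder(bot, 2, 900);
  // The monotonicity that the new guarantee in check_all_cards() asserts:
  for (size_t c = 3; c <= 900; c++) {
    assert(bot[c] >= bot[c - 1] && "Monotonicity");
  }
  std::printf("card 2 -> %d, card 900 -> %d\n", (int)bot[2], (int)bot[900]);
  return 0;
}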
...@@ -243,7 +250,7 @@ BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) { ...@@ -243,7 +250,7 @@ BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
void void
BlockOffsetArray::do_block_internal(HeapWord* blk_start, BlockOffsetArray::do_block_internal(HeapWord* blk_start,
HeapWord* blk_end, HeapWord* blk_end,
Action action) { Action action, bool reducing) {
assert(Universe::heap()->is_in_reserved(blk_start), assert(Universe::heap()->is_in_reserved(blk_start),
"reference must be into the heap"); "reference must be into the heap");
assert(Universe::heap()->is_in_reserved(blk_end-1), assert(Universe::heap()->is_in_reserved(blk_end-1),
...@@ -275,18 +282,18 @@ BlockOffsetArray::do_block_internal(HeapWord* blk_start, ...@@ -275,18 +282,18 @@ BlockOffsetArray::do_block_internal(HeapWord* blk_start,
switch (action) { switch (action) {
case Action_mark: { case Action_mark: {
if (init_to_zero()) { if (init_to_zero()) {
_array->set_offset_array(start_index, boundary, blk_start); _array->set_offset_array(start_index, boundary, blk_start, reducing);
break; break;
} // Else fall through to the next case } // Else fall through to the next case
} }
case Action_single: { case Action_single: {
_array->set_offset_array(start_index, boundary, blk_start); _array->set_offset_array(start_index, boundary, blk_start, reducing);
// We have finished marking the "offset card". We need to now // We have finished marking the "offset card". We need to now
// mark the subsequent cards that this blk spans. // mark the subsequent cards that this blk spans.
if (start_index < end_index) { if (start_index < end_index) {
HeapWord* rem_st = _array->address_for_index(start_index) + N_words; HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
HeapWord* rem_end = _array->address_for_index(end_index) + N_words; HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
set_remainder_to_point_to_start(rem_st, rem_end); set_remainder_to_point_to_start(rem_st, rem_end, reducing);
} }
break; break;
} }
...@@ -395,7 +402,7 @@ void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk, ...@@ -395,7 +402,7 @@ void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
// Indices for starts of prefix block and suffix block. // Indices for starts of prefix block and suffix block.
size_t pref_index = _array->index_for(pref_addr); size_t pref_index = _array->index_for(pref_addr);
if (_array->address_for_index(pref_index) != pref_addr) { if (_array->address_for_index(pref_index) != pref_addr) {
// pref_addr deos not begin pref_index // pref_addr does not begin pref_index
pref_index++; pref_index++;
} }
...@@ -430,18 +437,18 @@ void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk, ...@@ -430,18 +437,18 @@ void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
if (num_suff_cards > 0) { if (num_suff_cards > 0) {
HeapWord* boundary = _array->address_for_index(suff_index); HeapWord* boundary = _array->address_for_index(suff_index);
// Set the offset card for suffix block // Set the offset card for suffix block
_array->set_offset_array(suff_index, boundary, suff_addr); _array->set_offset_array(suff_index, boundary, suff_addr, true /* reducing */);
// Change any further cards that need changing in the suffix // Change any further cards that need changing in the suffix
if (num_pref_cards > 0) { if (num_pref_cards > 0) {
if (num_pref_cards >= num_suff_cards) { if (num_pref_cards >= num_suff_cards) {
// Unilaterally fix all of the suffix cards: closed card // Unilaterally fix all of the suffix cards: closed card
// index interval in args below. // index interval in args below.
set_remainder_to_point_to_start_incl(suff_index + 1, end_index - 1); set_remainder_to_point_to_start_incl(suff_index + 1, end_index - 1, true /* reducing */);
} else { } else {
// Unilaterally fix the first (num_pref_cards - 1) following // Unilaterally fix the first (num_pref_cards - 1) following
// the "offset card" in the suffix block. // the "offset card" in the suffix block.
set_remainder_to_point_to_start_incl(suff_index + 1, set_remainder_to_point_to_start_incl(suff_index + 1,
suff_index + num_pref_cards - 1); suff_index + num_pref_cards - 1, true /* reducing */);
// Fix the appropriate cards in the remainder of the // Fix the appropriate cards in the remainder of the
// suffix block -- these are the last num_pref_cards // suffix block -- these are the last num_pref_cards
// cards in each power block of the "new" range plumbed // cards in each power block of the "new" range plumbed
...@@ -461,7 +468,7 @@ void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk, ...@@ -461,7 +468,7 @@ void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
// is non-null. // is non-null.
if (left_index <= right_index) { if (left_index <= right_index) {
_array->set_offset_array(left_index, right_index, _array->set_offset_array(left_index, right_index,
N_words + i - 1); N_words + i - 1, true /* reducing */);
} else { } else {
more = false; // we are done more = false; // we are done
} }
...@@ -482,7 +489,7 @@ void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk, ...@@ -482,7 +489,7 @@ void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
more = false; more = false;
} }
assert(left_index <= right_index, "Error"); assert(left_index <= right_index, "Error");
_array->set_offset_array(left_index, right_index, N_words + i - 1); _array->set_offset_array(left_index, right_index, N_words + i - 1, true /* reducing */);
i++; i++;
} }
} }
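All of the split_block() writes above pass true for reducing. The reason is implicit in what a split does: the suffix cards previously described a block whose start lay further to the left, so rewriting them to point at the nearer suffix start can only lower (or keep) each entry, never raise it, which is exactly what the new "Not reducing" assertions police. A toy model of that property, with plain card-distance entries instead of the real encoded ones:

#include <cassert>
#include <cstdio>
#include <vector>

// Toy BOT: back[c] = number of cards to skip back from card c to reach the
// card on which "its" block starts (no logarithmic encoding, for clarity).
struct ToyBOT {
  std::vector<size_t> back;

  void mark_block(size_t start_card, size_t end_card, bool reducing) {
    for (size_t c = start_card; c <= end_card; c++) {
      size_t new_entry = c - start_card;
      // Analogue of the new assertions: a reducing update may only lower
      // (or keep) an entry, never raise it.
      if (reducing) assert(new_entry <= back[c] && "Not reducing");
      back[c] = new_entry;
    }
  }

  // Carve a suffix block out of the block that starts at prefix_start.
  // Only entries at and after split_card change, and each of them shrinks
  // from (c - prefix_start) to (c - split_card).
  void split_block(size_t prefix_start, size_t split_card, size_t end_card) {
    assert(prefix_start <= split_card);
    (void)prefix_start;
    mark_block(split_card, end_card, true /* reducing */);
  }
};

int main() {
  ToyBOT bot;
  bot.back.assign(64, 0);
  bot.mark_block(4, 40, false);   // one big block covering cards 4..40
  bot.split_block(4, 20, 40);     // suffix block now starts at card 20
  std::printf("entry at card 40 is now %zu (was %zu)\n", bot.back[40], (size_t)(40 - 4));
  return 0;
}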
...@@ -501,14 +508,13 @@ void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk, ...@@ -501,14 +508,13 @@ void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
// any cards subsequent to the first one. // any cards subsequent to the first one.
void void
BlockOffsetArrayNonContigSpace::mark_block(HeapWord* blk_start, BlockOffsetArrayNonContigSpace::mark_block(HeapWord* blk_start,
HeapWord* blk_end) { HeapWord* blk_end, bool reducing) {
do_block_internal(blk_start, blk_end, Action_mark); do_block_internal(blk_start, blk_end, Action_mark, reducing);
} }
HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe( HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
const void* addr) const { const void* addr) const {
assert(_array->offset_array(0) == 0, "objects can't cross covered areas"); assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
assert(_bottom <= addr && addr < _end, assert(_bottom <= addr && addr < _end,
"addr must be covered by this Array"); "addr must be covered by this Array");
// Must read this exactly once because it can be modified by parallel // Must read this exactly once because it can be modified by parallel
...@@ -542,9 +548,10 @@ HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe( ...@@ -542,9 +548,10 @@ HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
debug_only(HeapWord* last = q); // for debugging debug_only(HeapWord* last = q); // for debugging
q = n; q = n;
n += _sp->block_size(n); n += _sp->block_size(n);
assert(n > q, err_msg("Looping at: " INTPTR_FORMAT, n));
} }
assert(q <= addr, "wrong order for current and arg"); assert(q <= addr, err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")", q, addr));
assert(addr <= n, "wrong order for arg and next"); assert(addr <= n, err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")", addr, n));
return q; return q;
} }
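The strengthened assertions in block_start_unsafe() document its post-condition: after landing at or before the right block via the table, the code walks forward one block at a time until the next block boundary passes addr, so on exit q <= addr <= n must hold, and the new in-loop assert catches a walk that stops making progress (for example, a zero-sized block). A rough standalone equivalent of the forward-walk step; HeapWordIdx and the block_size callback are stand-ins, not the real types.

#include <cassert>
#include <cstddef>
#include <functional>

typedef size_t HeapWordIdx;    // stand-in for HeapWord*, measured in words

// Walk forward from a table-supplied starting block q until [q, n) covers addr.
static HeapWordIdx forward_to_block_start(
    HeapWordIdx q, HeapWordIdx addr,
    const std::function<size_t(HeapWordIdx)>& block_size) {
  HeapWordIdx n = q + block_size(q);
  while (n <= addr) {
    q = n;
    n += block_size(n);
    assert(n > q && "Looping at n");   // progress check added by the patch
  }
  assert(q <= addr && "wrong order for current and arg");
  assert(addr <= n && "wrong order for arg and next");
  return q;
}

int main() {
  // Purely illustrative: every block is eight words long.
  std::function<size_t(HeapWordIdx)> eight = [](HeapWordIdx) { return (size_t)8; };
  assert(forward_to_block_start(0, 21, eight) == 16);
  return 0;
}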
...@@ -727,9 +734,8 @@ void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start, ...@@ -727,9 +734,8 @@ void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start,
_next_offset_index = end_index + 1; _next_offset_index = end_index + 1;
// Calculate _next_offset_threshold this way because end_index // Calculate _next_offset_threshold this way because end_index
// may be the last valid index in the covered region. // may be the last valid index in the covered region.
_next_offset_threshold = _array->address_for_index(end_index) + _next_offset_threshold = _array->address_for_index(end_index) + N_words;
N_words; assert(_next_offset_threshold >= blk_end, "Incorrect offset threshold");
assert(_next_offset_threshold >= blk_end, "Incorrent offset threshold");
#ifdef ASSERT #ifdef ASSERT
// The offset can be 0 if the block starts on a boundary. That // The offset can be 0 if the block starts on a boundary. That
......
/* /*
* Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -107,6 +107,8 @@ class BlockOffsetSharedArray: public CHeapObj { ...@@ -107,6 +107,8 @@ class BlockOffsetSharedArray: public CHeapObj {
N_words = 1 << LogN_words N_words = 1 << LogN_words
}; };
bool _init_to_zero;
// The reserved region covered by the shared array. // The reserved region covered by the shared array.
MemRegion _reserved; MemRegion _reserved;
...@@ -125,17 +127,28 @@ class BlockOffsetSharedArray: public CHeapObj { ...@@ -125,17 +127,28 @@ class BlockOffsetSharedArray: public CHeapObj {
assert(index < _vs.committed_size(), "index out of range"); assert(index < _vs.committed_size(), "index out of range");
return _offset_array[index]; return _offset_array[index];
} }
void set_offset_array(size_t index, u_char offset) { // An assertion-checking helper method for the set_offset_array() methods below.
void check_reducing_assertion(bool reducing);
void set_offset_array(size_t index, u_char offset, bool reducing = false) {
check_reducing_assertion(reducing);
assert(index < _vs.committed_size(), "index out of range"); assert(index < _vs.committed_size(), "index out of range");
assert(!reducing || _offset_array[index] >= offset, "Not reducing");
_offset_array[index] = offset; _offset_array[index] = offset;
} }
void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
check_reducing_assertion(reducing);
assert(index < _vs.committed_size(), "index out of range"); assert(index < _vs.committed_size(), "index out of range");
assert(high >= low, "addresses out of order"); assert(high >= low, "addresses out of order");
assert(pointer_delta(high, low) <= N_words, "offset too large"); assert(pointer_delta(high, low) <= N_words, "offset too large");
assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
"Not reducing");
_offset_array[index] = (u_char)pointer_delta(high, low); _offset_array[index] = (u_char)pointer_delta(high, low);
} }
void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
check_reducing_assertion(reducing);
assert(index_for(right - 1) < _vs.committed_size(), assert(index_for(right - 1) < _vs.committed_size(),
"right address out of range"); "right address out of range");
assert(left < right, "Heap addresses out of order"); assert(left < right, "Heap addresses out of order");
...@@ -150,12 +163,14 @@ class BlockOffsetSharedArray: public CHeapObj { ...@@ -150,12 +163,14 @@ class BlockOffsetSharedArray: public CHeapObj {
size_t i = index_for(left); size_t i = index_for(left);
const size_t end = i + num_cards; const size_t end = i + num_cards;
for (; i < end; i++) { for (; i < end; i++) {
assert(!reducing || _offset_array[i] >= offset, "Not reducing");
_offset_array[i] = offset; _offset_array[i] = offset;
} }
} }
} }
void set_offset_array(size_t left, size_t right, u_char offset) { void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
check_reducing_assertion(reducing);
assert(right < _vs.committed_size(), "right address out of range"); assert(right < _vs.committed_size(), "right address out of range");
assert(left <= right, "indexes out of order"); assert(left <= right, "indexes out of order");
size_t num_cards = right - left + 1; size_t num_cards = right - left + 1;
...@@ -169,6 +184,7 @@ class BlockOffsetSharedArray: public CHeapObj { ...@@ -169,6 +184,7 @@ class BlockOffsetSharedArray: public CHeapObj {
size_t i = left; size_t i = left;
const size_t end = i + num_cards; const size_t end = i + num_cards;
for (; i < end; i++) { for (; i < end; i++) {
assert(!reducing || _offset_array[i] >= offset, "Not reducing");
_offset_array[i] = offset; _offset_array[i] = offset;
} }
} }
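Each set_offset_array() overload now takes a defaulted reducing argument and, before writing, asserts that a reducing update never raises the stored entry; because the parameter defaults to false, all existing call sites compile unchanged and only the callers that want the stricter check (the splitting and sweeping paths) pass true. Distilled to its essentials (the real methods also range-check against _vs.committed_size()), the pattern is:

#include <cassert>
#include <cstddef>

// 'table' and 'size' are hypothetical stand-ins for the shared offset array.
static void set_entry(unsigned char* table, size_t size, size_t index,
                      unsigned char offset, bool reducing = false) {
  assert(index < size && "index out of range");
  // A reducing caller promises that the entry only ever shrinks.
  assert((!reducing || table[index] >= offset) && "Not reducing");
  table[index] = offset;
}

int main() {
  unsigned char bot[8] = {70, 70, 70, 70, 70, 70, 70, 70};
  set_entry(bot, 8, 3, 66, true);   // OK: 66 <= 70
  set_entry(bot, 8, 3, 64);         // default reducing = false: no constraint
  return 0;
}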
...@@ -212,6 +228,11 @@ public: ...@@ -212,6 +228,11 @@ public:
void set_bottom(HeapWord* new_bottom); void set_bottom(HeapWord* new_bottom);
// Whether entries should be initialized to zero. Used currently only for
// error checking.
void set_init_to_zero(bool val) { _init_to_zero = val; }
bool init_to_zero() { return _init_to_zero; }
// Updates all the BlockOffsetArray's sharing this shared array to // Updates all the BlockOffsetArray's sharing this shared array to
// reflect the current "top"'s of their spaces. // reflect the current "top"'s of their spaces.
void update_offset_arrays(); // Not yet implemented! void update_offset_arrays(); // Not yet implemented!
...@@ -285,17 +306,23 @@ class BlockOffsetArray: public BlockOffsetTable { ...@@ -285,17 +306,23 @@ class BlockOffsetArray: public BlockOffsetTable {
// initialized to point backwards to the beginning of the covered region. // initialized to point backwards to the beginning of the covered region.
bool _init_to_zero; bool _init_to_zero;
// An assertion-checking helper method for the set_remainder*() methods below.
void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }
// Sets the entries // Sets the entries
// corresponding to the cards starting at "start" and ending at "end" // corresponding to the cards starting at "start" and ending at "end"
// to point back to the card before "start": the interval [start, end) // to point back to the card before "start": the interval [start, end)
// is right-open. // is right-open. The last parameter, reducing, indicates whether the
void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end); // updates to individual entries always reduce the entry from a higher
// to a lower value. (For example this would hold true during a temporal
                                                          // regime during which only block splits were updating the BOT.)
void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);
// Same as above, except that the args here are a card _index_ interval // Same as above, except that the args here are a card _index_ interval
// that is closed: [start_index, end_index] // that is closed: [start_index, end_index]
void set_remainder_to_point_to_start_incl(size_t start, size_t end); void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);
// A helper function for BOT adjustment/verification work // A helper function for BOT adjustment/verification work
void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action); void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false);
public: public:
// The space may not have its bottom and top set yet, which is why the // The space may not have its bottom and top set yet, which is why the
...@@ -303,7 +330,7 @@ class BlockOffsetArray: public BlockOffsetTable { ...@@ -303,7 +330,7 @@ class BlockOffsetArray: public BlockOffsetTable {
// elements of the array are initialized to zero. Otherwise, they are // elements of the array are initialized to zero. Otherwise, they are
// initialized to point backwards to the beginning. // initialized to point backwards to the beginning.
BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr, BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
bool init_to_zero); bool init_to_zero_);
// Note: this ought to be part of the constructor, but that would require // Note: this ought to be part of the constructor, but that would require
// "this" to be passed as a parameter to a member constructor for // "this" to be passed as a parameter to a member constructor for
...@@ -358,6 +385,12 @@ class BlockOffsetArray: public BlockOffsetTable { ...@@ -358,6 +385,12 @@ class BlockOffsetArray: public BlockOffsetTable {
// If true, initialize array slots with no allocated blocks to zero. // If true, initialize array slots with no allocated blocks to zero.
// Otherwise, make them point back to the front. // Otherwise, make them point back to the front.
bool init_to_zero() { return _init_to_zero; } bool init_to_zero() { return _init_to_zero; }
// Corresponding setter
void set_init_to_zero(bool val) {
_init_to_zero = val;
assert(_array != NULL, "_array should be non-NULL");
_array->set_init_to_zero(val);
}
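The setter above is why the constructor earlier in this change switched from initializing _init_to_zero in its initializer list to calling set_init_to_zero(): the per-space array keeps its own copy of the flag, but the value is also pushed into the shared array so that the shared set_offset_array() assertions can consult it via check_reducing_assertion(). A compressed sketch of that mirrored-flag arrangement, with both classes reduced to the fields involved:

#include <cassert>
#include <cstddef>

// Shared table: one instance, consulted by the assertion helpers.
struct SharedArray {
  bool _init_to_zero;
  void set_init_to_zero(bool val) { _init_to_zero = val; }
  bool init_to_zero() const { return _init_to_zero; }
};

// Per-space view: owns its own copy of the flag but keeps the shared one in
// sync, mirroring BlockOffsetArray::set_init_to_zero() above.
struct PerSpaceArray {
  SharedArray* _array;
  bool _init_to_zero;
  void set_init_to_zero(bool val) {
    _init_to_zero = val;
    assert(_array != NULL && "_array should be non-NULL");
    _array->set_init_to_zero(val);
  }
};

int main() {
  SharedArray shared; shared._init_to_zero = false;
  PerSpaceArray space; space._array = &shared; space._init_to_zero = false;
  space.set_init_to_zero(true);
  return shared.init_to_zero() ? 0 : 1;
}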
// Debugging // Debugging
// Return the index of the last entry in the "active" region. // Return the index of the last entry in the "active" region.
...@@ -424,16 +457,16 @@ class BlockOffsetArrayNonContigSpace: public BlockOffsetArray { ...@@ -424,16 +457,16 @@ class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
// of BOT is touched. It is assumed (and verified in the // of BOT is touched. It is assumed (and verified in the
// non-product VM) that the remaining cards of the block // non-product VM) that the remaining cards of the block
// are correct. // are correct.
void mark_block(HeapWord* blk_start, HeapWord* blk_end); void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
void mark_block(HeapWord* blk, size_t size) { void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
mark_block(blk, blk + size); mark_block(blk, blk + size, reducing);
} }
// Adjust _unallocated_block to indicate that a particular // Adjust _unallocated_block to indicate that a particular
// block has been newly allocated or freed. It is assumed (and // block has been newly allocated or freed. It is assumed (and
// verified in the non-product VM) that the BOT is correct for // verified in the non-product VM) that the BOT is correct for
// the given block. // the given block.
void allocated(HeapWord* blk_start, HeapWord* blk_end) { void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
// Verify that the BOT shows [blk, blk + blk_size) to be one block. // Verify that the BOT shows [blk, blk + blk_size) to be one block.
verify_single_block(blk_start, blk_end); verify_single_block(blk_start, blk_end);
if (BlockOffsetArrayUseUnallocatedBlock) { if (BlockOffsetArrayUseUnallocatedBlock) {
...@@ -441,14 +474,12 @@ class BlockOffsetArrayNonContigSpace: public BlockOffsetArray { ...@@ -441,14 +474,12 @@ class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
} }
} }
void allocated(HeapWord* blk, size_t size) { void allocated(HeapWord* blk, size_t size, bool reducing = false) {
allocated(blk, blk + size); allocated(blk, blk + size, reducing);
} }
void freed(HeapWord* blk_start, HeapWord* blk_end); void freed(HeapWord* blk_start, HeapWord* blk_end);
void freed(HeapWord* blk, size_t size) { void freed(HeapWord* blk, size_t size);
freed(blk, blk + size);
}
HeapWord* block_start_unsafe(const void* addr) const; HeapWord* block_start_unsafe(const void* addr) const;
...@@ -456,7 +487,6 @@ class BlockOffsetArrayNonContigSpace: public BlockOffsetArray { ...@@ -456,7 +487,6 @@ class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
// start of the block that contains the given address. // start of the block that contains the given address.
HeapWord* block_start_careful(const void* addr) const; HeapWord* block_start_careful(const void* addr) const;
// Verification & debugging: ensure that the offset table reflects // Verification & debugging: ensure that the offset table reflects
// the fact that the block [blk_start, blk_end) or [blk, blk + size) // the fact that the block [blk_start, blk_end) or [blk, blk + size)
// is a single block of storage. NOTE: can't const this because of // is a single block of storage. NOTE: can't const this because of
......
/* /*
* Copyright (c) 2000, 2002, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -55,10 +55,22 @@ inline HeapWord* BlockOffsetSharedArray::address_for_index(size_t index) const { ...@@ -55,10 +55,22 @@ inline HeapWord* BlockOffsetSharedArray::address_for_index(size_t index) const {
return result; return result;
} }
inline void BlockOffsetSharedArray::check_reducing_assertion(bool reducing) {
assert(reducing || !SafepointSynchronize::is_at_safepoint() || init_to_zero() ||
Thread::current()->is_VM_thread() ||
Thread::current()->is_ConcurrentGC_thread() ||
((!Thread::current()->is_ConcurrentGC_thread()) &&
ParGCRareEvent_lock->owned_by_self()), "Crack");
}
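Read as an implication, the new assertion says: a non-reducing update to the table, performed at a safepoint on a table that is not init_to_zero, must come from the VM thread, from a concurrent GC thread, or from some other thread that holds ParGCRareEvent_lock. Rewritten in that positive form, with the thread, safepoint and lock queries stubbed out as a hypothetical UpdateContext, the condition looks like this:

#include <cassert>

// Hypothetical stand-ins for the real thread/safepoint/lock queries.
struct UpdateContext {
  bool at_safepoint;
  bool is_vm_thread;
  bool is_concurrent_gc_thread;
  bool owns_par_gc_rare_event_lock;
};

// Same predicate as the assertion above, written as an implication:
// "if this is a non-reducing update at a safepoint on a non-init_to_zero
//  table, then the caller must be one of the privileged updaters."
static bool update_allowed(bool reducing, bool init_to_zero, const UpdateContext& c) {
  if (reducing || !c.at_safepoint || init_to_zero) {
    return true;                        // nothing to prove in these cases
  }
  return c.is_vm_thread
      || c.is_concurrent_gc_thread
      || c.owns_par_gc_rare_event_lock;
}

int main() {
  UpdateContext c = { true, false, false, true };
  return update_allowed(false, false, c) ? 0 : 1;   // allowed: the lock is held
}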
////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////
// BlockOffsetArrayNonContigSpace inlines // BlockOffsetArrayNonContigSpace inlines
////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////
inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk,
size_t size) {
freed(blk, blk + size);
}
inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk_start, inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk_start,
HeapWord* blk_end) { HeapWord* blk_end) {
// Verify that the BOT shows [blk_start, blk_end) to be one block. // Verify that the BOT shows [blk_start, blk_end) to be one block.
......
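freed(HeapWord*, size_t) used to be defined inside the class body in blockOffsetTable.hpp; the hunks above turn it into a declaration there and an out-of-class inline defined here in blockOffsetTable.inline.hpp, which is presumably why so many includeDB entries earlier in this change move their dependency from blockOffsetTable.hpp to blockOffsetTable.inline.hpp. A skeleton of the resulting split, with HeapWord* simplified to char* and the class name invented:

#include <cstddef>

// blockOffsetTable.hpp -- declarations only; code that merely holds a
// pointer or reference can keep including just this header.
class BOTNonContigSpaceSketch {
 public:
  void freed(char* blk_start, char* blk_end);   // still declared here
  void freed(char* blk, size_t size);           // body moved out of the class
};

// blockOffsetTable.inline.hpp -- the definition now lives here, so any .cpp
// file that actually calls freed() has to include the .inline.hpp to see it.
inline void BOTNonContigSpaceSketch::freed(char* blk, size_t size) {
  freed(blk, blk + size);
}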
...@@ -1712,7 +1712,7 @@ class CommandLineFlags { ...@@ -1712,7 +1712,7 @@ class CommandLineFlags {
develop(bool, VerifyBlockOffsetArray, false, \ develop(bool, VerifyBlockOffsetArray, false, \
"Do (expensive!) block offset array verification") \ "Do (expensive!) block offset array verification") \
\ \
product(bool, BlockOffsetArrayUseUnallocatedBlock, trueInDebug, \ product(bool, BlockOffsetArrayUseUnallocatedBlock, false, \
"Maintain _unallocated_block in BlockOffsetArray" \ "Maintain _unallocated_block in BlockOffsetArray" \
" (currently applicable only to CMS collector)") \ " (currently applicable only to CMS collector)") \
\ \
......