Commit 1022621a authored by neliasso

8006952: Slow VM due to excessive code cache freelist iteration

Summary: Remove continuous free block requirement
Reviewed-by: kvn
Parent 9cd6f543
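The patch below removes the code cache's largest_free_block() bookkeeping, which walked the entire CodeHeap freelist on every compiler-thread loop iteration, and replaces it with cheap segment counters plus a reservation rule: allocations are now flagged critical or non-critical, and non-critical ones (ordinary nmethods) may not consume the last CodeCacheMinimumFreeSpace bytes of the heap, which stay available for critical blobs such as adapters and runtime stubs. A minimal standalone sketch of that reservation rule follows; the ToyCodeHeap type and all sizes are illustrative, not HotSpot code:

#include <cstddef>
#include <cstdio>

struct ToyCodeHeap {
  size_t reserved_segments;   // total segments backing the heap
  size_t next_segment;        // first never-used segment (bump pointer)
  size_t segment_size;        // bytes per segment
  size_t minimum_free_space;  // tail reserved for critical allocations

  // Mirrors CodeHeap::heap_unallocated_capacity(): bytes never handed out.
  size_t heap_unallocated_capacity() const {
    return (reserved_segments - next_segment) * segment_size;
  }

  // Mirrors the new check in CodeHeap::allocate(): non-critical requests
  // must leave minimum_free_space untouched; critical ones may use it all.
  bool allow(size_t bytes, bool is_critical) const {
    size_t unallocated = heap_unallocated_capacity();
    if (is_critical) return bytes <= unallocated;
    if (unallocated < minimum_free_space) return false;
    return bytes <= unallocated - minimum_free_space;
  }
};

int main() {
  // 24 free segments of 64 bytes = 1536 bytes, of which 1024 are reserved.
  ToyCodeHeap heap = {1024, 1000, 64, 1024};
  std::printf("non-critical  512B: %s\n", heap.allow(512,  false) ? "ok" : "fail");
  std::printf("non-critical  600B: %s\n", heap.allow(600,  false) ? "ok" : "fail");
  std::printf("critical     1280B: %s\n", heap.allow(1280, true)  ? "ok" : "fail");
  return 0;
}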
@@ -348,14 +348,14 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
 
 void* RuntimeStub::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
+  void* p = CodeCache::allocate(size, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }
 
 // operator new shared by all singletons:
 void* SingletonBlob::operator new(size_t s, unsigned size) {
-  void* p = CodeCache::allocate(size);
+  void* p = CodeCache::allocate(size, true);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }
@@ -172,7 +172,7 @@ nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
 static size_t maxCodeCacheUsed = 0;
 
-CodeBlob* CodeCache::allocate(int size) {
+CodeBlob* CodeCache::allocate(int size, bool is_critical) {
   // Do not seize the CodeCache lock here--if the caller has not
   // already done so, we are going to lose bigtime, since the code
   // cache will contain a garbage CodeBlob until the caller can
@@ -183,7 +183,7 @@ CodeBlob* CodeCache::allocate(int size) {
   CodeBlob* cb = NULL;
   _number_of_blobs++;
   while (true) {
-    cb = (CodeBlob*)_heap->allocate(size);
+    cb = (CodeBlob*)_heap->allocate(size, is_critical);
     if (cb != NULL) break;
     if (!_heap->expand_by(CodeCacheExpansionSize)) {
       // Expansion failed
@@ -192,8 +192,8 @@ CodeBlob* CodeCache::allocate(int size) {
     if (PrintCodeCacheExtension) {
       ResourceMark rm;
       tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
-                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
-                    (address)_heap->end() - (address)_heap->begin());
+                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
+                    (address)_heap->high() - (address)_heap->low_boundary());
     }
   }
   maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
@@ -608,13 +608,13 @@ void CodeCache::verify_oops() {
 
 address CodeCache::first_address() {
   assert_locked_or_safepoint(CodeCache_lock);
-  return (address)_heap->begin();
+  return (address)_heap->low_boundary();
 }
 
 address CodeCache::last_address() {
   assert_locked_or_safepoint(CodeCache_lock);
-  return (address)_heap->end();
+  return (address)_heap->high();
 }
@@ -996,10 +996,9 @@ void CodeCache::print() {
 
 void CodeCache::print_summary(outputStream* st, bool detailed) {
   size_t total = (_heap->high_boundary() - _heap->low_boundary());
   st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
-               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT
-               "Kb max_free_chunk=" SIZE_FORMAT "Kb",
+               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                total/K, (total - unallocated_capacity())/K,
-               maxCodeCacheUsed/K, unallocated_capacity()/K, largest_free_block()/K);
+               maxCodeCacheUsed/K, unallocated_capacity()/K);
 
   if (detailed) {
     st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
@@ -1018,19 +1017,8 @@ void CodeCache::print_summary(outputStream* st, bool detailed) {
 
 void CodeCache::log_state(outputStream* st) {
   st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
-            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'"
-            " largest_free_block='" SIZE_FORMAT "'",
+            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
             nof_blobs(), nof_nmethods(), nof_adapters(),
-            unallocated_capacity(), largest_free_block());
+            unallocated_capacity());
 }
-
-size_t CodeCache::largest_free_block() {
-  // This is called both with and without CodeCache_lock held so
-  // handle both cases.
-  if (CodeCache_lock->owned_by_self()) {
-    return _heap->largest_free_block();
-  } else {
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    return _heap->largest_free_block();
-  }
-}
@@ -70,7 +70,7 @@ class CodeCache : AllStatic {
   static void initialize();
 
   // Allocation/administration
-  static CodeBlob* allocate(int size);                     // allocates a new CodeBlob
+  static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
   static void commit(CodeBlob* cb);                        // called when the allocated CodeBlob has been filled
   static int alignment_unit();                             // guaranteed alignment of all CodeBlobs
   static int alignment_offset();                           // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
@@ -156,19 +156,13 @@ class CodeCache : AllStatic {
   static address low_bound()                     { return (address) _heap->low_boundary(); }
   static address high_bound()                    { return (address) _heap->high_boundary(); }
 
-  static bool has_space(int size) {
-    // Always leave some room in the CodeCache for I2C/C2I adapters
-    return largest_free_block() > (CodeCacheMinimumFreeSpace + size);
-  }
-
   // Profiling
   static address first_address();                // first address used for CodeBlobs
   static address last_address();                 // last address used for CodeBlobs
   static size_t  capacity()                      { return _heap->capacity(); }
   static size_t  max_capacity()                  { return _heap->max_capacity(); }
   static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
-  static size_t  largest_free_block();
-  static bool    needs_flushing()                { return largest_free_block() < CodeCacheFlushingMinimumFreeSpace; }
+  static bool    needs_flushing()                { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
   static bool    needs_cache_clean()             { return _needs_cache_clean; }
   static void    set_needs_cache_clean(bool v)   { _needs_cache_clean = v; }
@@ -501,18 +501,17 @@ nmethod* nmethod::new_native_nmethod(methodHandle method,
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
-    if (CodeCache::has_space(native_nmethod_size)) {
-      CodeOffsets offsets;
-      offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
-      offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
-      nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
-                                             compile_id, &offsets,
-                                             code_buffer, frame_size,
-                                             basic_lock_owner_sp_offset,
-                                             basic_lock_sp_offset, oop_maps);
-      NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm));
-      if (PrintAssembly && nm != NULL)
-        Disassembler::decode(nm);
+    CodeOffsets offsets;
+    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
+    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
+    nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
+                                           compile_id, &offsets,
+                                           code_buffer, frame_size,
+                                           basic_lock_owner_sp_offset,
+                                           basic_lock_sp_offset, oop_maps);
+    NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm));
+    if (PrintAssembly && nm != NULL) {
+      Disassembler::decode(nm);
     }
   }
   // verify nmethod
@@ -538,18 +537,17 @@ nmethod* nmethod::new_dtrace_nmethod(methodHandle method,
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
-    if (CodeCache::has_space(nmethod_size)) {
-      CodeOffsets offsets;
-      offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
-      offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
-      offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
+    CodeOffsets offsets;
+    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
+    offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
+    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
 
-      nm = new (nmethod_size) nmethod(method(), nmethod_size,
-                                      &offsets, code_buffer, frame_size);
+    nm = new (nmethod_size) nmethod(method(), nmethod_size,
+                                    &offsets, code_buffer, frame_size);
 
-      NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
-      if (PrintAssembly && nm != NULL)
-        Disassembler::decode(nm);
+    NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
+    if (PrintAssembly && nm != NULL) {
+      Disassembler::decode(nm);
     }
   }
   // verify nmethod
@@ -591,16 +589,16 @@ nmethod* nmethod::new_nmethod(methodHandle method,
       + round_to(handler_table->size_in_bytes(), oopSize)
       + round_to(nul_chk_table->size_in_bytes(), oopSize)
       + round_to(debug_info->data_size()       , oopSize);
-    if (CodeCache::has_space(nmethod_size)) {
-      nm = new (nmethod_size)
-        nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
-                orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
-                oop_maps,
-                handler_table,
-                nul_chk_table,
-                compiler,
-                comp_level);
-    }
+
+    nm = new (nmethod_size)
+      nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
+              orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
+              oop_maps,
+              handler_table,
+              nul_chk_table,
+              compiler,
+              comp_level);
+
     if (nm != NULL) {
       // To make dependency checking during class loading fast, record
       // the nmethod dependencies in the classes it is dependent on.
@@ -612,15 +610,18 @@ nmethod* nmethod::new_nmethod(methodHandle method,
       // classes the slow way is too slow.
       for (Dependencies::DepStream deps(nm); deps.next(); ) {
         Klass* klass = deps.context_type();
-        if (klass == NULL)  continue;  // ignore things like evol_method
+        if (klass == NULL) {
+          continue;  // ignore things like evol_method
+        }
         // record this nmethod as dependent on this klass
         InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
       }
     }
     NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm));
-    if (PrintAssembly && nm != NULL)
+    if (PrintAssembly && nm != NULL) {
       Disassembler::decode(nm);
+    }
   }
   // verify nmethod
@@ -798,13 +799,11 @@ nmethod::nmethod(
 }
 #endif // def HAVE_DTRACE_H
 
-void* nmethod::operator new(size_t size, int nmethod_size) {
-  void* alloc = CodeCache::allocate(nmethod_size);
-  guarantee(alloc != NULL, "CodeCache should have enough space");
-  return alloc;
+void* nmethod::operator new(size_t size, int nmethod_size) throw () {
+  // Not critical, may return NULL if there is too little contiguous memory
+  return CodeCache::allocate(nmethod_size);
 }
 
 nmethod::nmethod(
   Method* method,
   int nmethod_size,
@@ -1581,7 +1581,7 @@ void CompileBroker::compiler_thread_loop() {
       // We need this HandleMark to avoid leaking VM handles.
       HandleMark hm(thread);
 
-      if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) {
+      if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
         // the code cache is really full
         handle_full_code_cache();
       } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
@@ -42,7 +42,7 @@ CodeHeap::CodeHeap() {
   _log2_segment_size = 0;
   _next_segment = 0;
   _freelist = NULL;
-  _free_segments = 0;
+  _freelist_segments = 0;
 }
@@ -115,8 +115,8 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
   }
 
   on_code_mapping(_memory.low(), _memory.committed_size());
-  _number_of_committed_segments = number_of_segments(_memory.committed_size());
-  _number_of_reserved_segments  = number_of_segments(_memory.reserved_size());
+  _number_of_committed_segments = size_to_segments(_memory.committed_size());
+  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
 
   // reserve space for _segmap
@@ -149,8 +149,8 @@ bool CodeHeap::expand_by(size_t size) {
   if (!_memory.expand_by(dm)) return false;
   on_code_mapping(base, dm);
   size_t i = _number_of_committed_segments;
-  _number_of_committed_segments = number_of_segments(_memory.committed_size());
-  assert(_number_of_reserved_segments == number_of_segments(_memory.reserved_size()), "number of reserved segments should not change");
+  _number_of_committed_segments = size_to_segments(_memory.committed_size());
+  assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
   assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
   // expand _segmap space
   size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
@@ -176,33 +176,44 @@ void CodeHeap::clear() {
 }
 
-void* CodeHeap::allocate(size_t size) {
-  size_t length = number_of_segments(size + sizeof(HeapBlock));
-  assert(length * _segment_size >= sizeof(FreeBlock), "not enough room for FreeList");
+void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
+  size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
+  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
 
   // First check if we can satisfy the request from the freelist
   debug_only(verify());
-  HeapBlock* block = search_freelist(length);
+  HeapBlock* block = search_freelist(number_of_segments, is_critical);
   debug_only(if (VerifyCodeCacheOften) verify());
   if (block != NULL) {
-    assert(block->length() >= length && block->length() < length + CodeCacheMinBlockLength, "sanity check");
+    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
     assert(!block->free(), "must be marked free");
 #ifdef ASSERT
-    memset((void *)block->allocated_space(), badCodeHeapNewVal, size);
+    memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);
 #endif
     return block->allocated_space();
   }
 
-  if (length < CodeCacheMinBlockLength) {
-    length = CodeCacheMinBlockLength;
+  // Ensure minimum size for allocation to the heap.
+  if (number_of_segments < CodeCacheMinBlockLength) {
+    number_of_segments = CodeCacheMinBlockLength;
   }
-  if (_next_segment + length <= _number_of_committed_segments) {
-    mark_segmap_as_used(_next_segment, _next_segment + length);
+
+  if (!is_critical) {
+    // Make sure the allocation fits in the unallocated heap without using
+    // the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
+    if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
+      // Fail allocation
+      return NULL;
+    }
+  }
+
+  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
+    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
     HeapBlock* b = block_at(_next_segment);
-    b->initialize(length);
-    _next_segment += length;
+    b->initialize(number_of_segments);
+    _next_segment += number_of_segments;
 #ifdef ASSERT
-    memset((void *)b->allocated_space(), badCodeHeapNewVal, size);
+    memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);
 #endif
     return b->allocated_space();
   } else {
@@ -219,7 +230,7 @@ void CodeHeap::deallocate(void* p) {
 #ifdef ASSERT
   memset((void *)b->allocated_space(),
          badCodeHeapFreeVal,
-         size(b->length()) - sizeof(HeapBlock));
+         segments_to_size(b->length()) - sizeof(HeapBlock));
 #endif
   add_to_freelist(b);
@@ -299,32 +310,14 @@ size_t CodeHeap::max_capacity() const {
 }
 
 size_t CodeHeap::allocated_capacity() const {
-  // Start with the committed size in _memory;
-  size_t l = _memory.committed_size();
-
-  // Subtract the committed, but unused, segments
-  l -= size(_number_of_committed_segments - _next_segment);
-
-  // Subtract the size of the freelist
-  l -= size(_free_segments);
-
-  return l;
+  // size of used heap - size on freelist
+  return segments_to_size(_next_segment - _freelist_segments);
 }
 
-size_t CodeHeap::largest_free_block() const {
-  // First check unused space excluding free blocks.
-  size_t free_sz = size(_free_segments);
-  size_t unused  = max_capacity() - allocated_capacity() - free_sz;
-  if (unused >= free_sz)
-    return unused;
-
-  // Now check largest free block.
-  size_t len = 0;
-  for (FreeBlock* b = _freelist; b != NULL; b = b->link()) {
-    if (b->length() > len)
-      len = b->length();
-  }
-  return MAX2(unused, size(len));
+// Returns size of the unallocated heap block
+size_t CodeHeap::heap_unallocated_capacity() const {
+  // Total number of segments - number currently used
+  return segments_to_size(_number_of_reserved_segments - _next_segment);
 }
 
 // Free list management
@@ -365,7 +358,7 @@ void CodeHeap::add_to_freelist(HeapBlock *a) {
   assert(b != _freelist, "cannot be removed twice");
 
   // Mark as free and update free space count
-  _free_segments += b->length();
+  _freelist_segments += b->length();
   b->set_free();
 
   // First element in list?
@@ -400,7 +393,7 @@ void CodeHeap::add_to_freelist(HeapBlock *a) {
 
 // Search freelist for an entry on the list with the best fit
 // Return NULL if no one was found
-FreeBlock* CodeHeap::search_freelist(size_t length) {
+FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
   FreeBlock* best_block = NULL;
   FreeBlock* best_prev  = NULL;
   size_t best_length = 0;
@@ -411,6 +404,16 @@ FreeBlock* CodeHeap::search_freelist(size_t length) {
   while (cur != NULL) {
     size_t l = cur->length();
     if (l >= length && (best_block == NULL || best_length > l)) {
+      // Non-critical allocations are not allowed to use the last part of the code heap.
+      if (!is_critical) {
+        // Make sure the end of the allocation doesn't cross into the last part of the code heap
+        if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) {
+          // The freelist is sorted by address - if one entry fails, all subsequent entries will also fail.
+          break;
+        }
+      }
+
       // Remember best block, its previous element, and its length
       best_block = cur;
       best_prev  = prev;
@@ -452,7 +455,7 @@ FreeBlock* CodeHeap::search_freelist(size_t length) {
   }
 
   best_block->set_used();
-  _free_segments -= length;
+  _freelist_segments -= length;
   return best_block;
 }
@@ -478,7 +481,7 @@ void CodeHeap::verify() {
   }
 
   // Verify that freelist contains the right amount of free space
-  // guarantee(len == _free_segments, "wrong freelist");
+  // guarantee(len == _freelist_segments, "wrong freelist");
 
   // Verify that the number of free blocks is not out of hand.
   static int free_block_threshold = 10000;
@@ -91,11 +91,11 @@ class CodeHeap : public CHeapObj<mtCode> {
   size_t     _next_segment;
 
   FreeBlock* _freelist;
-  size_t     _free_segments;     // No. of segments in freelist
+  size_t     _freelist_segments; // No. of segments in freelist
 
   // Helper functions
-  size_t number_of_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; }
-  size_t size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; }
+  size_t size_to_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; }
+  size_t segments_to_size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; }
 
   size_t segment_for(void* p) const { return ((char*)p - _memory.low()) >> _log2_segment_size; }
   HeapBlock* block_at(size_t i) const { return (HeapBlock*)(_memory.low() + (i << _log2_segment_size)); }
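An aside on the renamed helpers above: size_to_segments() rounds a byte count up to whole segments via (size + segment_size - 1) >> log2(segment_size), and segments_to_size() shifts back. A hedged, self-contained illustration of the same arithmetic follows; the 64-byte segment size is an example, not HotSpot's configured value:

#include <cassert>
#include <cstddef>

int main() {
  const std::size_t log2_segment_size = 6;  // example: 64-byte segments
  const std::size_t segment_size = std::size_t(1) << log2_segment_size;

  // Same expressions as the CodeHeap helpers, written as plain lambdas.
  auto size_to_segments = [&](std::size_t size) {
    return (size + segment_size - 1) >> log2_segment_size;  // round up
  };
  auto segments_to_size = [&](std::size_t segments) {
    return segments << log2_segment_size;
  };

  assert(size_to_segments(1)  == 1);   // any non-zero size needs a segment
  assert(size_to_segments(64) == 1);   // exact fit
  assert(size_to_segments(65) == 2);   // spills into a second segment
  assert(segments_to_size(2)  == 128);
  return 0;
}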
@@ -110,7 +110,7 @@ class CodeHeap : public CHeapObj<mtCode> {
   // Toplevel freelist management
   void add_to_freelist(HeapBlock* b);
-  FreeBlock* search_freelist(size_t length);
+  FreeBlock* search_freelist(size_t length, bool is_critical);
 
   // Iteration helpers
   void* next_free(HeapBlock* b) const;
@@ -132,22 +132,19 @@ class CodeHeap : public CHeapObj<mtCode> {
   void clear();                                  // clears all heap contents
 
   // Memory allocation
-  void* allocate  (size_t size);                 // allocates a block of size or returns NULL
+  void* allocate  (size_t size, bool is_critical); // allocates a block of size or returns NULL
   void  deallocate(void* p);                     // deallocates a block
 
   // Attributes
-  void*  begin() const                           { return _memory.low (); }
-  void*  end() const                             { return _memory.high(); }
-  bool   contains(void* p) const                 { return begin() <= p && p < end(); }
-  void*  find_start(void* p) const;              // returns the block containing p or NULL
-  size_t alignment_unit() const;                 // alignment of any block
-  size_t alignment_offset() const;               // offset of first byte of any block, within the enclosing alignment unit
-  static size_t header_size();                   // returns the header size for each heap block
-
-  // Returns reserved area high and low addresses
-  char *low_boundary() const                     { return _memory.low_boundary (); }
-  char *high() const                             { return _memory.high(); }
-  char *high_boundary() const                    { return _memory.high_boundary(); }
+  char* low_boundary() const                     { return _memory.low_boundary (); }
+  char* high() const                             { return _memory.high(); }
+  char* high_boundary() const                    { return _memory.high_boundary(); }
+
+  bool   contains(const void* p) const           { return low_boundary() <= p && p < high(); }
+  void*  find_start(void* p) const;              // returns the block containing p or NULL
+  size_t alignment_unit() const;                 // alignment of any block
+  size_t alignment_offset() const;               // offset of first byte of any block, within the enclosing alignment unit
+  static size_t header_size();                   // returns the header size for each heap block
 
   // Iteration
@@ -161,8 +158,11 @@ class CodeHeap : public CHeapObj<mtCode> {
   size_t max_capacity() const;
   size_t allocated_capacity() const;
   size_t unallocated_capacity() const { return max_capacity() - allocated_capacity(); }
-  size_t largest_free_block() const;
 
+private:
+  size_t heap_unallocated_capacity() const;
+
+public:
   // Debugging
   void verify();
   void print() PRODUCT_RETURN;
@@ -1044,21 +1044,6 @@ void NonSafepointEmitter::emit_non_safepoint() {
   debug_info->end_non_safepoint(pc_offset);
 }
 
-// helper for fill_buffer bailout logic
-static void turn_off_compiler(Compile* C) {
-  if (CodeCache::largest_free_block() >= CodeCacheMinimumFreeSpace*10) {
-    // Do not turn off compilation if a single giant method has
-    // blown the code cache size.
-    C->record_failure("excessive request to CodeCache");
-  } else {
-    // Let CompilerBroker disable further compilations.
-    C->record_failure("CodeCache is full");
-  }
-}
-
 //------------------------------init_buffer------------------------------------
 CodeBuffer* Compile::init_buffer(uint* blk_starts) {
@@ -1158,7 +1143,7 @@ CodeBuffer* Compile::init_buffer(uint* blk_starts) {
 
   // Have we run out of code space?
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
-    turn_off_compiler(this);
+    C->record_failure("CodeCache is full");
     return NULL;
   }
   // Configure the code buffer.
@@ -1476,7 +1461,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
     // Verify that there is sufficient space remaining
     cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
     if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
-      turn_off_compiler(this);
+      C->record_failure("CodeCache is full");
      return;
    }
@@ -1633,7 +1618,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
   // One last check for failed CodeBuffer::expand:
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
-    turn_off_compiler(this);
+    C->record_failure("CodeCache is full");
     return;
   }
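The early exit added to CodeHeap::search_freelist() above deserves a note on why it is safe: the freelist is kept sorted by ascending address, so for a non-critical request the first candidate whose end would cross into the reserved tail (high_boundary() - CodeCacheMinimumFreeSpace) proves that every later candidate fails the same test, and the scan can stop instead of walking the whole list. A hedged standalone sketch of that idea; the ToyFreeBlock type and the 'limit' parameter are illustrative, and sizes here are in bytes rather than segments:

#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyFreeBlock {
  std::uintptr_t start;   // block address
  std::size_t    length;  // block size in bytes
};

// Best-fit search over an address-sorted freelist; returns an index or -1.
// 'limit' plays the role of high_boundary() - CodeCacheMinimumFreeSpace.
int search(const std::vector<ToyFreeBlock>& freelist,
           std::size_t length, bool is_critical, std::uintptr_t limit) {
  int best = -1;
  std::size_t best_length = 0;
  for (std::size_t i = 0; i < freelist.size(); i++) {
    const ToyFreeBlock& cur = freelist[i];
    if (cur.length < length) continue;               // too small, keep looking
    if (!is_critical && cur.start + length > limit) {
      break;  // address-sorted: every later block ends even higher
    }
    if (best == -1 || cur.length < best_length) {    // remember smallest fit
      best = static_cast<int>(i);
      best_length = cur.length;
    }
  }
  return best;
}

int main() {
  std::vector<ToyFreeBlock> fl = {{0x1000, 64}, {0x2000, 256}, {0x7f00, 512}};
  // A 128-byte non-critical request fits best in the second block.
  return search(fl, 128, /*is_critical=*/false, /*limit=*/0x8000) == 1 ? 0 : 1;
}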