Commit ad497b3b authored by H hseigel

7158805: Better rewriting of nested subroutine calls

Reviewed-by: mschoene, coleenp
Parent 89681ac7
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -248,7 +248,7 @@ class ChunkPool: public CHeapObj<mtInternal> {
   ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

   // Allocate a new chunk from the pool (might expand the pool)
-  _NOINLINE_ void* allocate(size_t bytes) {
+  _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
     assert(bytes == _size, "bad size");
     void* p = NULL;
     // No VM lock can be taken inside ThreadCritical lock, so os::malloc
@@ -258,9 +258,9 @@ class ChunkPool: public CHeapObj<mtInternal> {
       p = get_first();
     }
     if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
-    if (p == NULL)
+    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, "ChunkPool::allocate");
+    }
     return p;
   }
@@ -357,7 +357,7 @@ class ChunkPoolCleaner : public PeriodicTask {
 //--------------------------------------------------------------------------------------
 // Chunk implementation

-void* Chunk::operator new(size_t requested_size, size_t length) {
+void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) {
   // requested_size is equal to sizeof(Chunk) but in order for the arena
   // allocations to come out aligned as expected the size must be aligned
   // to expected arena alignment.
@@ -365,13 +365,14 @@ void* Chunk::operator new(size_t requested_size, size_t length) {
   assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
   size_t bytes = ARENA_ALIGN(requested_size) + length;
   switch (length) {
-   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
-   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
-   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
+   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
+   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
+   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   default: {
-     void *p = os::malloc(bytes, mtChunk, CALLER_PC);
-     if (p == NULL)
+     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
+     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, "Chunk::new");
+     }
     return p;
   }
   }
@@ -425,7 +426,7 @@ NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
 Arena::Arena(size_t init_size) {
   size_t round_size = (sizeof (char *)) - 1;
   init_size = (init_size+round_size) & ~round_size;
-  _first = _chunk = new (init_size) Chunk(init_size);
+  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
   set_size_in_bytes(init_size);
@@ -433,7 +434,7 @@ Arena::Arena(size_t init_size) {
 }

 Arena::Arena() {
-  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
+  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
   set_size_in_bytes(Chunk::init_size);
@@ -540,12 +541,9 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
   size_t len = MAX2(x, (size_t) Chunk::size);

   Chunk *k = _chunk;            // Get filled-up chunk address
-  _chunk = new (len) Chunk(len);
+  _chunk = new (alloc_failmode, len) Chunk(len);

   if (_chunk == NULL) {
-    if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
-      signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
-    }
     return NULL;
   }
   if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
...
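The core of the change above: ChunkPool::allocate and Chunk::operator new now take an AllocFailType, so a caller can choose between exiting the VM on allocation failure (EXIT_OOM) and getting NULL back (RETURN_NULL), and Arena::grow simply propagates the NULL. Below is a minimal standalone sketch of that pattern, not HotSpot code: the AllocFailStrategy names mirror the diff, while allocate(), malloc and exit are illustrative stand-ins for the pool allocator and vm_exit_out_of_memory.

#include <cstdio>
#include <cstdlib>

struct AllocFailStrategy {
  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
};
typedef AllocFailStrategy::AllocFailEnum AllocFailType;

// Stand-in for ChunkPool::allocate after the patch: the failure policy is
// now an explicit parameter instead of an unconditional VM exit.
void* allocate(size_t bytes, AllocFailType alloc_failmode) {
  void* p = malloc(bytes);
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    fprintf(stderr, "out of memory: %zu bytes\n", bytes); // vm_exit_out_of_memory stand-in
    exit(1);
  }
  return p; // may be NULL under RETURN_NULL
}

int main() {
  // A caller that can degrade gracefully opts into RETURN_NULL and
  // handles the NULL itself, as Arena::grow now does.
  void* p = allocate(64, AllocFailStrategy::RETURN_NULL);
  if (p == NULL) return 1;
  free(p);
  return 0;
}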
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -274,7 +274,7 @@ class Chunk: CHeapObj<mtChunk> {
   Chunk*       _next;     // Next Chunk in list
   const size_t _len;      // Size of this Chunk
  public:
-  void* operator new(size_t size, size_t length);
+  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length);
   void  operator delete(void* p);
   Chunk(size_t length);
@@ -337,10 +337,15 @@ protected:
   void signal_out_of_memory(size_t request, const char* whence) const;

-  void check_for_overflow(size_t request, const char* whence) const {
+  bool check_for_overflow(size_t request, const char* whence,
+      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
     if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
+      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
+        return false;
+      }
       signal_out_of_memory(request, whence);
     }
+    return true;
   }

  public:
@@ -364,7 +369,8 @@ protected:
     assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
     x = ARENA_ALIGN(x);
     debug_only(if (UseMallocOnly) return malloc(x);)
-    check_for_overflow(x, "Arena::Amalloc");
+    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
+      return NULL;
     NOT_PRODUCT(inc_bytes_allocated(x);)
     if (_hwm + x > _max) {
       return grow(x, alloc_failmode);
@@ -378,7 +384,8 @@ protected:
   void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
     assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
     debug_only(if (UseMallocOnly) return malloc(x);)
-    check_for_overflow(x, "Arena::Amalloc_4");
+    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
+      return NULL;
     NOT_PRODUCT(inc_bytes_allocated(x);)
     if (_hwm + x > _max) {
       return grow(x, alloc_failmode);
@@ -399,7 +406,8 @@ protected:
     size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
     x += delta;
 #endif
-    check_for_overflow(x, "Arena::Amalloc_D");
+    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
+      return NULL;
     NOT_PRODUCT(inc_bytes_allocated(x);)
     if (_hwm + x > _max) {
       return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
@@ -539,6 +547,9 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
 #define NEW_RESOURCE_ARRAY(type, size)\
   (type*) resource_allocate_bytes((size) * sizeof(type))

+#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
+  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)
+
 #define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
   (type*) resource_allocate_bytes(thread, (size) * sizeof(type))
...
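NEW_RESOURCE_ARRAY_RETURN_NULL is the piece that lets resource-array clients opt out of the VM-exit-on-OOM behavior. Here is a compilable sketch of how the two macro variants differ, with resource_allocate_bytes replaced by a hypothetical malloc-based stub (HotSpot's real version allocates from the current thread's resource area):

#include <cstdlib>

struct AllocFailStrategy { enum AllocFailEnum { EXIT_OOM, RETURN_NULL }; };
typedef AllocFailStrategy::AllocFailEnum AllocFailType;

// Stand-in stub so this sketch compiles on its own; not HotSpot's implementation.
char* resource_allocate_bytes(size_t size,
                              AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  char* p = (char*) malloc(size);
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) abort();
  return p;
}

// The two macros as defined in the diff above.
#define NEW_RESOURCE_ARRAY(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type))
#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

int main() {
  // With the RETURN_NULL variant, the caller owns the failure path.
  int* a = NEW_RESOURCE_ARRAY_RETURN_NULL(int, 16);
  if (a == NULL) return 1;
  free(a); // sketch-only; real resource arrays are reclaimed with the resource area
  return 0;
}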
@@ -642,11 +642,21 @@ int GenerateOopMap::next_bb_start_pc(BasicBlock *bb) {
 // CellType handling methods
 //

+// Allocate memory and throw LinkageError if failure.
+#define ALLOC_RESOURCE_ARRAY(var, type, count) \
+  var = NEW_RESOURCE_ARRAY_RETURN_NULL(type, count);              \
+  if (var == NULL) {                                              \
+    report_error("Cannot reserve enough memory to analyze this method"); \
+    return;                                                       \
+  }
+
 void GenerateOopMap::init_state() {
   _state_len = _max_locals + _max_stack + _max_monitors;
-  _state     = NEW_RESOURCE_ARRAY(CellTypeState, _state_len);
+  ALLOC_RESOURCE_ARRAY(_state, CellTypeState, _state_len);
   memset(_state, 0, _state_len * sizeof(CellTypeState));
-  _state_vec_buf = NEW_RESOURCE_ARRAY(char, MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */);
+
+  int count = MAX3(_max_locals, _max_stack, _max_monitors) + 1/*for null terminator char */;
+  ALLOC_RESOURCE_ARRAY(_state_vec_buf, char, count);
 }

 void GenerateOopMap::make_context_uninitialized() {
@@ -905,7 +915,7 @@ void GenerateOopMap::init_basic_blocks() {
   // But cumbersome since we don't know the stack heights yet.  (Nor the
   // monitor stack heights...)
-  _basic_blocks = NEW_RESOURCE_ARRAY(BasicBlock, _bb_count);
+  ALLOC_RESOURCE_ARRAY(_basic_blocks, BasicBlock, _bb_count);

   // Make a pass through the bytecodes.  Count the number of monitorenters.
   // This can be used as an upper bound on the monitor stack depth in programs
@@ -976,8 +986,8 @@ void GenerateOopMap::init_basic_blocks() {
     return;
   }

-  CellTypeState *basicBlockState =
-      NEW_RESOURCE_ARRAY(CellTypeState, bbNo * _state_len);
+  CellTypeState *basicBlockState;
+  ALLOC_RESOURCE_ARRAY(basicBlockState, CellTypeState, bbNo * _state_len);
   memset(basicBlockState, 0, bbNo * _state_len * sizeof(CellTypeState));

   // Make a pass over the basicblocks and assign their state vectors.
...
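Reading the macro definition above, the ALLOC_RESOURCE_ARRAY(_state, CellTypeState, _state_len) line in init_state() expands to roughly the following (a reconstruction from the macro text, not compiler output); report_error records the failure so oop map generation for the method is abandoned rather than the VM exited:

// ALLOC_RESOURCE_ARRAY(_state, CellTypeState, _state_len); expands to:
_state = NEW_RESOURCE_ARRAY_RETURN_NULL(CellTypeState, _state_len);
if (_state == NULL) {
  report_error("Cannot reserve enough memory to analyze this method");
  return; // bail out of this method's analysis instead of killing the process
}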