Commit eed80ba4 authored by Z zgu

7181995: NMT ON: NMT assertion failure assert(cur_vm->is_uncommit_record() || cur_vm->is_deallocation_record
Summary: Fixed virtual memory records merge and promotion logic, should be based on sequence number vs. base address order
Reviewed-by: coleenp, acorn
Parent 9c9d9209
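
Note on the fix: when the staging area holds several records for the same address (for example a malloc record followed by its free, or a reserve followed by an uncommit/release of the same region), the merge and promotion step must pick the record with the larger sequence number rather than relying on base-address order alone. Below is a minimal stand-alone sketch of that ordering rule; the names (VMRecord, pick_newer) are illustrative only and are not part of the HotSpot sources.

// Illustrative sketch, not HotSpot code: two tracking records that share a
// base address must be resolved by sequence number when merging, otherwise a
// later uncommit/release could be applied before the reserve it refers to.
#include <cassert>
#include <cstddef>
#include <cstdint>

struct VMRecord {
  uintptr_t base;   // start address of the tracked region
  size_t    size;   // region size
  int       seq;    // global sequence number of the tracking event
};

// When two records describe the same base address, the one generated later
// (larger sequence number) reflects the current state of the region.
const VMRecord& pick_newer(const VMRecord& a, const VMRecord& b) {
  assert(a.base == b.base);
  return (a.seq > b.seq) ? a : b;
}

int main() {
  VMRecord reserve = {0x1000, 4096, 10};   // earlier reserve record
  VMRecord release = {0x1000, 4096, 42};   // later release of the same region
  assert(&pick_newer(reserve, release) == &release);
  return 0;
}
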
@@ -318,10 +318,9 @@ void Thread::record_stack_base_and_size() {
     set_stack_size(os::current_stack_size());
     // record thread's native stack, stack grows downward
-    address vm_base = _stack_base - _stack_size;
-    MemTracker::record_virtual_memory_reserve(vm_base, _stack_size,
-      CURRENT_PC, this);
-    MemTracker::record_virtual_memory_type(vm_base, mtThreadStack);
+    address low_stack_addr = stack_base() - stack_size();
+    MemTracker::record_thread_stack(low_stack_addr, stack_size(), this,
+      CURRENT_PC);
   }
@@ -329,8 +328,8 @@ Thread::~Thread() {
   // Reclaim the objectmonitors from the omFreeList of the moribund thread.
   ObjectSynchronizer::omFlush (this) ;
-  MemTracker::record_virtual_memory_release((_stack_base - _stack_size),
-    _stack_size, this);
+  address low_stack_addr = stack_base() - stack_size();
+  MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);
   // deallocate data structures
   delete resource_area();
......
@@ -43,9 +43,9 @@ jint SequenceGenerator::next() {
 bool VMMemRegion::contains(const VMMemRegion* mr) const {
-  assert(base() != 0, "no base address");
+  assert(base() != 0, "Sanity check");
   assert(size() != 0 || committed_size() != 0,
-    "no range");
+    "Sanity check");
   address base_addr = base();
   address end_addr = base_addr +
     (is_reserve_record()? reserved_size(): committed_size());
@@ -61,14 +61,14 @@ bool VMMemRegion::contains(const VMMemRegion* mr) const {
     return (mr->base() >= base_addr &&
       (mr->base() + mr->committed_size()) <= end_addr);
   } else if (mr->is_type_tagging_record()) {
-    assert(mr->base() != 0, "no base");
-    return mr->base() == base_addr;
+    assert(mr->base() != NULL, "Sanity check");
+    return (mr->base() >= base_addr && mr->base() < end_addr);
   } else if (mr->is_release_record()) {
     assert(mr->base() != 0 && mr->size() > 0,
       "bad record");
     return (mr->base() == base_addr && mr->size() == size());
   } else {
-    assert(false, "what happened?");
+    ShouldNotReachHere();
     return false;
   }
 }
@@ -84,11 +84,7 @@ class MemPointerArrayIterator VALUE_OBJ_CLASS_SPEC {
 // implementation class
 class MemPointerArrayIteratorImpl : public MemPointerArrayIterator {
-#ifdef ASSERT
  protected:
-#else
- private:
-#endif
   MemPointerArray* _array;
   int _pos;
......
@@ -111,38 +111,32 @@ class VMMemPointerIterator : public MemPointerIterator {
     MemPointerIterator(arr) {
   }
-  // locate an exiting record that contains specified address, or
+  // locate an existing record that contains specified address, or
   // the record, where the record with specified address, should
-  // be inserted
+  // be inserted.
+  // virtual memory record array is sorted in address order, so
+  // binary search is performed
   virtual MemPointer* locate(address addr) {
-    VMMemRegion* cur = (VMMemRegion*)current();
-    VMMemRegion* next_p;
-    while (cur != NULL) {
-      if (cur->base() > addr) {
-        return cur;
-      } else {
-        // find nearest existing range that has base address <= addr
-        next_p = (VMMemRegion*)peek_next();
-        if (next_p != NULL && next_p->base() <= addr) {
-          cur = (VMMemRegion*)next();
-          continue;
-        }
-      }
-      if (cur->is_reserve_record() &&
-        cur->base() <= addr &&
-        (cur->base() + cur->size() > addr)) {
-        return cur;
-      } else if (cur->is_commit_record() &&
-        cur->base() <= addr &&
-        (cur->base() + cur->committed_size() > addr)) {
-        return cur;
-      }
-      cur = (VMMemRegion*)next();
-    }
-    return NULL;
+    int index_low = 0;
+    int index_high = _array->length();
+    int index_mid = (index_high + index_low) / 2;
+    int r = 1;
+    while (index_low < index_high && (r = compare(index_mid, addr)) != 0) {
+      if (r > 0) {
+        index_high = index_mid;
+      } else {
+        index_low = index_mid;
+      }
+      index_mid = (index_high + index_low) / 2;
+    }
+    if (r == 0) {
+      // update current location
+      _pos = index_mid;
+      return _array->at(index_mid);
+    } else {
+      return NULL;
+    }
   }
 #ifdef ASSERT
   virtual bool is_dup_pointer(const MemPointer* ptr1,
@@ -160,75 +154,99 @@ class VMMemPointerIterator : public MemPointerIterator {
       (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
   }
 #endif
+  // compare if an address falls into a memory region,
+  // return 0, if the address falls into a memory region at specified index
+  // return 1, if memory region pointed by specified index is higher than the address
+  // return -1, if memory region pointed by specified index is lower than the address
+  int compare(int index, address addr) const {
+    VMMemRegion* r = (VMMemRegion*)_array->at(index);
+    assert(r->is_reserve_record(), "Sanity check");
+    if (r->addr() > addr) {
+      return 1;
+    } else if (r->addr() + r->reserved_size() <= addr) {
+      return -1;
+    } else {
+      return 0;
+    }
+  }
 };
-class StagingWalker : public MemPointerArrayIterator {
+class MallocRecordIterator : public MemPointerArrayIterator {
  private:
   MemPointerArrayIteratorImpl _itr;
-  bool _is_vm_record;
-  bool _end_of_array;
-  VMMemRegionEx _vm_record;
-  MemPointerRecordEx _malloc_record;
  public:
-  StagingWalker(MemPointerArray* arr): _itr(arr) {
-    _end_of_array = false;
-    next();
+  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
   }
-  // return the pointer at current position
   MemPointer* current() const {
-    if (_end_of_array) {
+    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
+    assert(cur == NULL || !cur->is_vm_pointer(), "seek error");
+    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
+    if (next == NULL || next->addr() != cur->addr()) {
+      return cur;
+    } else {
+      assert(!cur->is_vm_pointer(), "Sanity check");
+      assert(cur->is_allocation_record() && next->is_deallocation_record(),
+        "sorting order");
+      assert(cur->seq() != next->seq(), "Sanity check");
+      return cur->seq() > next->seq() ? cur : next;
+    }
+  }
+  MemPointer* next() {
+    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
+    assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check");
+    MemPointerRecord* next = (MemPointerRecord*)_itr.next();
+    if (next == NULL) {
       return NULL;
     }
-    if (is_vm_record()) {
-      return (MemPointer*)&_vm_record;
-    } else {
-      return (MemPointer*)&_malloc_record;
+    if (cur->addr() == next->addr()) {
+      next = (MemPointerRecord*)_itr.next();
     }
+    return current();
   }
-  // return the next pointer and advance current position
-  MemPointer* next();
-  // type of 'current' record
-  bool is_vm_record() const {
-    return _is_vm_record;
-  }
-  // return the next poinger without advancing current position
-  MemPointer* peek_next() const {
-    assert(false, "not supported");
-    return NULL;
+  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
+  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
+  void remove() { ShouldNotReachHere(); }
+  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
+  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
+};
+class StagingArea : public _ValueObj {
+ private:
+  MemPointerArray* _malloc_data;
+  MemPointerArray* _vm_data;
+ public:
+  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
+    init();
   }
-  MemPointer* peek_prev() const {
-    assert(false, "not supported");
-    return NULL;
-  }
-  // remove the pointer at current position
-  void remove() {
-    assert(false, "not supported");
+  ~StagingArea() {
+    if (_malloc_data != NULL) delete _malloc_data;
+    if (_vm_data != NULL) delete _vm_data;
   }
-  // insert the pointer at current position
-  bool insert(MemPointer* ptr) {
-    assert(false, "not supported");
-    return false;
+  MallocRecordIterator malloc_record_walker() {
+    return MallocRecordIterator(malloc_data());
   }
-  bool insert_after(MemPointer* ptr) {
-    assert(false, "not supported");
-    return false;
+  MemPointerArrayIteratorImpl virtual_memory_record_walker();
+  bool init();
+  void clear() {
+    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
+    _malloc_data->shrink();
+    _malloc_data->clear();
+    _vm_data->clear();
   }
- private:
-  // consolidate all records referring to this vm region
-  bool consolidate_vm_records(VMMemRegionEx* vm_rec);
+  inline MemPointerArray* malloc_data() { return _malloc_data; }
+  inline MemPointerArray* vm_data() { return _vm_data; }
 };
 class MemBaseline;
 class MemSnapshot : public CHeapObj<mtNMT> {
  private:
   // the following two arrays contain records of all known lived memory blocks
@@ -237,9 +255,7 @@ class MemSnapshot : public CHeapObj<mtNMT> {
   // live virtual memory pointers
   MemPointerArray* _vm_ptrs;
-  // stagging a generation's data, before
-  // it can be prompted to snapshot
-  MemPointerArray* _staging_area;
+  StagingArea _staging_area;
   // the lock to protect this snapshot
   Monitor* _lock;
@@ -252,18 +268,19 @@ class MemSnapshot : public CHeapObj<mtNMT> {
   virtual ~MemSnapshot();
   // if we are running out of native memory
-  bool out_of_memory() const {
-    return (_alloc_ptrs == NULL || _staging_area == NULL ||
+  bool out_of_memory() {
+    return (_alloc_ptrs == NULL ||
+      _staging_area.malloc_data() == NULL ||
+      _staging_area.vm_data() == NULL ||
       _vm_ptrs == NULL || _lock == NULL ||
       _alloc_ptrs->out_of_memory() ||
-      _staging_area->out_of_memory() ||
       _vm_ptrs->out_of_memory());
   }
   // merge a per-thread memory recorder into staging area
   bool merge(MemRecorder* rec);
   // promote staged data to snapshot
-  void promote();
+  bool promote();
   void wait(long timeout) {
@@ -280,6 +297,9 @@ class MemSnapshot : public CHeapObj<mtNMT> {
  private:
   // copy pointer data from src to dest
   void copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
+  bool promote_malloc_records(MemPointerArrayIterator* itr);
+  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
 };
......
@@ -118,7 +118,10 @@ void MemTrackWorker::run() {
         _head = (_head + 1) % MAX_GENERATIONS;
       }
       // promote this generation data to snapshot
-      snapshot->promote();
+      if (!snapshot->promote()) {
+        // failed to promote, means out of memory
+        MemTracker::shutdown(MemTracker::NMT_out_of_memory);
+      }
     } else {
       snapshot->wait(1000);
       ThreadCritical tc;
......
@@ -39,7 +39,7 @@
 #include "thread_solaris.inline.hpp"
 #endif
-#ifdef _DEBUG_
+#ifdef _DEBUG
 #define DEBUG_CALLER_PC os::get_caller_pc(3)
 #else
 #define DEBUG_CALLER_PC 0
@@ -223,12 +223,33 @@ class MemTracker : AllStatic {
     }
   }
+  static inline void record_thread_stack(address addr, size_t size, Thread* thr,
+      address pc = 0) {
+    if (is_on()) {
+      assert(size > 0 && thr != NULL, "Sanity check");
+      create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag() | mtThreadStack,
+        size, pc, thr);
+      create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag() | mtThreadStack,
+        size, pc, thr);
+    }
+  }
+  static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
+    if (is_on()) {
+      assert(size > 0 && thr != NULL, "Sanity check");
+      create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag() | mtThreadStack,
+        size, DEBUG_CALLER_PC, thr);
+      create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag() | mtThreadStack,
+        size, DEBUG_CALLER_PC, thr);
+    }
+  }
   // record a virtual memory 'commit' call
   static inline void record_virtual_memory_commit(address addr, size_t size,
     address pc = 0, Thread* thread = NULL) {
     if (is_on()) {
       create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag(),
-        size, pc, thread);
+        size, DEBUG_CALLER_PC, thread);
     }
   }
@@ -237,7 +258,7 @@ class MemTracker : AllStatic {
     Thread* thread = NULL) {
     if (is_on()) {
       create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag(),
-        size, 0, thread);
+        size, DEBUG_CALLER_PC, thread);
     }
   }
@@ -246,7 +267,7 @@ class MemTracker : AllStatic {
     Thread* thread = NULL) {
     if (is_on()) {
       create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag(),
-        size, 0, thread);
+        size, DEBUG_CALLER_PC, thread);
     }
   }
@@ -257,7 +278,7 @@ class MemTracker : AllStatic {
       assert(base > 0, "wrong base address");
       assert((flags & (~mt_masks)) == 0, "memory type only");
       create_memory_record(base, (flags | MemPointerRecord::virtual_memory_type_tag()),
-        0, 0, thread);
+        0, DEBUG_CALLER_PC, thread);
     }
   }
......