Commit ee35bcfa authored by zgu

7199092: NMT: NMT needs to deal with overlapped virtual memory ranges

Summary: Enhanced virtual memory tracking to track committed regions as well as reserved regions, so NMT can now generate a virtual memory map.
Reviewed-by: acorn, coleenp
Parent c4f6e748
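The recurring pattern in the perfMemory_* files below is: when a region is mapped directly (mmap/MapViewOfFile) instead of going through the os:: wrappers, the call site itself records the reservation with NMT and tags its memory type. The baselining code then builds the virtual memory map by consolidating adjacent regions. The following standalone sketch (not HotSpot code; the Region struct and add_reserved() helper are invented for illustration) models the consolidation rule used for reserved regions: merge a new region into the previous map entry only if it is adjacent, of the same memory type, and reserved from the same callsite.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified model of one entry in the virtual memory map.
struct Region {
    uintptr_t base;   // start address
    size_t    size;   // length in bytes
    int       type;   // memory type tag (e.g. mtInternal, mtClassShared)
    uintptr_t pc;     // callsite that reserved the range
};

// Consolidate a reserved region into the map if it is adjacent to the
// previous entry, of the same type, and reserved from the same callsite;
// otherwise start a new map entry.  (Mirrors the criteria listed in
// MemBaseline::baseline_vm_details for reserved regions.)
void add_reserved(std::vector<Region>& map, const Region& r) {
    if (!map.empty()) {
        Region& last = map.back();
        if (last.base + last.size == r.base &&
            last.type == r.type &&
            last.pc == r.pc) {
            last.size += r.size;   // expand the existing entry
            return;
        }
    }
    map.push_back(r);              // start a new entry
}

int main() {
    std::vector<Region> vm_map;
    // The first two adjacent reservations from the same callsite collapse
    // into one entry; the third starts a new entry because the type differs.
    add_reserved(vm_map, {0x10000, 0x1000, /*type*/ 1, /*pc*/ 0xAAAA});
    add_reserved(vm_map, {0x11000, 0x1000, 1, 0xAAAA});
    add_reserved(vm_map, {0x12000, 0x1000, 2, 0xAAAA});
    for (const Region& r : vm_map) {
        std::printf("[0x%zx - 0x%zx] type=%d pc=0x%zx\n",
                    (size_t)r.base, (size_t)(r.base + r.size),
                    r.type, (size_t)r.pc);
    }
    return 0;
}
```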
@@ -30,6 +30,7 @@
 #include "os_bsd.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/perfMemory.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/exceptions.hpp"
 // put OS-includes here
@@ -753,6 +754,10 @@ static char* mmap_create_shared(size_t size) {
   // clear the shared memory region
   (void)::memset((void*) mapAddress, 0, size);
+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   return mapAddress;
 }
@@ -912,6 +917,10 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
               "Could not map PerfMemory");
   }
+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   *addr = mapAddress;
   *sizep = size;
...
@@ -30,6 +30,7 @@
 #include "os_linux.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/perfMemory.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/exceptions.hpp"
 // put OS-includes here
@@ -753,6 +754,10 @@ static char* mmap_create_shared(size_t size) {
   // clear the shared memory region
   (void)::memset((void*) mapAddress, 0, size);
+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   return mapAddress;
 }
@@ -912,6 +917,10 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
               "Could not map PerfMemory");
   }
+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   *addr = mapAddress;
   *sizep = size;
...
@@ -55,6 +55,7 @@
 #include "runtime/threadCritical.hpp"
 #include "runtime/timer.hpp"
 #include "services/attachListener.hpp"
+#include "services/memTracker.hpp"
 #include "services/runtimeService.hpp"
 #include "thread_solaris.inline.hpp"
 #include "utilities/decoder.hpp"
@@ -3072,11 +3073,12 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
   // Give it a try, if the kernel honors the hint we can return immediately.
   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
   volatile int err = errno;
   if (addr == requested_addr) {
     return addr;
   } else if (addr != NULL) {
-    unmap_memory(addr, bytes);
+    pd_unmap_memory(addr, bytes);
   }
   if (PrintMiscellaneous && Verbose) {
...
@@ -30,6 +30,7 @@
 #include "os_solaris.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/perfMemory.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/exceptions.hpp"
 // put OS-includes here
@@ -768,6 +769,10 @@ static char* mmap_create_shared(size_t size) {
   // clear the shared memory region
   (void)::memset((void*) mapAddress, 0, size);
+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   return mapAddress;
 }
@@ -927,6 +932,10 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
               "Could not map PerfMemory");
   }
+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   *addr = mapAddress;
   *sizep = size;
...
@@ -30,6 +30,7 @@
 #include "os_windows.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/perfMemory.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/exceptions.hpp"
 #include <windows.h>
@@ -1496,6 +1497,10 @@ static char* mapping_create_shared(size_t size) {
   // clear the shared memory region
   (void)memset(mapAddress, '\0', size);
+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   return (char*) mapAddress;
 }
@@ -1672,6 +1677,11 @@ static void open_file_mapping(const char* user, int vmid,
               "Could not map PerfMemory");
   }
+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   *addrp = (char*)mapAddress;
   *sizep = size;
@@ -1824,6 +1834,8 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
   }
   remove_file_mapping(addr);
+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_release((address)addr, bytes);
 }
 char* PerfMemory::backing_store_filename() {
...
@@ -433,19 +433,18 @@ Arena::Arena() {
   NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }
-Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
-  set_size_in_bytes(a->size_in_bytes());
-  NOT_PRODUCT(Atomic::inc(&_instance_count);)
-}
 Arena *Arena::move_contents(Arena *copy) {
   copy->destruct_contents();
   copy->_chunk = _chunk;
   copy->_hwm   = _hwm;
   copy->_max   = _max;
   copy->_first = _first;
-  copy->set_size_in_bytes(size_in_bytes());
+  // workaround rare racing condition, which could double count
+  // the arena size by native memory tracking
+  size_t size = size_in_bytes();
+  set_size_in_bytes(0);
+  copy->set_size_in_bytes(size);
   // Destroy original arena
   reset();
   return copy;            // Return Arena with contents
@@ -497,6 +496,9 @@ void Arena::destruct_contents() {
     char* end = _first->next() ? _first->top() : _hwm;
     free_malloced_objects(_first, _first->bottom(), end, _hwm);
   }
+  // reset size before chop to avoid a rare racing condition
+  // that can have total arena memory exceed total chunk memory
+  set_size_in_bytes(0);
   _first->chop();
   reset();
 }
...
@@ -144,8 +144,10 @@ enum MemoryType {
   mtNMT               = 0x0A00,  // memory used by native memory tracking
   mtChunk             = 0x0B00,  // chunk that holds content of arenas
   mtJavaHeap          = 0x0C00,  // Java heap
-  mtDontTrack         = 0x0D00,  // memory we donot or cannot track
-  mt_number_of_types  = 0x000C,  // number of memory types
+  mtClassShared       = 0x0D00,  // class data sharing
+  mt_number_of_types  = 0x000D,  // number of memory types (mtDontTrack
+                                 // is not included as validate type)
+  mtDontTrack         = 0x0E00,  // memory we do not or cannot track
   mt_masks            = 0x7F00,
   // object type mask
@@ -342,7 +344,6 @@ protected:
 public:
   Arena();
   Arena(size_t init_size);
-  Arena(Arena *old);
   ~Arena();
   void  destruct_contents();
   char* hwm() const             { return _hwm; }
...
@@ -29,6 +29,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/defaultStream.hpp"
 # include <sys/stat.h>
@@ -344,24 +345,13 @@ ReservedSpace FileMapInfo::reserve_shared_memory() {
     fail_continue(err_msg("Unable to reserved shared space at required address " INTPTR_FORMAT, requested_addr));
     return rs;
   }
+  // the reserved virtual memory is for mapping class data sharing archive
+  if (MemTracker::is_on()) {
+    MemTracker::record_virtual_memory_type((address)rs.base(), mtClassShared);
+  }
   return rs;
 }
-// Memory map a region in the address space.
-char* FileMapInfo::map_region(int i, ReservedSpace rs) {
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
-  size_t used = si->_used;
-  size_t size = align_size_up(used, os::vm_allocation_granularity());
-  ReservedSpace mapped_rs = rs.first_part(size, true, true);
-  ReservedSpace unmapped_rs = rs.last_part(size);
-  mapped_rs.release();
-  return map_region(i);
-}
 // Memory map a region in the address space.
 static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode"};
...
@@ -125,7 +125,6 @@ public:
                      size_t capacity, bool read_only, bool allow_exec);
   void  write_bytes(const void* buffer, int count);
   void  write_bytes_aligned(const void* buffer, int count);
-  char* map_region(int i, ReservedSpace rs);
   char* map_region(int i);
   void  unmap_region(int i);
   void  close();
...
@@ -663,8 +663,8 @@ bool MetaspaceShared::is_in_shared_space(const void* p) {
     if (_ro_base == NULL || _rw_base == NULL) {
       return false;
     } else {
-      return ((p > _ro_base && p < (_ro_base + SharedReadOnlySize)) ||
-              (p > _rw_base && p < (_rw_base + SharedReadWriteSize)));
+      return ((p >= _ro_base && p < (_ro_base + SharedReadOnlySize)) ||
+              (p >= _rw_base && p < (_rw_base + SharedReadWriteSize)));
     }
   }
@@ -693,14 +693,6 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
   ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
   if (!shared_rs.is_reserved()) return false;
-  // Split reserved memory into pieces (windows needs this)
-  ReservedSpace ro_rs   = shared_rs.first_part(SharedReadOnlySize);
-  ReservedSpace tmp_rs1 = shared_rs.last_part(SharedReadOnlySize);
-  ReservedSpace rw_rs   = tmp_rs1.first_part(SharedReadWriteSize);
-  ReservedSpace tmp_rs2 = tmp_rs1.last_part(SharedReadWriteSize);
-  ReservedSpace md_rs   = tmp_rs2.first_part(SharedMiscDataSize);
-  ReservedSpace mc_rs   = tmp_rs2.last_part(SharedMiscDataSize);
   // Map each shared region
   if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
       (_rw_base = mapinfo->map_region(rw)) != NULL &&
...
@@ -127,15 +127,21 @@ protected:
   void reset_to_mark() {
     if (UseMallocOnly) free_malloced_objects();
-    if( _chunk->next() )        // Delete later chunks
+    if( _chunk->next() ) {      // Delete later chunks
+      // reset arena size before delete chunks. Otherwise, the total
+      // arena size could exceed total chunk size
+      assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check");
+      _area->set_size_in_bytes(size_in_bytes());
       _chunk->next_chop();
+    } else {
+      assert(_area->size_in_bytes() == size_in_bytes(), "Sanity check");
+    }
     _area->_chunk = _chunk;     // Roll back arena to saved chunk
     _area->_hwm = _hwm;
     _area->_max = _max;
     // clear out this chunk (to detect allocation bugs)
     if (ZapResourceArea) memset(_hwm, badResourceValue, _max - _hwm);
-    _area->set_size_in_bytes(size_in_bytes());
   }
   ~ResourceMark() {
@@ -219,15 +225,21 @@ protected:
   void reset_to_mark() {
     if (UseMallocOnly) free_malloced_objects();
-    if( _chunk->next() )        // Delete later chunks
+    if( _chunk->next() ) {      // Delete later chunks
+      // reset arena size before delete chunks. Otherwise, the total
+      // arena size could exceed total chunk size
+      assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check");
+      _area->set_size_in_bytes(size_in_bytes());
       _chunk->next_chop();
+    } else {
+      assert(_area->size_in_bytes() == size_in_bytes(), "Sanity check");
+    }
     _area->_chunk = _chunk;     // Roll back arena to saved chunk
     _area->_hwm = _hwm;
     _area->_max = _max;
     // clear out this chunk (to detect allocation bugs)
     if (ZapResourceArea) memset(_hwm, badResourceValue, _max - _hwm);
-    _area->set_size_in_bytes(size_in_bytes());
   }
   ~DeoptResourceMark() {
...
@@ -158,13 +158,18 @@ HandleMark::~HandleMark() {
   // Delete later chunks
   if( _chunk->next() ) {
+    // reset arena size before delete chunks. Otherwise, the total
+    // arena size could exceed total chunk size
+    assert(area->size_in_bytes() > size_in_bytes(), "Sanity check");
+    area->set_size_in_bytes(size_in_bytes());
     _chunk->next_chop();
+  } else {
+    assert(area->size_in_bytes() == size_in_bytes(), "Sanity check");
   }
   // Roll back arena to saved top markers
   area->_chunk = _chunk;
   area->_hwm = _hwm;
   area->_max = _max;
-  area->set_size_in_bytes(_size_in_bytes);
 #ifdef ASSERT
   // clear out first chunk (to detect allocation bugs)
   if (ZapVMHandleArea) {
...
@@ -297,6 +297,7 @@ class HandleMark {
   void set_previous_handle_mark(HandleMark* mark) { _previous_handle_mark = mark; }
   HandleMark* previous_handle_mark() const        { return _previous_handle_mark; }
+  size_t size_in_bytes() const { return _size_in_bytes; }
  public:
   HandleMark();                            // see handles_inline.hpp
   HandleMark(Thread* thread)               { initialize(thread); }
...
@@ -136,13 +136,18 @@ inline void HandleMark::pop_and_restore() {
   HandleArea* area = _area;   // help compilers with poor alias analysis
   // Delete later chunks
   if( _chunk->next() ) {
+    // reset arena size before delete chunks. Otherwise, the total
+    // arena size could exceed total chunk size
+    assert(area->size_in_bytes() > size_in_bytes(), "Sanity check");
+    area->set_size_in_bytes(size_in_bytes());
     _chunk->next_chop();
+  } else {
+    assert(area->size_in_bytes() == size_in_bytes(), "Sanity check");
   }
   // Roll back arena to saved top markers
   area->_chunk = _chunk;
   area->_hwm = _hwm;
   area->_max = _max;
-  area->set_size_in_bytes(_size_in_bytes);
   debug_only(area->_handle_mark_nesting--);
 }
...
@@ -600,9 +600,7 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
   if (PrintMalloc && tty != NULL) tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock);
   // we do not track MallocCushion memory
-  if (MemTracker::is_on()) {
     MemTracker::record_malloc((address)memblock, size, memflags, caller == 0 ? CALLER_PC : caller);
-  }
   return memblock;
 }
@@ -613,7 +611,7 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller
   NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
   NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
   void* ptr = ::realloc(memblock, size);
-  if (ptr != NULL && MemTracker::is_on()) {
+  if (ptr != NULL) {
     MemTracker::record_realloc((address)memblock, (address)ptr, size, memflags,
      caller == 0 ? CALLER_PC : caller);
   }
@@ -1401,7 +1399,7 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
   char* result = pd_reserve_memory(bytes, addr, alignment_hint);
-  if (result != NULL && MemTracker::is_on()) {
+  if (result != NULL) {
     MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   }
@@ -1409,7 +1407,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
 }
 char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
   char* result = pd_attempt_reserve_memory_at(bytes, addr);
-  if (result != NULL && MemTracker::is_on()) {
+  if (result != NULL) {
     MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   }
   return result;
@@ -1422,7 +1420,7 @@ void os::split_reserved_memory(char *base, size_t size,
 bool os::commit_memory(char* addr, size_t bytes, bool executable) {
   bool res = pd_commit_memory(addr, bytes, executable);
-  if (res && MemTracker::is_on()) {
+  if (res) {
     MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
   }
   return res;
@@ -1431,7 +1429,7 @@ bool os::commit_memory(char* addr, size_t bytes, bool executable) {
 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
                        bool executable) {
   bool res = os::pd_commit_memory(addr, size, alignment_hint, executable);
-  if (res && MemTracker::is_on()) {
+  if (res) {
     MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
   }
   return res;
@@ -1458,8 +1456,9 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset,
                      char *addr, size_t bytes, bool read_only,
                      bool allow_exec) {
   char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
-  if (result != NULL && MemTracker::is_on()) {
+  if (result != NULL) {
     MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+    MemTracker::record_virtual_memory_commit((address)result, bytes, CALLER_PC);
   }
   return result;
 }
@@ -1474,6 +1473,7 @@ char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
 bool os::unmap_memory(char *addr, size_t bytes) {
   bool result = pd_unmap_memory(addr, bytes);
   if (result) {
+    MemTracker::record_virtual_memory_uncommit((address)addr, bytes);
     MemTracker::record_virtual_memory_release((address)addr, bytes);
   }
   return result;
...
@@ -324,11 +324,9 @@ void Thread::record_stack_base_and_size() {
 #if INCLUDE_NMT
   // record thread's native stack, stack grows downward
-  if (MemTracker::is_on()) {
     address stack_low_addr = stack_base() - stack_size();
     MemTracker::record_thread_stack(stack_low_addr, stack_size(), this,
       CURRENT_PC);
-  }
 #endif // INCLUDE_NMT
 }
@@ -345,6 +343,9 @@ Thread::~Thread() {
   if (_stack_base != NULL) {
     address low_stack_addr = stack_base() - stack_size();
     MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);
+#ifdef ASSERT
+    set_stack_base(NULL);
+#endif
   }
 #endif // INCLUDE_NMT
@@ -1521,10 +1522,12 @@ JavaThread::~JavaThread() {
     tty->print_cr("terminate thread %p", this);
   }
-  // Info NMT that this JavaThread is exiting, its memory
-  // recorder should be collected
+  // By now, this thread should already be invisible to safepoint,
+  // and its per-thread recorder also collected.
   assert(!is_safepoint_visible(), "wrong state");
-  MemTracker::thread_exiting(this);
+#if INCLUDE_NMT
+  assert(get_recorder() == NULL, "Already collected");
+#endif // INCLUDE_NMT
   // JSR166 -- return the parker to the free list
   Parker::Release(_parker);
@@ -2425,6 +2428,7 @@ void JavaThread::create_stack_guard_pages() {
 }
 void JavaThread::remove_stack_guard_pages() {
+  assert(Thread::current() == this, "from different thread");
   if (_stack_guard_state == stack_guard_unused) return;
   address low_addr = stack_base() - stack_size();
   size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();
@@ -4093,7 +4097,10 @@ void Threads::remove(JavaThread* p) {
     // Now, this thread is not visible to safepoint
     p->set_safepoint_visible(false);
+    // once the thread becomes safepoint invisible, we can not use its per-thread
+    // recorder. And Threads::do_threads() no longer walks this thread, so we have
+    // to release its per-thread recorder here.
+    MemTracker::thread_exiting(p);
   } // unlock Threads_lock
   // Since Events::log uses a lock, we grab it outside the Threads_lock
...
@@ -404,6 +404,8 @@ static AttachOperationFunctionInfo funcs[] = {
 static void attach_listener_thread_entry(JavaThread* thread, TRAPS) {
   os::set_priority(thread, NearMaxPriority);
+  thread->record_stack_base_and_size();
   if (AttachListener::pd_init() != 0) {
     return;
   }
...
@@ -40,6 +40,7 @@ MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
   {mtSymbol,   "Symbol"},
   {mtNMT,      "Memory Tracking"},
   {mtChunk,    "Pooled Free Chunks"},
+  {mtClassShared,"Shared spaces for classes"},
   {mtNone,     "Unknown"}  // It can happen when type tagging records are lagging
                            // behind
 };
@@ -55,6 +56,7 @@ MemBaseline::MemBaseline() {
   _malloc_cs = NULL;
   _vm_cs = NULL;
+  _vm_map = NULL;
   _number_of_classes = 0;
   _number_of_threads = 0;
@@ -72,6 +74,11 @@ void MemBaseline::clear() {
     _vm_cs = NULL;
   }
+  if (_vm_map != NULL) {
+    delete _vm_map;
+    _vm_map = NULL;
+  }
   reset();
 }
@@ -85,6 +92,7 @@ void MemBaseline::reset() {
   if (_malloc_cs != NULL) _malloc_cs->clear();
   if (_vm_cs != NULL) _vm_cs->clear();
+  if (_vm_map != NULL) _vm_map->clear();
   for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
     _malloc_data[index].clear();
@@ -94,39 +102,33 @@ void MemBaseline::reset() {
 }
 MemBaseline::~MemBaseline() {
-  if (_malloc_cs != NULL) {
-    delete _malloc_cs;
-  }
-  if (_vm_cs != NULL) {
-    delete _vm_cs;
-  }
+  clear();
 }
 // baseline malloc'd memory records, generate overall summary and summaries by
 // memory types
 bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
-  MemPointerArrayIteratorImpl mItr((MemPointerArray*)malloc_records);
-  MemPointerRecord* mptr = (MemPointerRecord*)mItr.current();
+  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
+  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
   size_t used_arena_size = 0;
   int index;
-  while (mptr != NULL) {
-    index = flag2index(FLAGS_TO_MEMORY_TYPE(mptr->flags()));
-    size_t size = mptr->size();
+  while (malloc_ptr != NULL) {
+    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
+    size_t size = malloc_ptr->size();
     _total_malloced += size;
     _malloc_data[index].inc(size);
-    if (MemPointerRecord::is_arena_record(mptr->flags())) {
+    if (MemPointerRecord::is_arena_record(malloc_ptr->flags())) {
       // see if arena size record present
-      MemPointerRecord* next_p = (MemPointerRecordEx*)mItr.peek_next();
-      if (MemPointerRecord::is_arena_size_record(next_p->flags())) {
-        assert(next_p->is_size_record_of_arena(mptr), "arena records do not match");
-        size = next_p->size();
+      MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
+      if (MemPointerRecord::is_arena_size_record(next_malloc_ptr->flags())) {
+        assert(next_malloc_ptr->is_size_record_of_arena(malloc_ptr), "arena records do not match");
+        size = next_malloc_ptr->size();
         _arena_data[index].inc(size);
         used_arena_size += size;
-        mItr.next();
+        malloc_itr.next();
       }
     }
-    mptr = (MemPointerRecordEx*)mItr.next();
+    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
   }
   // substract used arena size to get size of arena chunk in free list
@@ -142,20 +144,23 @@ bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records)
 // baseline mmap'd memory records, generate overall summary and summaries by
 // memory types
 bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
-  MemPointerArrayIteratorImpl vItr((MemPointerArray*)vm_records);
-  VMMemRegion* vptr = (VMMemRegion*)vItr.current();
+  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
+  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
   int index;
-  while (vptr != NULL) {
-    index = flag2index(FLAGS_TO_MEMORY_TYPE(vptr->flags()));
-    // we use the number of thread stack to count threads
-    if (IS_MEMORY_TYPE(vptr->flags(), mtThreadStack)) {
-      _number_of_threads ++;
-    }
-    _total_vm_reserved += vptr->reserved_size();
-    _total_vm_committed += vptr->committed_size();
-    _vm_data[index].inc(vptr->reserved_size(), vptr->committed_size());
-    vptr = (VMMemRegion*)vItr.next();
+  while (vm_ptr != NULL) {
+    if (vm_ptr->is_reserved_region()) {
+      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
+      // we use the number of thread stack to count threads
+      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
+        _number_of_threads ++;
+      }
+      _total_vm_reserved += vm_ptr->size();
+      _vm_data[index].inc(vm_ptr->size(), 0);
+    } else {
+      _total_vm_committed += vm_ptr->size();
+      _vm_data[index].inc(0, vm_ptr->size());
+    }
+    vm_ptr = (VMMemRegion*)vm_itr.next();
   }
   return true;
 }
@@ -165,41 +170,57 @@ bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
 bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
   assert(MemTracker::track_callsite(), "detail tracking is off");
-  MemPointerArrayIteratorImpl mItr((MemPointerArray*)malloc_records);
-  MemPointerRecordEx* mptr = (MemPointerRecordEx*)mItr.current();
-  MallocCallsitePointer mp;
+  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
+  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
+  MallocCallsitePointer malloc_callsite;
+  // initailize malloc callsite array
   if (_malloc_cs == NULL) {
     _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
     // out of native memory
-    if (_malloc_cs == NULL) {
+    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
       return false;
     }
   } else {
     _malloc_cs->clear();
   }
+  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);
+  // sort into callsite pc order. Details are aggregated by callsites
+  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
+  bool ret = true;
   // baseline memory that is totaled over 1 KB
-  while (mptr != NULL) {
-    if (!MemPointerRecord::is_arena_size_record(mptr->flags())) {
+  while (malloc_ptr != NULL) {
+    if (!MemPointerRecord::is_arena_size_record(malloc_ptr->flags())) {
       // skip thread stacks
-      if (!IS_MEMORY_TYPE(mptr->flags(), mtThreadStack)) {
-        if (mp.addr() != mptr->pc()) {
-          if ((mp.amount()/K) > 0) {
-            if (!_malloc_cs->append(&mp)) {
-              return false;
+      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
+        if (malloc_callsite.addr() != malloc_ptr->pc()) {
+          if ((malloc_callsite.amount()/K) > 0) {
+            if (!_malloc_cs->append(&malloc_callsite)) {
+              ret = false;
+              break;
            }
          }
-          mp = MallocCallsitePointer(mptr->pc());
+          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
        }
-        mp.inc(mptr->size());
+        malloc_callsite.inc(malloc_ptr->size());
      }
    }
-    mptr = (MemPointerRecordEx*)mItr.next();
+    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
   }
-  if (mp.addr() != 0 && (mp.amount()/K) > 0) {
-    if (!_malloc_cs->append(&mp)) {
+  // restore to address order. Snapshot malloc data is maintained in memory
+  // address order.
+  malloc_data->sort((FN_SORT)malloc_sort_by_addr);
+  if (!ret) {
+    return false;
+  }
+  // deal with last record
+  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
+    if (!_malloc_cs->append(&malloc_callsite)) {
      return false;
    }
  }
@@ -210,34 +231,106 @@ bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records)
 bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
   assert(MemTracker::track_callsite(), "detail tracking is off");
-  VMCallsitePointer vp;
-  MemPointerArrayIteratorImpl vItr((MemPointerArray*)vm_records);
-  VMMemRegionEx* vptr = (VMMemRegionEx*)vItr.current();
+  VMCallsitePointer  vm_callsite;
+  VMCallsitePointer* cur_callsite = NULL;
+  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
+  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();
+  // initialize virtual memory map array
+  if (_vm_map == NULL) {
+    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
+    if (_vm_map == NULL || _vm_map->out_of_memory()) {
+      return false;
+    }
+  } else {
+    _vm_map->clear();
+  }
+  // initialize virtual memory callsite array
   if (_vm_cs == NULL) {
     _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
-    if (_vm_cs == NULL) {
+    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }
-  while (vptr != NULL) {
-    if (vp.addr() != vptr->pc()) {
-      if (!_vm_cs->append(&vp)) {
-        return false;
-      }
-      vp = VMCallsitePointer(vptr->pc());
-    }
-    vp.inc(vptr->size(), vptr->committed_size());
-    vptr = (VMMemRegionEx*)vItr.next();
-  }
-  if (vp.addr() != 0) {
-    if (!_vm_cs->append(&vp)) {
-      return false;
-    }
-  }
+  // consolidate virtual memory data
+  VMMemRegionEx* reserved_rec = NULL;
+  VMMemRegionEx* committed_rec = NULL;
+  // vm_ptr is coming in increasing base address order
+  while (vm_ptr != NULL) {
+    if (vm_ptr->is_reserved_region()) {
+      // consolidate reserved memory regions for virtual memory map.
+      // The criteria for consolidation is:
+      // 1. two adjacent reserved memory regions
+      // 2. belong to the same memory type
+      // 3. reserved from the same callsite
+      if (reserved_rec == NULL ||
+        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
+        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
+        reserved_rec->pc() != vm_ptr->pc()) {
+        if (!_vm_map->append(vm_ptr)) {
+          return false;
+        }
+        // inserted reserved region, we need the pointer to the element in virtual
+        // memory map array.
+        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
+      } else {
+        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
+      }
+      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
+        return false;
+      }
+      vm_callsite = VMCallsitePointer(vm_ptr->pc());
+      cur_callsite = &vm_callsite;
+      vm_callsite.inc(vm_ptr->size(), 0);
+    } else {
+      // consolidate committed memory regions for virtual memory map
+      // The criterial is:
+      // 1. two adjacent committed memory regions
+      // 2. committed from the same callsite
+      if (committed_rec == NULL ||
+        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
+        committed_rec->pc() != vm_ptr->pc()) {
+        if (!_vm_map->append(vm_ptr)) {
+          return false;
+        }
+        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
+      } else {
+        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
+      }
+      vm_callsite.inc(0, vm_ptr->size());
+    }
+    vm_ptr = (VMMemRegionEx*)vm_itr.next();
+  }
+  // deal with last record
+  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
+    return false;
+  }
+  // sort it into callsite pc order. Details are aggregated by callsites
+  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);
+  // walk the array to consolidate record by pc
+  MemPointerArrayIteratorImpl itr(_vm_cs);
+  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
+  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
+  while (next_rec != NULL) {
+    assert(callsite_rec != NULL, "Sanity check");
+    if (next_rec->addr() == callsite_rec->addr()) {
+      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
+      itr.remove();
+      next_rec = (VMCallsitePointer*)itr.current();
+    } else {
+      callsite_rec = next_rec;
+      next_rec = (VMCallsitePointer*)itr.next();
+    }
+  }
   return true;
 }
@@ -251,12 +344,8 @@ bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
   _number_of_classes = SystemDictionary::number_of_classes();
   if (!summary_only && MemTracker::track_callsite() && _baselined) {
-    ((MemPointerArray*)snapshot._alloc_ptrs)->sort((FN_SORT)malloc_sort_by_pc);
-    ((MemPointerArray*)snapshot._vm_ptrs)->sort((FN_SORT)vm_sort_by_pc);
     _baselined = baseline_malloc_details(snapshot._alloc_ptrs) &&
       baseline_vm_details(snapshot._vm_ptrs);
-    ((MemPointerArray*)snapshot._alloc_ptrs)->sort((FN_SORT)malloc_sort_by_addr);
-    ((MemPointerArray*)snapshot._vm_ptrs)->sort((FN_SORT)vm_sort_by_addr);
   }
   return _baselined;
 }
@@ -278,7 +367,7 @@ const char* MemBaseline::type2name(MEMFLAGS type) {
       return MemType2NameMap[index]._name;
    }
  }
-  assert(false, "no type");
+  assert(false, err_msg("bad type %x", type));
   return NULL;
 }
@@ -341,13 +430,6 @@ int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
   return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
 }
-// sort snapshot mmap'd records in callsite pc order
-int MemBaseline::vm_sort_by_pc(const void* p1, const void* p2) {
-  assert(MemTracker::track_callsite(),"Just check");
-  const VMMemRegionEx* mp1 = (const VMMemRegionEx*)p1;
-  const VMMemRegionEx* mp2 = (const VMMemRegionEx*)p2;
-  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
-}
 // sort baselined mmap'd records in size (reserved size) order
 int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
@@ -376,12 +458,3 @@ int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
   return delta;
 }
-// sort snapshot mmap'd records in memory block address order
-int MemBaseline::vm_sort_by_addr(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const VMMemRegion* mp1 = (const VMMemRegion*)p1;
-  const VMMemRegion* mp2 = (const VMMemRegion*)p2;
-  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
-  assert(delta != 0, "dup pointer");
-  return delta;
-}
...
@@ -320,6 +320,8 @@ class MemBaseline : public _ValueObj {
   // only available when detail tracking is on.
   MemPointerArray* _malloc_cs;
   MemPointerArray* _vm_cs;
+  // virtual memory map
+  MemPointerArray* _vm_map;
  private:
   static MemType2Name  MemType2NameMap[NUMBER_OF_MEMORY_TYPE];
@@ -432,9 +434,6 @@ class MemBaseline : public _ValueObj {
   static int malloc_sort_by_pc(const void* p1, const void* p2);
   static int malloc_sort_by_addr(const void* p1, const void* p2);
-  static int vm_sort_by_pc(const void* p1, const void* p2);
-  static int vm_sort_by_addr(const void* p1, const void* p2);
  private:
   // sorting functions for baselined records
   static int bl_malloc_sort_by_size(const void* p1, const void* p2);
...
@@ -40,35 +40,3 @@ jint SequenceGenerator::next() {
   return seq;
 }
-bool VMMemRegion::contains(const VMMemRegion* mr) const {
-  assert(base() != 0, "Sanity check");
-  assert(size() != 0 || committed_size() != 0,
-    "Sanity check");
-  address base_addr = base();
-  address end_addr = base_addr +
-    (is_reserve_record()? reserved_size(): committed_size());
-  if (mr->is_reserve_record()) {
-    if (mr->base() == base_addr && mr->size() == size()) {
-      // the same range
-      return true;
-    }
-    return false;
-  } else if (mr->is_commit_record() || mr->is_uncommit_record()) {
-    assert(mr->base() != 0 && mr->committed_size() > 0,
-      "bad record");
-    return (mr->base() >= base_addr &&
-      (mr->base() + mr->committed_size()) <= end_addr);
-  } else if (mr->is_type_tagging_record()) {
-    assert(mr->base() != NULL, "Sanity check");
-    return (mr->base() >= base_addr && mr->base() < end_addr);
-  } else if (mr->is_release_record()) {
-    assert(mr->base() != 0 && mr->size() > 0,
-      "bad record");
-    return (mr->base() == base_addr && mr->size() == size());
-  } else {
-    ShouldNotReachHere();
-    return false;
-  }
-}
...
@@ -291,6 +291,26 @@ public:
   inline bool is_type_tagging_record() const {
     return is_virtual_memory_type_record(_flags);
   }
+  // if the two memory pointer records actually represent the same
+  // memory block
+  inline bool is_same_region(const MemPointerRecord* other) const {
+    return (addr() == other->addr() && size() == other->size());
+  }
+  // if this memory region fully contains another one
+  inline bool contains_region(const MemPointerRecord* other) const {
+    return contains_region(other->addr(), other->size());
+  }
+  // if this memory region fully contains specified memory range
+  inline bool contains_region(address add, size_t sz) const {
+    return (addr() <= add && addr() + size() >= add + sz);
+  }
+  inline bool contains_address(address add) const {
+    return (addr() <= add && addr() + size() > add);
+  }
 };
 // MemPointerRecordEx also records callsite pc, from where
@@ -321,65 +341,31 @@ class MemPointerRecordEx : public MemPointerRecord {
   }
 };
-// a virtual memory region
+// a virtual memory region. The region can represent a reserved
+// virtual memory region or a committed memory region
 class VMMemRegion : public MemPointerRecord {
-private:
-  // committed size
-  size_t _committed_size;
 public:
-  VMMemRegion(): _committed_size(0) { }
+  VMMemRegion() { }
   void init(const MemPointerRecord* mp) {
-    assert(mp->is_vm_pointer(), "not virtual memory pointer");
+    assert(mp->is_vm_pointer(), "Sanity check");
     _addr = mp->addr();
-    if (mp->is_commit_record() || mp->is_uncommit_record()) {
-      _committed_size = mp->size();
-      set_size(_committed_size);
-    } else {
     set_size(mp->size());
-      _committed_size = 0;
-    }
     set_flags(mp->flags());
   }
   VMMemRegion& operator=(const VMMemRegion& other) {
     MemPointerRecord::operator=(other);
-    _committed_size = other.committed_size();
     return *this;
   }
-  inline bool is_reserve_record() const {
-    return is_virtual_memory_reserve_record(flags());
-  }
-  inline bool is_release_record() const {
-    return is_virtual_memory_release_record(flags());
-  }
-  // resize reserved VM range
-  inline void set_reserved_size(size_t new_size) {
-    assert(new_size >= committed_size(), "resize");
-    set_size(new_size);
-  }
-  inline void commit(size_t size) {
-    _committed_size += size;
-  }
-  inline void uncommit(size_t size) {
-    if (_committed_size >= size) {
-      _committed_size -= size;
-    } else {
-      _committed_size = 0;
-    }
-  }
-  /*
-   * if this virtual memory range covers whole range of
-   * the other VMMemRegion
-   */
-  bool contains(const VMMemRegion* mr) const;
+  inline bool is_reserved_region() const {
+    return is_allocation_record();
+  }
+  inline bool is_committed_region() const {
+    return is_commit_record();
+  }
   /* base address of this virtual memory range */
   inline address base() const {
@@ -391,13 +377,28 @@ public:
     set_flags(flags() | (f & mt_masks));
   }
-  // release part of memory range
-  inline void partial_release(address add, size_t sz) {
-    assert(add >= addr() && add < addr() + size(), "not valid address");
-    // for now, it can partially release from the both ends,
-    // but not in the middle
+  // expand this region to also cover specified range.
+  // The range has to be on either end of the memory region.
+  void expand_region(address addr, size_t sz) {
+    if (addr < base()) {
+      assert(addr + sz == base(), "Sanity check");
+      _addr = addr;
+      set_size(size() + sz);
+    } else {
+      assert(base() + size() == addr, "Sanity check");
+      set_size(size() + sz);
+    }
+  }
+  // exclude the specified address range from this region.
+  // The excluded memory range has to be on either end of this memory
+  // region.
+  inline void exclude_region(address add, size_t sz) {
+    assert(is_reserved_region() || is_committed_region(), "Sanity check");
+    assert(addr() != NULL && size() != 0, "Sanity check");
+    assert(add >= addr() && add < addr() + size(), "Sanity check");
     assert(add == addr() || (add + sz) == (addr() + size()),
-      "release in the middle");
+      "exclude in the middle");
     if (add == addr()) {
       set_addr(add + sz);
       set_size(size() - sz);
@@ -405,16 +406,6 @@ public:
       set_size(size() - sz);
     }
   }
-  // the committed size of the virtual memory block
-  inline size_t committed_size() const {
-    return _committed_size;
-  }
-  // the reserved size of the virtual memory block
-  inline size_t reserved_size() const {
-    return size();
-  }
 };
 class VMMemRegionEx : public VMMemRegion {
...
@@ -31,14 +31,19 @@
 #include "services/memTracker.hpp"
 MemPointer* SequencedRecordIterator::next_record() {
-  MemPointer* itr_cur = _itr.current();
-  if (itr_cur == NULL) return NULL;
-  MemPointer* itr_next = _itr.next();
-  while (itr_next != NULL &&
-         same_kind((MemPointerRecord*)itr_cur, (MemPointerRecord*)itr_next)) {
+  MemPointerRecord* itr_cur = (MemPointerRecord*)_itr.current();
+  if (itr_cur == NULL) {
+    return itr_cur;
+  }
+  MemPointerRecord* itr_next = (MemPointerRecord*)_itr.next();
+  // don't collapse virtual memory records
+  while (itr_next != NULL && !itr_cur->is_vm_pointer() &&
+         !itr_next->is_vm_pointer() &&
+         same_kind(itr_cur, itr_next)) {
     itr_cur = itr_next;
-    itr_next = _itr.next();
+    itr_next = (MemPointerRecord*)_itr.next();
   }
   return itr_cur;
...
@@ -188,6 +188,7 @@ class SequencedRecordIterator : public MemPointerArrayIterator {
   // Test if the two records are the same kind: the same memory block and allocation
   // type.
   inline bool same_kind(const MemPointerRecord* p1, const MemPointerRecord* p2) const {
+    assert(!p1->is_vm_pointer() && !p2->is_vm_pointer(), "malloc pointer only");
     return (p1->addr() == p2->addr() &&
       (p1->flags() & MemPointerRecord::tag_masks) ==
       (p2->flags() & MemPointerRecord::tag_masks));
...
...@@ -51,6 +51,7 @@ void BaselineReporter::report_baseline(const MemBaseline& baseline, bool summary ...@@ -51,6 +51,7 @@ void BaselineReporter::report_baseline(const MemBaseline& baseline, bool summary
report_summaries(baseline); report_summaries(baseline);
if (!summary_only && MemTracker::track_callsite()) { if (!summary_only && MemTracker::track_callsite()) {
report_virtual_memory_map(baseline);
report_callsites(baseline); report_callsites(baseline);
} }
_outputer.done(); _outputer.done();
...@@ -74,6 +75,25 @@ void BaselineReporter::report_summaries(const MemBaseline& baseline) { ...@@ -74,6 +75,25 @@ void BaselineReporter::report_summaries(const MemBaseline& baseline) {
_outputer.done_category_summary(); _outputer.done_category_summary();
} }
void BaselineReporter::report_virtual_memory_map(const MemBaseline& baseline) {
_outputer.start_virtual_memory_map();
MemBaseline* pBL = const_cast<MemBaseline*>(&baseline);
MemPointerArrayIteratorImpl itr = MemPointerArrayIteratorImpl(pBL->_vm_map);
VMMemRegionEx* rgn = (VMMemRegionEx*)itr.current();
while (rgn != NULL) {
if (rgn->is_reserved_region()) {
_outputer.reserved_memory_region(FLAGS_TO_MEMORY_TYPE(rgn->flags()),
rgn->base(), rgn->base() + rgn->size(), amount_in_current_scale(rgn->size()), rgn->pc());
} else {
_outputer.committed_memory_region(rgn->base(), rgn->base() + rgn->size(),
amount_in_current_scale(rgn->size()), rgn->pc());
}
rgn = (VMMemRegionEx*)itr.next();
}
_outputer.done_virtual_memory_map();
}
void BaselineReporter::report_callsites(const MemBaseline& baseline) { void BaselineReporter::report_callsites(const MemBaseline& baseline) {
_outputer.start_callsite(); _outputer.start_callsite();
MemBaseline* pBL = const_cast<MemBaseline*>(&baseline); MemBaseline* pBL = const_cast<MemBaseline*>(&baseline);
...@@ -324,6 +344,40 @@ void BaselineTTYOutputer::done_category_summary() { ...@@ -324,6 +344,40 @@ void BaselineTTYOutputer::done_category_summary() {
_output->print_cr(" "); _output->print_cr(" ");
} }
void BaselineTTYOutputer::start_virtual_memory_map() {
_output->print_cr("Virtual memory map:");
}
void BaselineTTYOutputer::reserved_memory_region(MEMFLAGS type, address base, address end,
size_t size, address pc) {
const char* unit = memory_unit(_scale);
char buf[128];
int offset;
_output->print_cr(" ");
_output->print_cr("[" PTR_FORMAT " - " PTR_FORMAT "] reserved %d%s for %s", base, end, size, unit,
MemBaseline::type2name(type));
if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
_output->print_cr("\t\tfrom [%s+0x%x]", buf, offset);
}
}
void BaselineTTYOutputer::committed_memory_region(address base, address end, size_t size, address pc) {
const char* unit = memory_unit(_scale);
char buf[128];
int offset;
_output->print("\t[" PTR_FORMAT " - " PTR_FORMAT "] committed %d%s", base, end, size, unit);
if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
_output->print_cr(" from [%s+0x%x]", buf, offset);
}
}
void BaselineTTYOutputer::done_virtual_memory_map() {
_output->print_cr(" ");
}
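For orientation, the format strings above render each reserved region on its own line, followed by an indented line for every committed sub-region. A rough illustration of the resulting section is shown below; the addresses, sizes, memory type and symbol names are made up for this example and assume the KB scale.

Virtual memory map:

[0x00007f3c10000000 - 0x00007f3c10400000] reserved 4096KB for Thread Stack
		from [JavaThread::run()+0x1a0]
	[0x00007f3c10001000 - 0x00007f3c10400000] committed 4092KB from [JavaThread::run()+0x1a0]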
void BaselineTTYOutputer::start_callsite() { void BaselineTTYOutputer::start_callsite() {
_output->print_cr("Details:"); _output->print_cr("Details:");
_output->print_cr(" "); _output->print_cr(" ");
...@@ -337,7 +391,7 @@ void BaselineTTYOutputer::malloc_callsite(address pc, size_t malloc_amt, ...@@ -337,7 +391,7 @@ void BaselineTTYOutputer::malloc_callsite(address pc, size_t malloc_amt,
size_t malloc_count) { size_t malloc_count) {
if (malloc_amt > 0) { if (malloc_amt > 0) {
const char* unit = memory_unit(_scale); const char* unit = memory_unit(_scale);
char buf[64]; char buf[128];
int offset; int offset;
if (pc == 0) { if (pc == 0) {
_output->print("[BOOTSTRAP]%18s", " "); _output->print("[BOOTSTRAP]%18s", " ");
...@@ -357,7 +411,7 @@ void BaselineTTYOutputer::virtual_memory_callsite(address pc, size_t reserved_am ...@@ -357,7 +411,7 @@ void BaselineTTYOutputer::virtual_memory_callsite(address pc, size_t reserved_am
size_t committed_amt) { size_t committed_amt) {
if (reserved_amt > 0) { if (reserved_amt > 0) {
const char* unit = memory_unit(_scale); const char* unit = memory_unit(_scale);
char buf[64]; char buf[128];
int offset; int offset;
if (pc == 0) { if (pc == 0) {
_output->print("[BOOTSTRAP]%18s", " "); _output->print("[BOOTSTRAP]%18s", " ");
...@@ -502,7 +556,7 @@ void BaselineTTYOutputer::diff_malloc_callsite(address pc, ...@@ -502,7 +556,7 @@ void BaselineTTYOutputer::diff_malloc_callsite(address pc,
int malloc_diff, int malloc_count_diff) { int malloc_diff, int malloc_count_diff) {
if (malloc_diff != 0) { if (malloc_diff != 0) {
const char* unit = memory_unit(_scale); const char* unit = memory_unit(_scale);
char buf[64]; char buf[128];
int offset; int offset;
if (pc == 0) { if (pc == 0) {
_output->print_cr("[BOOTSTRAP]%18s", " "); _output->print_cr("[BOOTSTRAP]%18s", " ");
......
...@@ -93,6 +93,11 @@ class BaselineOutputer : public StackObj { ...@@ -93,6 +93,11 @@ class BaselineOutputer : public StackObj {
virtual void done_category_summary() = 0; virtual void done_category_summary() = 0;
virtual void start_virtual_memory_map() = 0;
virtual void reserved_memory_region(MEMFLAGS type, address base, address end, size_t size, address pc) = 0;
virtual void committed_memory_region(address base, address end, size_t size, address pc) = 0;
virtual void done_virtual_memory_map() = 0;
/* /*
* Report callsite information * Report callsite information
*/ */
...@@ -136,6 +141,7 @@ class BaselineReporter : public StackObj { ...@@ -136,6 +141,7 @@ class BaselineReporter : public StackObj {
private: private:
void report_summaries(const MemBaseline& baseline); void report_summaries(const MemBaseline& baseline);
void report_virtual_memory_map(const MemBaseline& baseline);
void report_callsites(const MemBaseline& baseline); void report_callsites(const MemBaseline& baseline);
void diff_summaries(const MemBaseline& cur, const MemBaseline& prev); void diff_summaries(const MemBaseline& cur, const MemBaseline& prev);
...@@ -251,6 +257,13 @@ class BaselineTTYOutputer : public BaselineOutputer { ...@@ -251,6 +257,13 @@ class BaselineTTYOutputer : public BaselineOutputer {
void done_category_summary(); void done_category_summary();
// virtual memory map
void start_virtual_memory_map();
void reserved_memory_region(MEMFLAGS type, address base, address end, size_t size, address pc);
void committed_memory_region(address base, address end, size_t size, address pc);
void done_virtual_memory_map();
/* /*
* Report callsite information * Report callsite information
*/ */
......
...@@ -31,6 +31,220 @@ ...@@ -31,6 +31,220 @@
#include "services/memSnapshot.hpp" #include "services/memSnapshot.hpp"
#include "services/memTracker.hpp" #include "services/memTracker.hpp"
bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
VMMemRegionEx new_rec;
assert(rec->is_allocation_record() || rec->is_commit_record(),
"Sanity check");
if (MemTracker::track_callsite()) {
new_rec.init((MemPointerRecordEx*)rec);
} else {
new_rec.init(rec);
}
return insert(&new_rec);
}
bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
VMMemRegionEx new_rec;
assert(rec->is_allocation_record() || rec->is_commit_record(),
"Sanity check");
if (MemTracker::track_callsite()) {
new_rec.init((MemPointerRecordEx*)rec);
} else {
new_rec.init(rec);
}
return insert_after(&new_rec);
}
// we don't consolidate reserved regions, since they may be categorized
// into different memory types.
bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
assert(rec->is_allocation_record(), "Sanity check");
VMMemRegion* cur = (VMMemRegion*)current();
// we don't have anything yet
if (cur == NULL) {
return insert_record(rec);
}
assert(cur->is_reserved_region(), "Sanity check");
// duplicated records
if (cur->is_same_region(rec)) {
return true;
}
assert(cur->base() > rec->addr(), "Just check: locate()");
assert(rec->addr() + rec->size() <= cur->base(), "Can not overlap");
return insert_record(rec);
}
// we do consolidate committed regions
bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
assert(rec->is_commit_record(), "Sanity check");
VMMemRegion* cur;
cur = (VMMemRegion*)current();
assert(cur->is_reserved_region() && cur->contains_region(rec),
"Sanity check");
// thread's native stack is always marked as "committed", ignore
// the "commit" operation for creating stack guard pages
if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
return true;
}
cur = (VMMemRegion*)next();
while (cur != NULL && cur->is_committed_region()) {
// duplicated commit records
if(cur->contains_region(rec)) {
return true;
}
if (cur->base() > rec->addr()) {
// committed regions can not overlap
assert(rec->addr() + rec->size() <= cur->base(), "Can not overlap");
if (rec->addr() + rec->size() == cur->base()) {
cur->expand_region(rec->addr(), rec->size());
return true;
} else {
return insert_record(rec);
}
} else if (cur->base() + cur->size() == rec->addr()) {
cur->expand_region(rec->addr(), rec->size());
VMMemRegion* next_reg = (VMMemRegion*)next();
// see if we can consolidate next committed region
if (next_reg != NULL && next_reg->is_committed_region() &&
next_reg->base() == cur->base() + cur->size()) {
cur->expand_region(next_reg->base(), next_reg->size());
remove();
}
return true;
}
cur = (VMMemRegion*)next();
}
return insert_record(rec);
}
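To make the consolidation rule above easier to follow, here is a stand-alone sketch using plain C++ containers rather than the HotSpot classes (the types and function are hypothetical, and the duplicate/containment checks of the real code are omitted): committed ranges inside a reserved region stay sorted and non-overlapping, and a new commit that exactly touches an existing range at either end is merged into it.

#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>

struct Range { uintptr_t base; size_t size; };

// Insert [addr, addr + size) into a sorted, non-overlapping list of committed
// ranges, coalescing with neighbours that touch it exactly at either end.
void add_committed(std::list<Range>& ranges, uintptr_t addr, size_t size) {
  auto it = ranges.begin();
  while (it != ranges.end() && it->base + it->size < addr) ++it;
  if (it != ranges.end() && it->base + it->size == addr) {
    it->size += size;                       // extend the left neighbour upwards
    auto next = std::next(it);
    if (next != ranges.end() && next->base == it->base + it->size) {
      it->size += next->size;               // the gap closed: absorb the right neighbour
      ranges.erase(next);
    }
  } else if (it != ranges.end() && addr + size == it->base) {
    it->base = addr;                        // extend the right neighbour downwards
    it->size += size;
  } else {
    ranges.insert(it, Range{addr, size});   // no adjacent range: insert a new one
  }
}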
bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
assert(rec->is_uncommit_record(), "sanity check");
VMMemRegion* cur;
cur = (VMMemRegion*)current();
assert(cur->is_reserved_region() && cur->contains_region(rec),
"Sanity check");
// thread's native stack is always marked as "committed", ignore
// the "commit" operation for creating stack guard pages
if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
return true;
}
cur = (VMMemRegion*)next();
while (cur != NULL && cur->is_committed_region()) {
// region already uncommitted, must be due to duplicated record
if (cur->addr() >= rec->addr() + rec->size()) {
break;
} else if (cur->contains_region(rec)) {
// uncommit whole region
if (cur->is_same_region(rec)) {
remove();
break;
} else if (rec->addr() == cur->addr() ||
rec->addr() + rec->size() == cur->addr() + cur->size()) {
// uncommitted from either end of current memory region.
cur->exclude_region(rec->addr(), rec->size());
break;
} else { // split the committed region and release the middle
address high_addr = cur->addr() + cur->size();
size_t sz = high_addr - rec->addr();
cur->exclude_region(rec->addr(), sz);
sz = high_addr - (rec->addr() + rec->size());
if (MemTracker::track_callsite()) {
MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
((VMMemRegionEx*)cur)->pc());
return insert_record_after(&tmp);
} else {
MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
return insert_record_after(&tmp);
}
}
}
cur = (VMMemRegion*)next();
}
// we may not find committed record due to duplicated records
return true;
}
bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
assert(rec->is_deallocation_record(), "Sanity check");
VMMemRegion* cur = (VMMemRegion*)current();
assert(cur->is_reserved_region() && cur->contains_region(rec),
"Sanity check");
#ifdef ASSERT
VMMemRegion* next_reg = (VMMemRegion*)peek_next();
// should not have any committed memory in this reserved region
assert(next_reg == NULL || !next_reg->is_committed_region(), "Sanity check");
#endif
if (rec->is_same_region(cur)) {
remove();
} else if (rec->addr() == cur->addr() ||
rec->addr() + rec->size() == cur->addr() + cur->size()) {
// released region is at either end of this region
cur->exclude_region(rec->addr(), rec->size());
} else { // split the reserved region and release the middle
address high_addr = cur->addr() + cur->size();
size_t sz = high_addr - rec->addr();
cur->exclude_region(rec->addr(), sz);
sz = high_addr - rec->addr() - rec->size();
if (MemTracker::track_callsite()) {
MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
((VMMemRegionEx*)cur)->pc());
return insert_reserved_region(&tmp);
} else {
MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
return insert_reserved_region(&tmp);
}
}
return true;
}
bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
// skip all 'commit' records associated with previous reserved region
VMMemRegion* p = (VMMemRegion*)next();
while (p != NULL && p->is_committed_region() &&
p->base() + p->size() < rec->addr()) {
p = (VMMemRegion*)next();
}
return insert_record(rec);
}
bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
size_t sz = rgn->size() - new_rgn_size;
// the original region becomes 'new' region
rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
// remaining becomes next region
MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
return insert_reserved_region(&next_rgn);
} else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
rgn->exclude_region(new_rgn_addr, new_rgn_size);
MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
return insert_reserved_region(&next_rgn);
} else {
// the original region will be split into three
address rgn_high_addr = rgn->base() + rgn->size();
// first region
rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
// the second region is the new region
MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
if (!insert_reserved_region(&new_rgn)) return false;
// the remaining region
MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
return insert_reserved_region(&rem_rgn);
}
}
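A worked example of the three-way case above, with made-up addresses: splitting a 1 MB reservation around a 128 KB carve-out that touches neither end.

  before:  reserved [0x10000000 - 0x10100000)                       1024 KB
  request: new region at 0x10040000, size 0x20000                    128 KB
  after:   [0x10000000 - 0x10040000)  original record, shrunk        256 KB
           [0x10040000 - 0x10060000)  the new reserved region        128 KB
           [0x10060000 - 0x10100000)  remainder, inserted after it   640 KB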
static int sort_in_seq_order(const void* p1, const void* p2) { static int sort_in_seq_order(const void* p1, const void* p2) {
assert(p1 != NULL && p2 != NULL, "Sanity check"); assert(p1 != NULL && p2 != NULL, "Sanity check");
const MemPointerRecord* mp1 = (MemPointerRecord*)p1; const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
...@@ -61,11 +275,11 @@ bool StagingArea::init() { ...@@ -61,11 +275,11 @@ bool StagingArea::init() {
} }
MemPointerArrayIteratorImpl StagingArea::virtual_memory_record_walker() { VMRecordIterator StagingArea::virtual_memory_record_walker() {
MemPointerArray* arr = vm_data(); MemPointerArray* arr = vm_data();
// sort into seq number order // sort into seq number order
arr->sort((FN_SORT)sort_in_seq_order); arr->sort((FN_SORT)sort_in_seq_order);
return MemPointerArrayIteratorImpl(arr); return VMRecordIterator(arr);
} }
...@@ -135,6 +349,8 @@ bool MemSnapshot::merge(MemRecorder* rec) { ...@@ -135,6 +349,8 @@ bool MemSnapshot::merge(MemRecorder* rec) {
return false; return false;
} }
} else { } else {
// locate the matched record and/or position the iterator at the proper
// location for this incoming record.
p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr()); p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
// we have not seen this memory block, so just add to staging area // we have not seen this memory block, so just add to staging area
if (p2 == NULL) { if (p2 == NULL) {
...@@ -199,7 +415,7 @@ bool MemSnapshot::promote() { ...@@ -199,7 +415,7 @@ bool MemSnapshot::promote() {
MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker(); MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
bool promoted = false; bool promoted = false;
if (promote_malloc_records(&malloc_itr)) { if (promote_malloc_records(&malloc_itr)) {
MemPointerArrayIteratorImpl vm_itr = _staging_area.virtual_memory_record_walker(); VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
if (promote_virtual_memory_records(&vm_itr)) { if (promote_virtual_memory_records(&vm_itr)) {
promoted = true; promoted = true;
} }
...@@ -218,7 +434,7 @@ bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) { ...@@ -218,7 +434,7 @@ bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr()); matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
// found matched memory block // found matched memory block
if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) { if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
// snapshot already contains 'lived' records // snapshot already contains 'live' records
assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(), assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
"Sanity check"); "Sanity check");
// update block states // update block states
...@@ -277,86 +493,59 @@ bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) { ...@@ -277,86 +493,59 @@ bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) { bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
VMMemPointerIterator vm_snapshot_itr(_vm_ptrs); VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
MemPointerRecord* new_rec = (MemPointerRecord*)itr->current(); MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
VMMemRegionEx new_vm_rec; VMMemRegion* reserved_rec;
VMMemRegion* matched_rec;
while (new_rec != NULL) { while (new_rec != NULL) {
assert(new_rec->is_vm_pointer(), "Sanity check"); assert(new_rec->is_vm_pointer(), "Sanity check");
if (MemTracker::track_callsite()) {
new_vm_rec.init((MemPointerRecordEx*)new_rec); // locate a reserved region that contains the specified address, or
} else { // the nearest reserved region has base address just above the specified
new_vm_rec.init(new_rec); // address
} reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
matched_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr()); if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
if (matched_rec != NULL &&
(matched_rec->contains(&new_vm_rec) || matched_rec->base() == new_vm_rec.base())) {
// snapshot can only have 'live' records // snapshot can only have 'live' records
assert(matched_rec->is_reserve_record(), "Sanity check"); assert(reserved_rec->is_reserved_region(), "Sanity check");
if (new_vm_rec.is_reserve_record() && matched_rec->base() == new_vm_rec.base()) { if (new_rec->is_allocation_record()) {
// resize reserved virtual memory range if (!reserved_rec->is_same_region(new_rec)) {
// resize has to cover committed area // only deal with splitting a bigger reserved region into smaller regions.
assert(new_vm_rec.size() >= matched_rec->committed_size(), "Sanity check"); // So far, CDS is the only use case.
matched_rec->set_reserved_size(new_vm_rec.size()); if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
} else if (new_vm_rec.is_commit_record()) { return false;
// commit memory inside reserved memory range
assert(new_vm_rec.committed_size() <= matched_rec->reserved_size(), "Sanity check");
// thread stacks are marked committed, so we ignore 'commit' record for creating
// stack guard pages
if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) != mtThreadStack) {
matched_rec->commit(new_vm_rec.committed_size());
}
} else if (new_vm_rec.is_uncommit_record()) {
if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtThreadStack) {
// ignore 'uncommit' record from removing stack guard pages, uncommit
// thread stack as whole
if (matched_rec->committed_size() == new_vm_rec.committed_size()) {
matched_rec->uncommit(new_vm_rec.committed_size());
}
} else {
// uncommit memory inside reserved memory range
assert(new_vm_rec.committed_size() <= matched_rec->committed_size(),
"Sanity check");
matched_rec->uncommit(new_vm_rec.committed_size());
} }
} else if (new_vm_rec.is_type_tagging_record()) {
// tag this virtual memory range to a memory type
// can not re-tag a memory range to different type
assert(FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtNone ||
FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_vm_rec.flags()),
"Sanity check");
matched_rec->tag(new_vm_rec.flags());
} else if (new_vm_rec.is_release_record()) {
// release part or whole memory range
if (new_vm_rec.base() == matched_rec->base() &&
new_vm_rec.size() == matched_rec->size()) {
// release whole virtual memory range
assert(matched_rec->committed_size() == 0, "Sanity check");
vm_snapshot_itr.remove();
} else {
// partial release
matched_rec->partial_release(new_vm_rec.base(), new_vm_rec.size());
} }
} else { } else if (new_rec->is_uncommit_record()) {
// multiple reserve/commit on the same virtual memory range if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
assert((new_vm_rec.is_reserve_record() || new_vm_rec.is_commit_record()) && return false;
(new_vm_rec.base() == matched_rec->base() && new_vm_rec.size() == matched_rec->size()),
"Sanity check");
matched_rec->tag(new_vm_rec.flags());
} }
} else { } else if (new_rec->is_commit_record()) {
// no matched record // insert or expand existing committed region to cover this
if (new_vm_rec.is_reserve_record()) { // newly committed region
if (matched_rec == NULL || matched_rec->base() > new_vm_rec.base()) { if (!vm_snapshot_itr.add_committed_region(new_rec)) {
if (!vm_snapshot_itr.insert(&new_vm_rec)) {
return false; return false;
} }
} else { } else if (new_rec->is_deallocation_record()) {
if (!vm_snapshot_itr.insert_after(&new_vm_rec)) { // release part or all memory region
if (!vm_snapshot_itr.remove_released_region(new_rec)) {
return false; return false;
} }
} else if (new_rec->is_type_tagging_record()) {
// tag this reserved virtual memory range to a memory type. Can not re-tag a memory range
// to different type.
assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
"Sanity check");
reserved_rec->tag(new_rec->flags());
} else {
ShouldNotReachHere();
} }
} else { } else {
// throw out obsolete records, which are the commit/uncommit/release/tag records /*
// on memory regions that are already released. * The assertion failure indicates mis-matched virtual memory records. The likely
* scenario is that some virtual memory operations do not go through the os::xxxx_memory()
* API and have to be tracked manually (perfMemory is an example).
*/
assert(new_rec->is_allocation_record(), "Sanity check");
if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
return false;
} }
} }
new_rec = (MemPointerRecord*)itr->next(); new_rec = (MemPointerRecord*)itr->next();
...@@ -433,5 +622,33 @@ void MemSnapshot::check_staging_data() { ...@@ -433,5 +622,33 @@ void MemSnapshot::check_staging_data() {
cur = (MemPointerRecord*)vm_itr.next(); cur = (MemPointerRecord*)vm_itr.next();
} }
} }
void MemSnapshot::dump_all_vm_pointers() {
MemPointerArrayIteratorImpl itr(_vm_ptrs);
VMMemRegion* ptr = (VMMemRegion*)itr.current();
tty->print_cr("dump virtual memory pointers:");
while (ptr != NULL) {
if (ptr->is_committed_region()) {
tty->print("\t");
}
tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
(ptr->addr() + ptr->size()), ptr->flags());
if (MemTracker::track_callsite()) {
VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
if (ex->pc() != NULL) {
char buf[1024];
if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
tty->print_cr("\t%s", buf);
} else {
tty->print_cr("");
}
}
}
ptr = (VMMemRegion*)itr.next();
}
tty->flush();
}
#endif // ASSERT #endif // ASSERT
...@@ -111,33 +111,41 @@ class VMMemPointerIterator : public MemPointerIterator { ...@@ -111,33 +111,41 @@ class VMMemPointerIterator : public MemPointerIterator {
MemPointerIterator(arr) { MemPointerIterator(arr) {
} }
// locate an existing record that contains specified address, or // locate an existing reserved memory region that contains specified address,
// the record, where the record with specified address, should // or the reserved region just above this address, where the incoming
// be inserted. // reserved region should be inserted.
// virtual memory record array is sorted in address order, so
// binary search is performed
virtual MemPointer* locate(address addr) { virtual MemPointer* locate(address addr) {
int index_low = 0; reset();
int index_high = _array->length(); VMMemRegion* reg = (VMMemRegion*)current();
int index_mid = (index_high + index_low) / 2; while (reg != NULL) {
int r = 1; if (reg->is_reserved_region()) {
while (index_low < index_high && (r = compare(index_mid, addr)) != 0) { if (reg->contains_address(addr) || addr < reg->base()) {
if (r > 0) { return reg;
index_high = index_mid;
} else {
index_low = index_mid;
} }
index_mid = (index_high + index_low) / 2;
} }
if (r == 0) { reg = (VMMemRegion*)next();
// update current location
_pos = index_mid;
return _array->at(index_mid);
} else {
return NULL;
} }
return NULL;
} }
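The binary search is gone because the snapshot array now interleaves reserved regions with their committed children, so probing arbitrary slots with a compare() no longer works; instead the iterator rewinds and walks forward, looking only at reserved entries. A simplified model with hypothetical types:

#include <cstddef>
#include <cstdint>
#include <vector>

struct Region { bool reserved; uintptr_t base; size_t size; };

// Return the reserved region containing 'addr', or the first reserved region
// whose base lies above 'addr'; NULL if every reserved region ends below it.
const Region* locate(const std::vector<Region>& map, uintptr_t addr) {
  for (const Region& r : map) {
    if (!r.reserved) continue;        // skip committed child regions
    if (addr < r.base + r.size) {     // contains addr, or starts above it
      return &r;
    }
  }
  return nullptr;
}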
// the following methods update the virtual memory map in the context
// of the 'current' position, which callers must first set properly via
// the locate() method.
bool add_reserved_region(MemPointerRecord* rec);
bool add_committed_region(MemPointerRecord* rec);
bool remove_uncommitted_region(MemPointerRecord* rec);
bool remove_released_region(MemPointerRecord* rec);
// split a reserved region to create a new memory region with specified base and size
bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
private:
bool insert_record(MemPointerRecord* rec);
bool insert_record_after(MemPointerRecord* rec);
bool insert_reserved_region(MemPointerRecord* rec);
// reset current position
inline void reset() { _pos = 0; }
#ifdef ASSERT #ifdef ASSERT
virtual bool is_dup_pointer(const MemPointer* ptr1, virtual bool is_dup_pointer(const MemPointer* ptr1,
const MemPointer* ptr2) const { const MemPointer* ptr2) const {
...@@ -154,32 +162,17 @@ class VMMemPointerIterator : public MemPointerIterator { ...@@ -154,32 +162,17 @@ class VMMemPointerIterator : public MemPointerIterator {
(p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release; (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
} }
#endif #endif
// compare if an address falls into a memory region,
// return 0, if the address falls into a memory region at specified index
// return 1, if memory region pointed by specified index is higher than the address
// return -1, if memory region pointed by specified index is lower than the address
int compare(int index, address addr) const {
VMMemRegion* r = (VMMemRegion*)_array->at(index);
assert(r->is_reserve_record(), "Sanity check");
if (r->addr() > addr) {
return 1;
} else if (r->addr() + r->reserved_size() <= addr) {
return -1;
} else {
return 0;
}
}
}; };
class MallocRecordIterator : public MemPointerArrayIterator { class MallocRecordIterator : public MemPointerArrayIterator {
private: protected:
MemPointerArrayIteratorImpl _itr; MemPointerArrayIteratorImpl _itr;
public: public:
MallocRecordIterator(MemPointerArray* arr) : _itr(arr) { MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
} }
MemPointer* current() const { virtual MemPointer* current() const {
MemPointerRecord* cur = (MemPointerRecord*)_itr.current(); MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
assert(cur == NULL || !cur->is_vm_pointer(), "seek error"); assert(cur == NULL || !cur->is_vm_pointer(), "seek error");
MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next(); MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
...@@ -194,7 +187,7 @@ class MallocRecordIterator : public MemPointerArrayIterator { ...@@ -194,7 +187,7 @@ class MallocRecordIterator : public MemPointerArrayIterator {
} }
} }
MemPointer* next() { virtual MemPointer* next() {
MemPointerRecord* cur = (MemPointerRecord*)_itr.current(); MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check"); assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check");
MemPointerRecord* next = (MemPointerRecord*)_itr.next(); MemPointerRecord* next = (MemPointerRecord*)_itr.next();
...@@ -214,6 +207,63 @@ class MallocRecordIterator : public MemPointerArrayIterator { ...@@ -214,6 +207,63 @@ class MallocRecordIterator : public MemPointerArrayIterator {
bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; } bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
}; };
// collapse duplicated records. Eliminating duplicated records here is much
// cheaper than during the promotion phase. However, it has a limitation - it
// can only eliminate duplicated records within a generation, so there are
// still chances of seeing duplicated records during promotion.
// We want to use the record with the higher sequence number, because it has
// the more accurate callsite pc.
class VMRecordIterator : public MallocRecordIterator {
public:
VMRecordIterator(MemPointerArray* arr) : MallocRecordIterator(arr) {
MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
while (next != NULL) {
assert(cur != NULL, "Sanity check");
assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
"pre-sort order");
if (is_duplicated_record(cur, next)) {
_itr.next();
next = (MemPointerRecord*)_itr.peek_next();
} else {
break;
}
}
}
virtual MemPointer* current() const {
return _itr.current();
}
// get next record, but skip the duplicated records
virtual MemPointer* next() {
MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
while (next != NULL) {
assert(cur != NULL, "Sanity check");
assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
"pre-sort order");
if (is_duplicated_record(cur, next)) {
_itr.next();
cur = next;
next = (MemPointerRecord*)_itr.peek_next();
} else {
break;
}
}
return cur;
}
private:
bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
return ret;
}
};
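The duplicate test keys on address, size and flags, and because the records are pre-sorted by sequence number, a run of duplicates collapses to its last (newest) element. A stand-alone sketch of that rule with hypothetical types:

#include <cstddef>
#include <cstdint>
#include <vector>

struct Rec { uintptr_t addr; size_t size; unsigned flags; int seq; };

static bool same_rec(const Rec& a, const Rec& b) {
  return a.addr == b.addr && a.size == b.size && a.flags == b.flags;
}

// Keep only the last record of each run of duplicates (the one with the
// highest sequence number, i.e. the most recent callsite information).
std::vector<Rec> collapse_duplicates(const std::vector<Rec>& in) {
  std::vector<Rec> out;
  for (size_t i = 0; i < in.size(); i++) {
    if (i + 1 < in.size() && same_rec(in[i], in[i + 1])) {
      continue;                       // a newer duplicate follows; drop this one
    }
    out.push_back(in[i]);
  }
  return out;
}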
class StagingArea : public _ValueObj { class StagingArea : public _ValueObj {
private: private:
MemPointerArray* _malloc_data; MemPointerArray* _malloc_data;
...@@ -233,7 +283,8 @@ class StagingArea : public _ValueObj { ...@@ -233,7 +283,8 @@ class StagingArea : public _ValueObj {
return MallocRecordIterator(malloc_data()); return MallocRecordIterator(malloc_data());
} }
MemPointerArrayIteratorImpl virtual_memory_record_walker(); VMRecordIterator virtual_memory_record_walker();
bool init(); bool init();
void clear() { void clear() {
assert(_malloc_data != NULL && _vm_data != NULL, "Just check"); assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
...@@ -293,6 +344,8 @@ class MemSnapshot : public CHeapObj<mtNMT> { ...@@ -293,6 +344,8 @@ class MemSnapshot : public CHeapObj<mtNMT> {
NOT_PRODUCT(void check_staging_data();) NOT_PRODUCT(void check_staging_data();)
NOT_PRODUCT(void check_malloc_pointers();) NOT_PRODUCT(void check_malloc_pointers();)
NOT_PRODUCT(bool has_allocation_record(address addr);) NOT_PRODUCT(bool has_allocation_record(address addr);)
// dump all virtual memory pointers in snapshot
DEBUG_ONLY( void dump_all_vm_pointers();)
private: private:
// copy pointer data from src to dest // copy pointer data from src to dest
...@@ -302,5 +355,4 @@ class MemSnapshot : public CHeapObj<mtNMT> { ...@@ -302,5 +355,4 @@ class MemSnapshot : public CHeapObj<mtNMT> {
bool promote_virtual_memory_records(MemPointerArrayIterator* itr); bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
}; };
#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP #endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
...@@ -364,7 +364,7 @@ void MemTracker::create_memory_record(address addr, MEMFLAGS flags, ...@@ -364,7 +364,7 @@ void MemTracker::create_memory_record(address addr, MEMFLAGS flags,
if (thread != NULL) { if (thread != NULL) {
if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) { if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
JavaThread* java_thread = static_cast<JavaThread*>(thread); JavaThread* java_thread = (JavaThread*)thread;
JavaThreadState state = java_thread->thread_state(); JavaThreadState state = java_thread->thread_state();
if (SafepointSynchronize::safepoint_safe(java_thread, state)) { if (SafepointSynchronize::safepoint_safe(java_thread, state)) {
// JavaThreads that are safepoint safe, can run through safepoint, // JavaThreads that are safepoint safe, can run through safepoint,
...@@ -472,6 +472,8 @@ void MemTracker::sync() { ...@@ -472,6 +472,8 @@ void MemTracker::sync() {
// it should guarantee that NMT is fully sync-ed. // it should guarantee that NMT is fully sync-ed.
ThreadCritical tc; ThreadCritical tc;
SequenceGenerator::reset();
// walk all JavaThreads to collect recorders // walk all JavaThreads to collect recorders
SyncThreadRecorderClosure stc; SyncThreadRecorderClosure stc;
Threads::threads_do(&stc); Threads::threads_do(&stc);
...@@ -484,11 +486,12 @@ void MemTracker::sync() { ...@@ -484,11 +486,12 @@ void MemTracker::sync() {
pending_recorders = _global_recorder; pending_recorders = _global_recorder;
_global_recorder = NULL; _global_recorder = NULL;
} }
SequenceGenerator::reset();
// check _worker_thread with lock to avoid racing condition // check _worker_thread with lock to avoid racing condition
if (_worker_thread != NULL) { if (_worker_thread != NULL) {
_worker_thread->at_sync_point(pending_recorders); _worker_thread->at_sync_point(pending_recorders);
} }
assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
} }
} }
......
...@@ -113,8 +113,10 @@ class MemTracker : AllStatic { ...@@ -113,8 +113,10 @@ class MemTracker : AllStatic {
#include "thread_solaris.inline.hpp" #include "thread_solaris.inline.hpp"
#endif #endif
#ifdef _DEBUG extern bool NMT_track_callsite;
#define DEBUG_CALLER_PC os::get_caller_pc(3)
#ifdef ASSERT
#define DEBUG_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#else #else
#define DEBUG_CALLER_PC 0 #define DEBUG_CALLER_PC 0
#endif #endif
...@@ -261,7 +263,7 @@ class MemTracker : AllStatic { ...@@ -261,7 +263,7 @@ class MemTracker : AllStatic {
// record a 'malloc' call // record a 'malloc' call
static inline void record_malloc(address addr, size_t size, MEMFLAGS flags, static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
address pc = 0, Thread* thread = NULL) { address pc = 0, Thread* thread = NULL) {
if (NMT_CAN_TRACK(flags)) { if (is_on() && NMT_CAN_TRACK(flags)) {
assert(size > 0, "Sanity check"); assert(size > 0, "Sanity check");
create_memory_record(addr, (flags|MemPointerRecord::malloc_tag()), size, pc, thread); create_memory_record(addr, (flags|MemPointerRecord::malloc_tag()), size, pc, thread);
} }
...@@ -275,7 +277,7 @@ class MemTracker : AllStatic { ...@@ -275,7 +277,7 @@ class MemTracker : AllStatic {
// record a 'realloc' call // record a 'realloc' call
static inline void record_realloc(address old_addr, address new_addr, size_t size, static inline void record_realloc(address old_addr, address new_addr, size_t size,
MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
if (is_on()) { if (is_on() && NMT_CAN_TRACK(flags)) {
assert(size > 0, "Sanity check"); assert(size > 0, "Sanity check");
record_free(old_addr, flags, thread); record_free(old_addr, flags, thread);
record_malloc(new_addr, size, flags, pc, thread); record_malloc(new_addr, size, flags, pc, thread);
...@@ -317,6 +319,7 @@ class MemTracker : AllStatic { ...@@ -317,6 +319,7 @@ class MemTracker : AllStatic {
static inline void release_thread_stack(address addr, size_t size, Thread* thr) { static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
if (is_on()) { if (is_on()) {
assert(size > 0 && thr != NULL, "Sanity check"); assert(size > 0 && thr != NULL, "Sanity check");
assert(!thr->is_Java_thread(), "too early");
create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag() | mtThreadStack, create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag() | mtThreadStack,
size, DEBUG_CALLER_PC, thr); size, DEBUG_CALLER_PC, thr);
create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag() | mtThreadStack, create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag() | mtThreadStack,
...@@ -326,11 +329,11 @@ class MemTracker : AllStatic { ...@@ -326,11 +329,11 @@ class MemTracker : AllStatic {
// record a virtual memory 'commit' call // record a virtual memory 'commit' call
static inline void record_virtual_memory_commit(address addr, size_t size, static inline void record_virtual_memory_commit(address addr, size_t size,
address pc = 0, Thread* thread = NULL) { address pc, Thread* thread = NULL) {
if (is_on()) { if (is_on()) {
assert(size > 0, "Sanity check"); assert(size > 0, "Sanity check");
create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag(), create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag(),
size, DEBUG_CALLER_PC, thread); size, pc, thread);
} }
} }
......