Commit ed656754 authored by amurillo

Merge

Parents 7f7baced 6e1d2312
master
Tags unavailable
No related merge requests
......@@ -507,3 +507,4 @@ dc06b830ea95ed953cac02e9e67a75ab682edb97 jdk8u40-b01
f52cb91647590fe4a12af295a8a87e2cb761b044 jdk8u40-b02
fbc31318922c31488c0464ccd864d2cd1d9e21a7 hs25.40-b06
38539608359a6dfc5740abb66f878af643757c3b jdk8u40-b03
c3990b8c710e4c1996b5cd579681645d9f0408c1 hs25.40-b07
......@@ -118,8 +118,8 @@ ifeq ($(INCLUDE_NMT), false)
CFLAGS += -DINCLUDE_NMT=0
Src_Files_EXCLUDE += \
memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \
memTracker.cpp nmtDCmd.cpp
memBaseline.cpp memReporter.cpp mallocTracker.cpp virtualMemoryTracker.cpp nmtCommon.cpp \
memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
endif
-include $(HS_ALT_MAKE)/excludeSrc.make
......
......@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
HS_MAJOR_VER=25
HS_MINOR_VER=40
HS_BUILD_NUMBER=06
HS_BUILD_NUMBER=08
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
......
......@@ -2434,23 +2434,25 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
}
// The memory is committed
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
return addr;
}
bool os::release_memory_special(char* base, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
if (rslt == 0) {
tkr.record((address)base, bytes);
return true;
if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
// detaching the SHM segment will also delete it, see reserve_memory_special()
int rslt = shmdt(base);
if (rslt == 0) {
tkr.record((address)base, bytes);
return true;
} else {
return false;
}
} else {
tkr.discard();
return false;
return shmdt(base) == 0;
}
}
size_t os::large_page_size() {
......
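The hunk above introduces the release pattern used throughout this change: the NMT Tracker is only created when tracking is above NMT_minimal, and the OS call runs inside that tracked scope so the recorded range matches what was actually released; with tracking off, the code degenerates to the bare shmdt() call. A condensed sketch of the idiom, using the HotSpot-internal API exactly as it appears in this diff (not standalone-buildable outside the VM, helper name illustrative):

#include <sys/shm.h>
#include "services/memTracker.hpp"

// Sketch only: mirrors the gated release in os::release_memory_special above.
static bool release_shm_with_nmt(char* base, size_t bytes) {
  if (MemTracker::tracking_level() > NMT_minimal) {
    // Create the tracker before detaching, so the NMT record and the actual
    // release of the segment stay paired.
    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    if (shmdt(base) != 0) {
      return false;                    // nothing released, nothing recorded
    }
    tkr.record((address)base, bytes);  // remove the region from NMT's books
    return true;
  } else {
    // Tracking disabled or minimal: just detach the segment.
    return shmdt(base) == 0;
  }
}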
......@@ -753,7 +753,7 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
return mapAddress;
}
......@@ -918,7 +918,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
*addr = mapAddress;
*sizep = size;
......
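These PerfMemory hunks (and the matching ones for the other platforms further down) all make the same call-shape change: the mmap'ed PerfData backing store is immediately usable, so it is now recorded as reserved and committed in a single call, and the memory flag moves behind the call-stack argument. A minimal restatement of the new call as it appears in the diff; the helper name is illustrative:

// Sketch: the recording call shared by the mmap_create_shared/mmap_attach_shared hunks.
//   old: MemTracker::record_virtual_memory_reserve(addr, size, mtInternal, CURRENT_PC);
//   new: reserve-and-commit, with the MEMFLAGS now trailing the call stack.
static void record_perf_mapping(address mapAddress, size_t size) {
  MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size,
                                                        CURRENT_PC, mtInternal);
}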
......@@ -3501,9 +3501,12 @@ char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t al
assert(is_ptr_aligned(start, alignment), "Must be");
// os::reserve_memory_special will record this memory area.
// Need to release it here to prevent overlapping reservations.
MemTracker::record_virtual_memory_release((address)start, bytes);
if (MemTracker::tracking_level() > NMT_minimal) {
// os::reserve_memory_special will record this memory area.
// Need to release it here to prevent overlapping reservations.
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
tkr.record((address)start, bytes);
}
char* end = start + bytes;
......@@ -3598,7 +3601,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr,
}
// The memory is committed
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
}
return addr;
......@@ -3614,24 +3617,30 @@ bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
}
bool os::release_memory_special(char* base, size_t bytes) {
assert(UseLargePages, "only for large pages");
bool res;
if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
res = os::Linux::release_memory_special_impl(base, bytes);
if (res) {
tkr.record((address)base, bytes);
}
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
} else {
res = os::Linux::release_memory_special_impl(base, bytes);
}
return res;
}
bool os::Linux::release_memory_special_impl(char* base, size_t bytes) {
assert(UseLargePages, "only for large pages");
bool res;
if (UseSHM) {
res = os::Linux::release_memory_special_shm(base, bytes);
} else {
assert(UseHugeTLBFS, "must be");
res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
}
if (res) {
tkr.record((address)base, bytes);
} else {
tkr.discard();
}
return res;
}
......
......@@ -108,6 +108,7 @@ class Linux {
static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
static bool release_memory_special_impl(char* base, size_t bytes);
static bool release_memory_special_shm(char* base, size_t bytes);
static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
......
......@@ -753,7 +753,7 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
return mapAddress;
}
......@@ -924,7 +924,7 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size, CURRENT_PC, mtInternal);
*addr = mapAddress;
*sizep = size;
......
......@@ -74,21 +74,41 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
VMError::report_coredump_status(buffer, success);
}
address os::get_caller_pc(int n) {
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
n ++;
toSkip++;
#endif
int frame_idx = 0;
int num_of_frames; // number of frames captured
frame fr = os::current_frame();
while (n > 0 && fr.pc() &&
!os::is_first_C_frame(&fr) && fr.sender_pc()) {
fr = os::get_sender_for_C_frame(&fr);
n --;
while (fr.pc() && frame_idx < frames) {
if (toSkip > 0) {
toSkip --;
} else {
stack[frame_idx ++] = fr.pc();
}
if (fr.fp() == NULL || os::is_first_C_frame(&fr)
||fr.sender_pc() == NULL || fr.cb() != NULL) break;
if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
fr = os::get_sender_for_C_frame(&fr);
} else {
break;
}
}
if (n == 0) {
return fr.pc();
} else {
return NULL;
num_of_frames = frame_idx;
for (; frame_idx < frames; frame_idx ++) {
stack[frame_idx] = NULL;
}
return num_of_frames;
}
bool os::unsetenv(const char* name) {
assert(name != NULL, "Null pointer");
return (::unsetenv(name) == 0);
}
int os::get_last_error() {
......
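os::get_caller_pc(int n), which returned a single frame, is replaced above by os::get_native_stack(address* stack, int frames, int toSkip): it walks up to frames C frames, drops the first toSkip, NULL-pads the unused slots, and returns the number of frames captured. A small caller sketch; the depth and the loop are illustrative assumptions, not code from this change:

#include "runtime/os.hpp"

// Sketch: capturing a native stack the way NMT detail mode consumes it.
static void capture_example() {
  const int kMaxDepth = 4;            // illustrative depth
  address frames[kMaxDepth];
  // Skip one frame (this function itself); unused slots come back as NULL.
  int captured = os::get_native_stack(frames, kMaxDepth, 1);
  for (int i = 0; i < captured; i++) {
    // frames[i] is a raw code address, suitable for later symbol resolution
  }
}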
......@@ -770,7 +770,8 @@ static char* mmap_create_shared(size_t size) {
(void)::memset((void*) mapAddress, 0, size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
size, CURRENT_PC, mtInternal);
return mapAddress;
}
......@@ -941,7 +942,8 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
size, CURRENT_PC, mtInternal);
*addr = mapAddress;
*sizep = size;
......
......@@ -131,6 +131,7 @@ BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
case DLL_PROCESS_DETACH:
if(ForceTimeHighResolution)
timeEndPeriod(1L);
break;
default:
break;
......@@ -153,6 +154,10 @@ bool os::getenv(const char* name, char* buffer, int len) {
return result > 0 && result < len;
}
bool os::unsetenv(const char* name) {
assert(name != NULL, "Null pointer");
return (SetEnvironmentVariable(name, NULL) == TRUE);
}
// No setuid programs under Windows.
bool os::have_special_privileges() {
......@@ -311,15 +316,17 @@ extern "C" void breakpoint() {
* So far, this method is only used by Native Memory Tracking, which is
* only supported on Windows XP or later.
*/
address os::get_caller_pc(int n) {
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
n ++;
toSkip ++;
#endif
address pc;
if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
return pc;
int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
(PVOID*)stack, NULL);
for (int index = captured; index < frames; index ++) {
stack[index] = NULL;
}
return NULL;
return captured;
}
......@@ -2904,7 +2911,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
PAGE_READWRITE);
// If reservation failed, return NULL
if (p_buf == NULL) return NULL;
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
os::release_memory(p_buf, bytes + chunk_size);
// we still need to round up to a page boundary (in case we are using large pages)
......@@ -2970,7 +2977,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
// need to create a dummy 'reserve' record to match
// the release.
MemTracker::record_virtual_memory_reserve((address)p_buf,
bytes_to_release, mtNone, CALLER_PC);
bytes_to_release, CALLER_PC);
os::release_memory(p_buf, bytes_to_release);
}
#ifdef ASSERT
......@@ -2989,11 +2996,10 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
}
// Although the memory is allocated individually, it is returned as one.
// NMT records it as one block.
address pc = CALLER_PC;
if ((flags & MEM_COMMIT) != 0) {
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
} else {
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
}
// made it this far, success
......@@ -3191,8 +3197,7 @@ char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, boo
DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
if (res != NULL) {
address pc = CALLER_PC;
MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
}
return res;
......
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -1498,7 +1498,8 @@ static char* mapping_create_shared(size_t size) {
(void)memset(mapAddress, '\0', size);
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress,
size, CURRENT_PC, mtInternal);
return (char*) mapAddress;
}
......@@ -1680,7 +1681,8 @@ static void open_file_mapping(const char* user, int vmid,
}
// it does not go through os api, the operation has to record from here
MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)mapAddress, size,
CURRENT_PC, mtInternal);
*addrp = (char*)mapAddress;
......@@ -1834,10 +1836,14 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
return;
}
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
remove_file_mapping(addr);
// it does not go through os api, the operation has to record from here
tkr.record((address)addr, bytes);
if (MemTracker::tracking_level() > NMT_minimal) {
// it does not go through os api, the operation has to record from here
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
remove_file_mapping(addr);
tkr.record((address)addr, bytes);
} else {
remove_file_mapping(addr);
}
}
char* PerfMemory::backing_store_filename() {
......
......@@ -268,7 +268,7 @@ address CodeBuffer::decode_begin() {
GrowableArray<int>* CodeBuffer::create_patch_overflow() {
if (_overflow_arena == NULL) {
_overflow_arena = new (mtCode) Arena();
_overflow_arena = new (mtCode) Arena(mtCode);
}
return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
}
......
......@@ -47,7 +47,7 @@ Compiler::Compiler () {}
void Compiler::init_c1_runtime() {
BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
Arena* arena = new (mtCompiler) Arena();
Arena* arena = new (mtCompiler) Arena(mtCompiler);
Runtime1::initialize(buffer_blob);
FrameMap::initialize();
// initialize data structures
......
......@@ -86,7 +86,8 @@ static bool firstEnv = true;
// ------------------------------------------------------------------
// ciEnv::ciEnv
ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter)
: _ciEnv_arena(mtCompiler) {
VM_ENTRY_MARK;
// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
......@@ -139,7 +140,7 @@ ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter) {
_the_min_jint_string = NULL;
}
ciEnv::ciEnv(Arena* arena) {
ciEnv::ciEnv(Arena* arena) : _ciEnv_arena(mtCompiler) {
ASSERT_IN_VM;
// Set up ciEnv::current immediately, for the sake of ciObjectFactory, etc.
......
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -112,7 +112,7 @@ void ciObjectFactory::initialize() {
// This Arena is long lived and exists in the resource mark of the
// compiler thread that initializes the initial ciObjectFactory which
// creates the shared ciObjects that all later ciObjectFactories use.
Arena* arena = new (mtCompiler) Arena();
Arena* arena = new (mtCompiler) Arena(mtCompiler);
ciEnv initial(arena);
ciEnv* env = ciEnv::current();
env->_factory->init_shared_objects();
......
......@@ -2780,7 +2780,7 @@ void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_b
ClassFileStream* cfs = stream();
u1* current_start = cfs->current();
guarantee_property(attribute_byte_length > sizeof(u2),
guarantee_property(attribute_byte_length >= sizeof(u2),
"Invalid BootstrapMethods attribute length %u in class file %s",
attribute_byte_length,
CHECK);
......@@ -2793,11 +2793,6 @@ void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_b
"Short length on BootstrapMethods in class file %s",
CHECK);
guarantee_property(attribute_byte_length >= sizeof(u2),
"Invalid BootstrapMethods attribute length %u in class file %s",
attribute_byte_length,
CHECK);
// The attribute contains a counted array of counted tuples of shorts,
// representing bootstrap specifiers:
// length*{bootstrap_method_index, argument_count*{argument_index}}
......
......@@ -130,15 +130,13 @@ void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_dom
}
bool Dictionary::do_unloading() {
void Dictionary::do_unloading() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
bool class_was_unloaded = false;
int index = 0; // Defined here for portability! Do not move
// Remove unloadable entries and classes from system dictionary
// The placeholder array has been handled in always_strong_oops_do.
DictionaryEntry* probe = NULL;
for (index = 0; index < table_size(); index++) {
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
probe = *p;
Klass* e = probe->klass();
......@@ -158,16 +156,8 @@ bool Dictionary::do_unloading() {
// Do we need to delete this system dictionary entry?
if (loader_data->is_unloading()) {
// If the loader is not live this entry should always be
// removed (will never be looked up again). Note that this is
// not the same as unloading the referred class.
if (k_def_class_loader_data == loader_data) {
// This is the defining entry, so the referred class is about
// to be unloaded.
class_was_unloaded = true;
}
// Also remove this system dictionary entry.
// removed (will never be looked up again).
purge_entry = true;
} else {
// The loader in this entry is alive. If the klass is dead,
// (determined by checking the defining class loader)
......@@ -196,7 +186,6 @@ bool Dictionary::do_unloading() {
p = probe->next_addr();
}
}
return class_was_unloaded;
}
void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
......
......@@ -108,9 +108,8 @@ public:
return (loader_data->is_the_null_class_loader_data() || !ClassUnloading);
}
// Unload (that is, break root links to) all unmarked classes and
// loaders. Returns "true" iff something was unloaded.
bool do_unloading();
// Unload (that is, break root links to) all unmarked classes and loaders.
void do_unloading();
// Protection domains
Klass* find(int index, unsigned int hash, Symbol* name,
......
......@@ -74,9 +74,9 @@ Symbol* SymbolTable::allocate_symbol(const u1* name, int len, bool c_heap, TRAPS
void SymbolTable::initialize_symbols(int arena_alloc_size) {
// Initialize the arena for global symbols, size passed in depends on CDS.
if (arena_alloc_size == 0) {
_arena = new (mtSymbol) Arena();
_arena = new (mtSymbol) Arena(mtSymbol);
} else {
_arena = new (mtSymbol) Arena(arena_alloc_size);
_arena = new (mtSymbol) Arena(mtSymbol, arena_alloc_size);
}
}
......
......@@ -1662,10 +1662,9 @@ public:
// Note: anonymous classes are not in the SD.
bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
// First, mark for unload all ClassLoaderData referencing a dead class loader.
bool has_dead_loaders = ClassLoaderDataGraph::do_unloading(is_alive);
bool unloading_occurred = false;
if (has_dead_loaders) {
unloading_occurred = dictionary()->do_unloading();
bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive);
if (unloading_occurred) {
dictionary()->do_unloading();
constraints()->purge_loader_constraints();
resolution_errors()->purge_resolution_errors();
}
......
/*
* Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -53,7 +53,8 @@ void ConcurrentMarkSweepPolicy::initialize_alignments() {
}
void ConcurrentMarkSweepPolicy::initialize_generations() {
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC,
CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_generations == NULL)
vm_exit_during_initialization("Unable to allocate gen spec");
......
......@@ -2336,25 +2336,6 @@ size_t G1CollectedHeap::recalculate_used() const {
return blk.result();
}
size_t G1CollectedHeap::unsafe_max_alloc() {
if (num_free_regions() > 0) return HeapRegion::GrainBytes;
// otherwise, is there space in the current allocation region?
// We need to store the current allocation region in a local variable
// here. The problem is that this method doesn't take any locks and
// there may be other threads which overwrite the current allocation
// region field. attempt_allocation(), for example, sets it to NULL
// and this can happen *after* the NULL check here but before the call
// to free(), resulting in a SIGSEGV. Note that this doesn't appear
// to be a problem in the optimized build, since the two loads of the
// current allocation region field are optimized away.
HeapRegion* hr = _mutator_alloc_region.get();
if (hr == NULL) {
return 0;
}
return hr->free();
}
bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
switch (cause) {
case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
......@@ -2531,7 +2512,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
}
}
} else {
if (cause == GCCause::_gc_locker
if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
// Schedule a standard evacuation pause. We're setting word_size
......
......@@ -1170,15 +1170,6 @@ public:
// end fields defining the extent of the contiguous allocation region.)
// But G1CollectedHeap doesn't yet support this.
// Return an estimate of the maximum allocation that could be performed
// without triggering any collection or expansion activity. In a
// generational collector, for example, this is probably the largest
// allocation that could be supported (without expansion) in the youngest
// generation. It is "unsafe" because no locks are taken; the result
// should be treated as an approximation, not a guarantee, for use in
// heuristic resizing decisions.
virtual size_t unsafe_max_alloc();
virtual bool is_maximal_no_gc() const {
return _hrs.available() == 0;
}
......
......@@ -288,7 +288,7 @@ OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
}
_fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
mtGC, 0, AllocFailStrategy::RETURN_NULL);
mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_fine_grain_regions == NULL) {
vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
......
......@@ -485,10 +485,6 @@ void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
young_gen()->eden_space()->ensure_parsability();
}
size_t ParallelScavengeHeap::unsafe_max_alloc() {
return young_gen()->eden_space()->free_in_bytes();
}
size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
return young_gen()->eden_space()->tlab_capacity(thr);
}
......
......@@ -184,8 +184,6 @@ class ParallelScavengeHeap : public CollectedHeap {
void accumulate_statistics_all_tlabs();
void resize_all_tlabs();
size_t unsafe_max_alloc();
bool supports_tlab_allocation() const { return true; }
size_t tlab_capacity(Thread* thr) const;
......
......@@ -70,7 +70,7 @@ void VM_ParallelGCSystemGC::doit() {
"must be a ParallelScavengeHeap");
GCCauseSetter gccs(heap, _gc_cause);
if (_gc_cause == GCCause::_gc_locker
if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc
DEBUG_ONLY(|| _gc_cause == GCCause::_scavenge_alot)) {
// If (and only if) the scavenge fails, this will invoke a full gc.
heap->invoke_scavenge();
......
......@@ -395,15 +395,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// allocation from them and necessitating allocation of new TLABs.
virtual void ensure_parsability(bool retire_tlabs);
// Return an estimate of the maximum allocation that could be performed
// without triggering any collection or expansion activity. In a
// generational collector, for example, this is probably the largest
// allocation that could be supported (without expansion) in the youngest
// generation. It is "unsafe" because no locks are taken; the result
// should be treated as an approximation, not a guarantee, for use in
// heuristic resizing decisions.
virtual size_t unsafe_max_alloc() = 0;
// Section on thread-local allocation buffers (TLABs)
// If the heap supports thread-local allocation buffers, it should override
// the following methods:
......
......@@ -51,6 +51,9 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _heap_dump:
return "Heap Dump Initiated GC";
case _wb_young_gc:
return "WhiteBox Initiated Young GC";
case _no_gc:
return "No GC";
......
......@@ -46,6 +46,7 @@ class GCCause : public AllStatic {
_gc_locker,
_heap_inspection,
_heap_dump,
_wb_young_gc,
/* implementation independent, but reserved for GC use */
_no_gc,
......
......@@ -438,24 +438,22 @@ void Chunk::start_chunk_pool_cleaner_task() {
}
//------------------------------Arena------------------------------------------
NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
Arena::Arena(size_t init_size) {
Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
size_t round_size = (sizeof (char *)) - 1;
init_size = (init_size+round_size) & ~round_size;
_first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
MemTracker::record_new_arena(flag);
set_size_in_bytes(init_size);
NOT_PRODUCT(Atomic::inc(&_instance_count);)
}
Arena::Arena() {
Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
_first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
_hwm = _chunk->bottom(); // Save the cached hwm, max
_max = _chunk->top();
MemTracker::record_new_arena(flag);
set_size_in_bytes(Chunk::init_size);
NOT_PRODUCT(Atomic::inc(&_instance_count);)
}
Arena *Arena::move_contents(Arena *copy) {
......@@ -477,7 +475,7 @@ Arena *Arena::move_contents(Arena *copy) {
Arena::~Arena() {
destruct_contents();
NOT_PRODUCT(Atomic::dec(&_instance_count);)
MemTracker::record_arena_free(_flags);
}
void* Arena::operator new(size_t size) throw() {
......@@ -493,21 +491,21 @@ void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant)
// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
return p;
#else
return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
return (void *) AllocateHeap(size, flags, CALLER_PC);
#endif
}
void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
void* p = os::malloc(size, flags|otArena, CALLER_PC);
void* p = os::malloc(size, flags, CALLER_PC);
if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
return p;
#else
return os::malloc(size, flags|otArena, CALLER_PC);
return os::malloc(size, flags, CALLER_PC);
#endif
}
......@@ -532,8 +530,9 @@ void Arena::destruct_contents() {
// change the size
void Arena::set_size_in_bytes(size_t size) {
if (_size_in_bytes != size) {
long delta = (long)(size - size_in_bytes());
_size_in_bytes = size;
MemTracker::record_arena_size((address)this, size);
MemTracker::record_arena_size_change(delta, _flags);
}
}
......
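With the Arena hunk above every arena carries its MEMFLAGS tag: construction and destruction are reported through MemTracker::record_new_arena()/record_arena_free(), and set_size_in_bytes() now reports a signed delta instead of an absolute size. That is also why call sites elsewhere in this commit change from new (mtCode) Arena() to new (mtCode) Arena(mtCode). A hedged usage sketch (HotSpot-internal API; the deleting call site is an assumption for illustration):

#include "memory/allocation.hpp"

static void arena_tagging_example() {
  // The placement-new flag tags the Arena object itself; the constructor
  // argument tags the chunks the arena hands out.
  Arena* scratch = new (mtCompiler) Arena(mtCompiler);  // MemTracker::record_new_arena(mtCompiler)
  void* buf = scratch->Amalloc(64);                     // later chunk growth reaches NMT as size deltas
  delete scratch;                                       // ~Arena() calls MemTracker::record_arena_free(mtCompiler)
}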
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -133,51 +133,34 @@ class AllocatedObj {
/*
* MemoryType bitmap layout:
 * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 |
 * |       memory type       |   object    |  reserved   |
 * |                         |    type     |             |
* Memory types
*/
enum MemoryType {
// Memory type by sub systems. It occupies lower byte.
mtNone = 0x0000, // undefined
mtClass = 0x0100, // memory class for Java classes
mtThread = 0x0200, // memory for thread objects
mtThreadStack = 0x0300,
mtCode = 0x0400, // memory for generated code
mtGC = 0x0500, // memory for GC
mtCompiler = 0x0600, // memory for compiler
mtInternal = 0x0700, // memory used by VM, but does not belong to
mtJavaHeap = 0x00, // Java heap
mtClass = 0x01, // memory class for Java classes
mtThread = 0x02, // memory for thread objects
mtThreadStack = 0x03,
mtCode = 0x04, // memory for generated code
mtGC = 0x05, // memory for GC
mtCompiler = 0x06, // memory for compiler
mtInternal = 0x07, // memory used by VM, but does not belong to
// any of above categories, and not used for
// native memory tracking
mtOther = 0x0800, // memory not used by VM
mtSymbol = 0x0900, // symbol
mtNMT = 0x0A00, // memory used by native memory tracking
mtChunk = 0x0B00, // chunk that holds content of arenas
mtJavaHeap = 0x0C00, // Java heap
mtClassShared = 0x0D00, // class data sharing
mtTest = 0x0E00, // Test type for verifying NMT
mtTracing = 0x0F00, // memory used for Tracing
mt_number_of_types = 0x000F, // number of memory types (mtDontTrack
mtOther = 0x08, // memory not used by VM
mtSymbol = 0x09, // symbol
mtNMT = 0x0A, // memory used by native memory tracking
mtClassShared = 0x0B, // class data sharing
mtChunk = 0x0C, // chunk that holds content of arenas
mtTest = 0x0D, // Test type for verifying NMT
mtTracing = 0x0E, // memory used for Tracing
mtNone = 0x0F, // undefined
mt_number_of_types = 0x10 // number of memory types (mtDontTrack
// is not included as validate type)
mtDontTrack = 0x0F00, // memory we do not or cannot track
mt_masks = 0x7F00,
// object type mask
otArena = 0x0010, // an arena object
otNMTRecorder = 0x0020, // memory recorder object
ot_masks = 0x00F0
};
#define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type)
#define HAS_VALID_MEMORY_TYPE(flags)((flags & mt_masks) != mtNone)
#define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks)
#define IS_ARENA_OBJ(flags) ((flags & ot_masks) == otArena)
#define IS_NMT_RECORDER(flags) ((flags & ot_masks) == otNMTRecorder)
#define NMT_CAN_TRACK(flags) (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack)))
typedef MemoryType MEMFLAGS;
typedef unsigned short MEMFLAGS;
#if INCLUDE_NMT
......@@ -189,27 +172,23 @@ const bool NMT_track_callsite = false;
#endif // INCLUDE_NMT
// debug build does not inline
#if defined(_NMT_NOINLINE_)
#define CURRENT_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0)
#define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
#else
#define CURRENT_PC (NMT_track_callsite? os::get_caller_pc(0) : 0)
#define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0)
#define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#endif
class NativeCallStack;
template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
public:
_NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw();
_NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new(size_t size) throw();
_NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
address caller_pc = 0) throw();
_NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw();
const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant)
throw();
_NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new [](size_t size) throw();
_NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
address caller_pc = 0) throw();
const NativeCallStack& stack) throw();
_NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant)
throw();
void operator delete(void* p);
void operator delete [] (void* p);
};
......@@ -384,13 +363,15 @@ class Chunk: CHeapObj<mtChunk> {
//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone|otArena> {
class Arena : public CHeapObj<mtNone> {
protected:
friend class ResourceMark;
friend class HandleMark;
friend class NoHandleMark;
friend class VMStructs;
MEMFLAGS _flags; // Memory tracking flags
Chunk *_first; // First chunk
Chunk *_chunk; // current chunk
char *_hwm, *_max; // High water mark and max in current chunk
......@@ -418,8 +399,8 @@ protected:
}
public:
Arena();
Arena(size_t init_size);
Arena(MEMFLAGS memflag);
Arena(MEMFLAGS memflag, size_t init_size);
~Arena();
void destruct_contents();
char* hwm() const { return _hwm; }
......@@ -518,8 +499,6 @@ protected:
static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
static void free_all(char** start, char** end) PRODUCT_RETURN;
// how many arena instances
NOT_PRODUCT(static volatile jint _instance_count;)
private:
// Reset this Arena to empty, access will trigger grow if necessary
void reset(void) {
......@@ -681,7 +660,7 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)
#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL)
NEW_C_HEAP_ARRAY3(type, (size), memflags, CURRENT_PC, AllocFailStrategy::RETURN_NULL)
#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
(type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))
......
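The MemoryType rewrite above drops the old 16-bit bitmap encoding, where the high byte held the memory type and a nibble held an object type such as otArena, in favour of plain small enum values and a bare unsigned short MEMFLAGS; the decoding macros (FLAGS_TO_MEMORY_TYPE, IS_ARENA_OBJ, NMT_CAN_TRACK, ...) go away with it. For reference, a sketch of what the removed scheme did, with the constants copied from the deleted lines:

// Old encoding, as deleted above: memory type in the high byte, object kind in bits 5..8.
typedef unsigned short OldFlags;
const OldFlags kMtMask  = 0x7F00;   // mt_masks
const OldFlags kOtMask  = 0x00F0;   // ot_masks
const OldFlags kOtArena = 0x0010;   // otArena

inline OldFlags flags_to_memory_type(OldFlags f) { return (OldFlags)(f & kMtMask); }   // FLAGS_TO_MEMORY_TYPE
inline bool     is_arena_obj(OldFlags f)         { return (f & kOtMask) == kOtArena; } // IS_ARENA_OBJ

// After this change MEMFLAGS is just the enum value itself (mtClass == 0x01, mtNone == 0x0F),
// so no masking is needed, and arena objects are tracked through the new
// record_new_arena()/record_arena_free() calls instead of an otArena bit.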
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -27,6 +27,7 @@
#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
// Explicit C-heap memory management
......@@ -49,12 +50,10 @@ inline void inc_stat_counter(volatile julong* dest, julong add_value) {
#endif
// allocate using malloc; will fail if no memory available
inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
const NativeCallStack& stack,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
if (pc == 0) {
pc = CURRENT_PC;
}
char* p = (char*) os::malloc(size, flags, pc);
char* p = (char*) os::malloc(size, flags, stack);
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
#endif
......@@ -63,10 +62,14 @@ inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
}
return p;
}
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
}
inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags,
inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
char* p = (char*) os::realloc(old, size, flags, CURRENT_PC);
char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
#endif
......@@ -85,32 +88,51 @@ inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {
template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
address caller_pc) throw() {
void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
const NativeCallStack& stack) throw() {
void* p = (void*)AllocateHeap(size, F, stack);
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
return p;
}
return p;
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
return CHeapObj<F>::operator new(size, CALLER_PC);
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
void* p = (void*)AllocateHeap(size, F, stack,
AllocFailStrategy::RETURN_NULL);
#ifdef ASSERT
if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
return p;
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
const std::nothrow_t& nothrow_constant) throw() {
return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
const NativeCallStack& stack) throw() {
return CHeapObj<F>::operator new(size, stack);
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size)
throw() {
return CHeapObj<F>::operator new(size, CALLER_PC);
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
address caller_pc) throw() {
return CHeapObj<F>::operator new(size, caller_pc);
const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
return CHeapObj<F>::operator new(size, nothrow_constant, stack);
}
template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
const std::nothrow_t& nothrow_constant) throw() {
return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}
template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
......
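The CHeapObj operator new family above now keys allocations to a NativeCallStack instead of a raw caller address; the plain forms still exist and forward to the stack-taking overloads with CALLER_PC, so ordinary call sites do not change. A minimal hedged sketch (SampleEntry is an illustrative class, not from this commit):

#include <new>
#include "memory/allocation.hpp"

class SampleEntry : public CHeapObj<mtInternal> {
 public:
  int _value;
};

static void cheapobj_example() {
  SampleEntry* a = new SampleEntry();                 // operator new(size) -> operator new(size, CALLER_PC)
  SampleEntry* b = new (std::nothrow) SampleEntry();  // nothrow overload, AllocFailStrategy::RETURN_NULL
  delete a;                                           // FreeHeap via CHeapObj<F>::operator delete
  delete b;
}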
......@@ -56,7 +56,7 @@ CardTableRS::CardTableRS(MemRegion whole_heap,
_ct_bs->initialize();
set_bs(_ct_bs);
_last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
mtGC, 0, AllocFailStrategy::RETURN_NULL);
mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_last_cur_val_in_gen == NULL) {
vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
}
......
......@@ -969,7 +969,8 @@ void MarkSweepPolicy::initialize_alignments() {
}
void MarkSweepPolicy::initialize_generations() {
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
_generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC,
AllocFailStrategy::RETURN_NULL);
if (_generations == NULL) {
vm_exit_during_initialization("Unable to allocate gen spec");
}
......
......@@ -704,10 +704,6 @@ HeapWord** GenCollectedHeap::end_addr() const {
return _gens[0]->end_addr();
}
size_t GenCollectedHeap::unsafe_max_alloc() {
return _gens[0]->unsafe_max_alloc_nogc();
}
// public collection interfaces
void GenCollectedHeap::collect(GCCause::Cause cause) {
......@@ -718,15 +714,18 @@ void GenCollectedHeap::collect(GCCause::Cause cause) {
#else // INCLUDE_ALL_GCS
ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
} else if (cause == GCCause::_wb_young_gc) {
// minor collection for WhiteBox API
collect(cause, 0);
} else {
#ifdef ASSERT
if (cause == GCCause::_scavenge_alot) {
// minor collection only
collect(cause, 0);
} else {
// Stop-the-world full collection
collect(cause, n_gens() - 1);
}
if (cause == GCCause::_scavenge_alot) {
// minor collection only
collect(cause, 0);
} else {
// Stop-the-world full collection
collect(cause, n_gens() - 1);
}
#else
// Stop-the-world full collection
collect(cause, n_gens() - 1);
......
......@@ -166,14 +166,6 @@ public:
HeapWord** top_addr() const;
HeapWord** end_addr() const;
// Return an estimate of the maximum allocation that could be performed
// without triggering any collection activity. In a generational
// collector, for example, this is probably the largest allocation that
// could be supported in the youngest generation. It is "unsafe" because
// no locks are taken; the result should be treated as an approximation,
// not a guarantee.
size_t unsafe_max_alloc();
// Does this heap support heap inspection? (+PrintClassHistogram)
virtual bool supports_heap_inspection() const { return true; }
......
......@@ -135,7 +135,7 @@ KlassInfoTable::KlassInfoTable(bool need_class_stats) {
_ref = (HeapWord*) Universe::boolArrayKlassObj();
_buckets =
(KlassInfoBucket*) AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
mtInternal, 0, AllocFailStrategy::RETURN_NULL);
mtInternal, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
if (_buckets != NULL) {
_size = _num_buckets;
for (int index = 0; index < _size; index++) {
......
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -103,11 +103,13 @@ MemRegion MemRegion::minus(const MemRegion mr2) const {
}
void* MemRegion::operator new(size_t size) throw() {
return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
return (address)AllocateHeap(size, mtGC, CURRENT_PC,
AllocFailStrategy::RETURN_NULL);
}
void* MemRegion::operator new [](size_t size) throw() {
return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
return (address)AllocateHeap(size, mtGC, CURRENT_PC,
AllocFailStrategy::RETURN_NULL);
}
void MemRegion::operator delete(void* p) {
FreeHeap(p, mtGC);
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -49,11 +49,11 @@ class ResourceArea: public Arena {
debug_only(static int _warned;) // to suppress multiple warnings
public:
ResourceArea() {
ResourceArea() : Arena(mtThread) {
debug_only(_nesting = 0;)
}
ResourceArea(size_t init_size) : Arena(init_size) {
ResourceArea(size_t init_size) : Arena(mtThread, init_size) {
debug_only(_nesting = 0;);
}
......@@ -64,7 +64,7 @@ public:
if (UseMallocOnly) {
// use malloc, but save pointer in res. area for later freeing
char** save = (char**)internal_malloc_4(sizeof(char*));
return (*save = (char*)os::malloc(size, mtThread));
return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC));
}
#endif
return (char*)Amalloc(size, alloc_failmode);
......
......@@ -665,6 +665,10 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
_printer(IdealGraphPrinter::printer()),
#endif
_congraph(NULL),
_comp_arena(mtCompiler),
_node_arena(mtCompiler),
_old_arena(mtCompiler),
_Compile_types(mtCompiler),
_replay_inline_data(NULL),
_late_inlines(comp_arena(), 2, 0, NULL),
_string_late_inlines(comp_arena(), 2, 0, NULL),
......@@ -972,6 +976,10 @@ Compile::Compile( ciEnv* ci_env,
_in_dump_cnt(0),
_printer(NULL),
#endif
_comp_arena(mtCompiler),
_node_arena(mtCompiler),
_old_arena(mtCompiler),
_Compile_types(mtCompiler),
_dead_node_list(comp_arena()),
_dead_node_count(0),
_congraph(NULL),
......
......@@ -265,7 +265,7 @@ void Type::Initialize_shared(Compile* current) {
// locking.
Arena* save = current->type_arena();
Arena* shared_type_arena = new (mtCompiler)Arena();
Arena* shared_type_arena = new (mtCompiler)Arena(mtCompiler);
current->set_type_arena(shared_type_arena);
_shared_type_dict =
......
/*
* Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -220,10 +220,17 @@
# include "runtime/vmThread.hpp"
# include "runtime/vm_operations.hpp"
# include "runtime/vm_version.hpp"
# include "services/allocationSite.hpp"
# include "services/lowMemoryDetector.hpp"
# include "services/mallocTracker.hpp"
# include "services/memBaseline.hpp"
# include "services/memoryPool.hpp"
# include "services/memoryService.hpp"
# include "services/memoryUsage.hpp"
# include "services/memReporter.hpp"
# include "services/memTracker.hpp"
# include "services/nmtCommon.hpp"
# include "services/virtualMemoryTracker.hpp"
# include "utilities/accessFlags.hpp"
# include "utilities/array.hpp"
# include "utilities/bitMap.hpp"
......@@ -237,6 +244,7 @@
# include "utilities/hashtable.hpp"
# include "utilities/histogram.hpp"
# include "utilities/macros.hpp"
# include "utilities/nativeCallStack.hpp"
# include "utilities/numberSeq.hpp"
# include "utilities/ostream.hpp"
# include "utilities/preserveException.hpp"
......
......@@ -73,6 +73,7 @@
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "trace/tracing.hpp"
#include "utilities/defaultStream.hpp"
......@@ -3582,6 +3583,7 @@ static char* get_bad_address() {
if (bad_address != NULL) {
os::protect_memory(bad_address, size, os::MEM_PROT_READ,
/*is_committed*/false);
MemTracker::record_virtual_memory_type((void*)bad_address, mtInternal);
}
}
return bad_address;
......@@ -5077,6 +5079,7 @@ void TestMetachunk_test();
void TestVirtualSpaceNode_test();
void TestNewSize_test();
void TestKlass_test();
void Test_linked_list();
#if INCLUDE_ALL_GCS
void TestOldFreeSpaceCalculation_test();
void TestG1BiasedArray_test();
......@@ -5104,6 +5107,7 @@ void execute_internal_vm_tests() {
run_unit_test(test_loggc_filename());
run_unit_test(TestNewSize_test());
run_unit_test(TestKlass_test());
run_unit_test(Test_linked_list());
#if INCLUDE_VM_STRUCTS
run_unit_test(VMStructs::test());
#endif
......
......@@ -43,13 +43,16 @@
#include "utilities/exceptions.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef INCLUDE_NMT
#if INCLUDE_NMT
#include "services/mallocSiteTable.hpp"
#include "services/memTracker.hpp"
#include "utilities/nativeCallStack.hpp"
#endif // INCLUDE_NMT
#include "compiler/compileBroker.hpp"
......@@ -221,6 +224,30 @@ WB_ENTRY(jint, WB_StressVirtualSpaceResize(JNIEnv* env, jobject o,
(size_t) magnitude, (size_t) iterations);
WB_END
WB_ENTRY(jboolean, WB_isObjectInOldGen(JNIEnv* env, jobject o, jobject obj))
oop p = JNIHandles::resolve(obj);
#if INCLUDE_ALL_GCS
if (UseG1GC) {
G1CollectedHeap* g1 = G1CollectedHeap::heap();
const HeapRegion* hr = g1->heap_region_containing(p);
if (hr == NULL) {
return false;
}
return !(hr->is_young());
} else if (UseParallelGC) {
ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
return !psh->is_in_young(p);
}
#endif // INCLUDE_ALL_GCS
GenCollectedHeap* gch = GenCollectedHeap::heap();
return !gch->is_in_young(p);
WB_END
WB_ENTRY(jlong, WB_GetObjectSize(JNIEnv* env, jobject o, jobject obj))
oop p = JNIHandles::resolve(obj);
return p->size() * HeapWordSize;
WB_END
#if INCLUDE_ALL_GCS
WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
G1CollectedHeap* g1 = G1CollectedHeap::heap();
......@@ -251,14 +278,18 @@ WB_END
// NMT picks it up correctly
WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
jlong addr = 0;
if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
}
return addr;
WB_END
// Alloc memory with pseudo call stack. The test can create pseudo malloc
// allocation site to stress the malloc tracking.
WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size, jint pseudo_stack))
address pc = (address)(size_t)pseudo_stack;
NativeCallStack stack(&pc, 1);
return (jlong)os::malloc(size, mtTest, stack);
WB_END
// Free the memory allocated by NMTAllocTest
WB_ENTRY(void, WB_NMTFree(JNIEnv* env, jobject o, jlong mem))
os::free((void*)(uintptr_t)mem, mtTest);
......@@ -267,10 +298,8 @@ WB_END
WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
jlong addr = 0;
if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
addr = (jlong)(uintptr_t)os::reserve_memory(size);
MemTracker::record_virtual_memory_type((address)addr, mtTest);
}
return addr;
WB_END
......@@ -289,19 +318,19 @@ WB_ENTRY(void, WB_NMTReleaseMemory(JNIEnv* env, jobject o, jlong addr, jlong siz
os::release_memory((char *)(uintptr_t)addr, size);
WB_END
// Block until the current generation of NMT data to be merged, used to reliably test the NMT feature
WB_ENTRY(jboolean, WB_NMTWaitForDataMerge(JNIEnv* env))
WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
return MemTracker::tracking_level() == NMT_detail;
WB_END
if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
return false;
WB_ENTRY(void, WB_NMTOverflowHashBucket(JNIEnv* env, jobject o, jlong num))
address pc = (address)1;
for (jlong index = 0; index < num; index ++) {
NativeCallStack stack(&pc, 1);
os::malloc(0, mtTest, stack);
pc += MallocSiteTable::hash_buckets();
}
return MemTracker::wbtest_wait_for_data_merge();
WB_END
WB_ENTRY(jboolean, WB_NMTIsDetailSupported(JNIEnv* env))
return MemTracker::tracking_level() == MemTracker::NMT_detail;
WB_END
#endif // INCLUDE_NMT
......@@ -668,6 +697,9 @@ WB_ENTRY(void, WB_FullGC(JNIEnv* env, jobject o))
Universe::heap()->collect(GCCause::_last_ditch_collection);
WB_END
WB_ENTRY(void, WB_YoungGC(JNIEnv* env, jobject o))
Universe::heap()->collect(GCCause::_wb_young_gc);
WB_END
WB_ENTRY(void, WB_ReadReservedMemory(JNIEnv* env, jobject o))
// static+volatile in order to force the read to happen
......@@ -811,6 +843,8 @@ bool WhiteBox::lookup_bool(const char* field_name, oop object) {
static JNINativeMethod methods[] = {
{CC"getObjectAddress", CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectAddress },
{CC"getObjectSize", CC"(Ljava/lang/Object;)J", (void*)&WB_GetObjectSize },
{CC"isObjectInOldGen", CC"(Ljava/lang/Object;)Z", (void*)&WB_isObjectInOldGen },
{CC"getHeapOopSize", CC"()I", (void*)&WB_GetHeapOopSize },
{CC"isClassAlive0", CC"(Ljava/lang/String;)Z", (void*)&WB_IsClassAlive },
{CC"parseCommandLine",
......@@ -831,12 +865,13 @@ static JNINativeMethod methods[] = {
#endif // INCLUDE_ALL_GCS
#if INCLUDE_NMT
{CC"NMTMalloc", CC"(J)J", (void*)&WB_NMTMalloc },
{CC"NMTMallocWithPseudoStack", CC"(JI)J", (void*)&WB_NMTMallocWithPseudoStack},
{CC"NMTFree", CC"(J)V", (void*)&WB_NMTFree },
{CC"NMTReserveMemory", CC"(J)J", (void*)&WB_NMTReserveMemory },
{CC"NMTCommitMemory", CC"(JJ)V", (void*)&WB_NMTCommitMemory },
{CC"NMTUncommitMemory", CC"(JJ)V", (void*)&WB_NMTUncommitMemory },
{CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
{CC"NMTWaitForDataMerge", CC"()Z", (void*)&WB_NMTWaitForDataMerge},
{CC"NMTOverflowHashBucket", CC"(J)V", (void*)&WB_NMTOverflowHashBucket},
{CC"NMTIsDetailSupported",CC"()Z", (void*)&WB_NMTIsDetailSupported},
#endif // INCLUDE_NMT
{CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll },
......@@ -885,6 +920,7 @@ static JNINativeMethod methods[] = {
(void*)&WB_GetStringVMFlag},
{CC"isInStringTable", CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable },
{CC"fullGC", CC"()V", (void*)&WB_FullGC },
{CC"youngGC", CC"()V", (void*)&WB_YoungGC },
{CC"readReservedMemory", CC"()V", (void*)&WB_ReadReservedMemory },
{CC"allocateMetaspace",
CC"(Ljava/lang/ClassLoader;J)J", (void*)&WB_AllocateMetaspace },
......
......@@ -294,6 +294,7 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "UseMPSS", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UseStringCache", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "UseOldInlining", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "AutoShutdownNMT", JDK_Version::jdk(9), JDK_Version::jdk(10) },
#ifdef PRODUCT
{ "DesiredMethodLimit",
JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
......@@ -2343,7 +2344,7 @@ bool Arguments::check_vm_args_consistency() {
if (PrintNMTStatistics) {
#if INCLUDE_NMT
if (MemTracker::tracking_level() == MemTracker::NMT_off) {
if (MemTracker::tracking_level() == NMT_off) {
#endif // INCLUDE_NMT
warning("PrintNMTStatistics is disabled, because native memory tracking is not enabled");
PrintNMTStatistics = false;
......@@ -3533,15 +3534,24 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
CommandLineFlags::printFlags(tty, false);
vm_exit(0);
}
if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
#if INCLUDE_NMT
MemTracker::init_tracking_options(tail);
#else
jio_fprintf(defaultStream::error_stream(),
"Native Memory Tracking is not supported in this VM\n");
return JNI_ERR;
#endif
if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
// The launcher did not setup nmt environment variable properly.
// if (!MemTracker::check_launcher_nmt_support(tail)) {
// warning("Native Memory Tracking did not setup properly, using wrong launcher?");
// }
// Verify if nmt option is valid.
if (MemTracker::verify_nmt_option()) {
// Late initialization, still in single-threaded mode.
if (MemTracker::tracking_level() >= NMT_summary) {
MemTracker::init();
}
} else {
vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
}
}
#endif
#ifndef PRODUCT
......
......@@ -943,11 +943,6 @@ class CommandLineFlags {
diagnostic(bool, PrintNMTStatistics, false, \
"Print native memory tracking summary data if it is on") \
\
diagnostic(bool, AutoShutdownNMT, true, \
"Automatically shutdown native memory tracking under stress " \
"situations. When set to false, native memory tracking tries to " \
"stay alive at the expense of JVM performance") \
\
diagnostic(bool, LogCompilation, false, \
"Log compilation activity in detail to LogFile") \
\
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -227,7 +227,7 @@ class HandleArea: public Arena {
HandleArea* _prev; // link to outer (older) area
public:
// Constructor
HandleArea(HandleArea* prev) : Arena(Chunk::tiny_size) {
HandleArea(HandleArea* prev) : Arena(mtThread, Chunk::tiny_size) {
debug_only(_handle_mark_nesting = 0);
debug_only(_no_handle_mark_nesting = 0);
_prev = prev;
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -34,8 +34,10 @@
#include "runtime/init.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
// Initialization done by VM thread in vm_init_globals()
void check_ThreadShadow();
void eventlog_init();
......@@ -131,6 +133,12 @@ jint init_globals() {
javaClasses_init(); // must happen after vtable initialization
stubRoutines_init2(); // note: StubRoutines need 2-phase init
#if INCLUDE_NMT
// Solaris stack is walkable only after stubRoutines are set up.
// On Other platforms, the stack is always walkable.
NMT_stack_walkable = true;
#endif // INCLUDE_NMT
// All the flags that get adjusted by VM_Version_init and os::init_2
// have been set so dump the flags now.
if (PrintFlagsFinal) {
......
......@@ -57,7 +57,6 @@
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memReporter.hpp"
#include "services/memTracker.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
......@@ -364,12 +363,7 @@ void print_statistics() {
#endif // ENABLE_ZAP_DEAD_LOCALS
// Native memory tracking data
if (PrintNMTStatistics) {
if (MemTracker::is_on()) {
BaselineTTYOutputer outputer(tty);
MemTracker::print_memory_usage(outputer, K, false);
} else {
tty->print_cr("%s", MemTracker::reason());
}
MemTracker::final_report(tty);
}
}
......@@ -401,12 +395,7 @@ void print_statistics() {
// Native memory tracking data
if (PrintNMTStatistics) {
if (MemTracker::is_on()) {
BaselineTTYOutputer outputer(tty);
MemTracker::print_memory_usage(outputer, K, false);
} else {
tty->print_cr("%s", MemTracker::reason());
}
MemTracker::final_report(tty);
}
}
......@@ -555,10 +544,6 @@ void before_exit(JavaThread * thread) {
BeforeExit_lock->notify_all();
}
// Shutdown NMT before exit. Otherwise,
// it will run into trouble when system destroys static variables.
MemTracker::shutdown(MemTracker::NMT_normal);
if (VerifyStringTableAtExit) {
int fail_cnt = 0;
{
......
......@@ -49,6 +49,7 @@
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "services/attachListener.hpp"
#include "services/nmtCommon.hpp"
#include "services/memTracker.hpp"
#include "services/threadService.hpp"
#include "utilities/defaultStream.hpp"
......@@ -561,7 +562,11 @@ static u_char* testMalloc(size_t alloc_size) {
return ptr;
}
void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
void* os::malloc(size_t size, MEMFLAGS flags) {
return os::malloc(size, flags, CALLER_PC);
}
void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
......@@ -587,11 +592,15 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
size = 1;
}
// NMT support
NMT_TrackingLevel level = MemTracker::tracking_level();
size_t nmt_header_size = MemTracker::malloc_header_size(level);
#ifndef ASSERT
const size_t alloc_size = size;
const size_t alloc_size = size + nmt_header_size;
#else
const size_t alloc_size = GuardedMemory::get_total_size(size);
if (size > alloc_size) { // Check for rollover.
const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_header_size);
if (size + nmt_header_size > alloc_size) { // Check for rollover.
return NULL;
}
#endif
......@@ -610,7 +619,7 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
return NULL;
}
// Wrap memory with guard
GuardedMemory guarded(ptr, size);
GuardedMemory guarded(ptr, size + nmt_header_size);
ptr = guarded.get_user_ptr();
#endif
if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
......@@ -623,48 +632,50 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
}
// we do not track guard memory
MemTracker::record_malloc((address)ptr, size, memflags, caller == 0 ? CALLER_PC : caller);
return ptr;
return MemTracker::record_malloc((address)ptr, size, memflags, stack, level);
}
void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) {
return os::realloc(memblock, size, flags, CALLER_PC);
}
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller) {
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
#ifndef ASSERT
NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
void* ptr = ::realloc(memblock, size);
if (ptr != NULL) {
tkr.record((address)memblock, (address)ptr, size, memflags,
caller == 0 ? CALLER_PC : caller);
} else {
tkr.discard();
}
return ptr;
// NMT support
void* membase = MemTracker::record_free(memblock);
NMT_TrackingLevel level = MemTracker::tracking_level();
size_t nmt_header_size = MemTracker::malloc_header_size(level);
void* ptr = ::realloc(membase, size + nmt_header_size);
return MemTracker::record_malloc(ptr, size, memflags, stack, level);
#else
if (memblock == NULL) {
return os::malloc(size, memflags, (caller == 0 ? CALLER_PC : caller));
return os::malloc(size, memflags, stack);
}
if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
tty->print_cr("os::realloc caught " PTR_FORMAT, memblock);
breakpoint();
}
verify_memory(memblock);
// NMT support
void* membase = MemTracker::malloc_base(memblock);
verify_memory(membase);
NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
if (size == 0) {
return NULL;
}
// always move the block
void* ptr = os::malloc(size, memflags, caller == 0 ? CALLER_PC : caller);
void* ptr = os::malloc(size, memflags, stack);
if (PrintMalloc) {
tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
}
// Copy to new memory if malloc didn't fail
if ( ptr != NULL ) {
GuardedMemory guarded(memblock);
memcpy(ptr, memblock, MIN2(size, guarded.get_user_size()));
if (paranoid) verify_memory(ptr);
GuardedMemory guarded(MemTracker::malloc_base(memblock));
// Guard's user data contains NMT header
size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock);
memcpy(ptr, memblock, MIN2(size, memblock_size));
if (paranoid) verify_memory(MemTracker::malloc_base(ptr));
if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
breakpoint();
......@@ -677,7 +688,6 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller
void os::free(void *memblock, MEMFLAGS memflags) {
address trackp = (address) memblock;
NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
#ifdef ASSERT
if (memblock == NULL) return;
......@@ -685,20 +695,22 @@ void os::free(void *memblock, MEMFLAGS memflags) {
if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock);
breakpoint();
}
verify_memory(memblock);
void* membase = MemTracker::record_free(memblock);
verify_memory(membase);
NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
GuardedMemory guarded(memblock);
GuardedMemory guarded(membase);
size_t size = guarded.get_user_size();
inc_stat_counter(&free_bytes, size);
memblock = guarded.release_for_freeing();
membase = guarded.release_for_freeing();
if (PrintMalloc && tty != NULL) {
fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)memblock);
fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)membase);
}
::free(membase);
#else
void* membase = MemTracker::record_free(memblock);
::free(membase);
#endif
MemTracker::record_free(trackp, memflags);
::free(memblock);
}
void os::init_random(long initval) {
......@@ -1412,7 +1424,7 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
char* result = pd_reserve_memory(bytes, addr, alignment_hint);
if (result != NULL) {
MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
}
return result;
......@@ -1422,7 +1434,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
MEMFLAGS flags) {
char* result = pd_reserve_memory(bytes, addr, alignment_hint);
if (result != NULL) {
MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
MemTracker::record_virtual_memory_type((address)result, flags);
}
......@@ -1432,7 +1444,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
char* result = pd_attempt_reserve_memory_at(bytes, addr);
if (result != NULL) {
MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
}
return result;
}
......@@ -1472,23 +1484,29 @@ void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
}
bool os::uncommit_memory(char* addr, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
bool res = pd_uncommit_memory(addr, bytes);
if (res) {
tkr.record((address)addr, bytes);
bool res;
if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
res = pd_uncommit_memory(addr, bytes);
if (res) {
tkr.record((address)addr, bytes);
}
} else {
tkr.discard();
res = pd_uncommit_memory(addr, bytes);
}
return res;
}
bool os::release_memory(char* addr, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
bool res = pd_release_memory(addr, bytes);
if (res) {
tkr.record((address)addr, bytes);
bool res;
if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
res = pd_release_memory(addr, bytes);
if (res) {
tkr.record((address)addr, bytes);
}
} else {
tkr.discard();
res = pd_release_memory(addr, bytes);
}
return res;
}
......@@ -1499,7 +1517,7 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset,
bool allow_exec) {
char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
if (result != NULL) {
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC);
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
}
return result;
}
......@@ -1512,12 +1530,15 @@ char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
}
bool os::unmap_memory(char *addr, size_t bytes) {
MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
bool result = pd_unmap_memory(addr, bytes);
if (result) {
tkr.record((address)addr, bytes);
bool result;
if (MemTracker::tracking_level() > NMT_minimal) {
Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
result = pd_unmap_memory(addr, bytes);
if (result) {
tkr.record((address)addr, bytes);
}
} else {
tkr.discard();
result = pd_unmap_memory(addr, bytes);
}
return result;
}
......
......@@ -66,6 +66,8 @@ class JavaThread;
class Event;
class DLL;
class FileHandle;
class NativeCallStack;
template<class E> class GrowableArray;
// %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose
......@@ -97,9 +99,11 @@ const bool ExecMem = true;
// Typedef for structured exception handling support
typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
class MallocTracker;
class os: AllStatic {
friend class VMStructs;
friend class MallocTracker;
public:
enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
......@@ -161,7 +165,10 @@ class os: AllStatic {
// Override me as needed
static int file_name_strcmp(const char* s1, const char* s2);
// get/unset environment variable
static bool getenv(const char* name, char* buffer, int len);
static bool unsetenv(const char* name);
static bool have_special_privileges();
static jlong javaTimeMillis();
......@@ -207,8 +214,13 @@ class os: AllStatic {
// Interface for detecting multiprocessor system
static inline bool is_MP() {
#if !INCLUDE_NMT
assert(_processor_count > 0, "invalid processor count");
return _processor_count > 1 || AssumeMP;
#else
// NMT needs atomic operations before this initialization.
return true;
#endif
}
static julong available_memory();
static julong physical_memory();
......@@ -651,12 +663,20 @@ class os: AllStatic {
static void* thread_local_storage_at(int index);
static void free_thread_local_storage(int index);
// Stack walk
static address get_caller_pc(int n = 0);
// Retrieve native stack frames.
// Parameters:
// stack: an array to store stack pointers.
// size: size of the above array.
// toSkip: number of stack frames to skip at the beginning.
// Return: number of stack frames captured.
static int get_native_stack(address* stack, int size, int toSkip = 0);
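As a rough, hedged sketch of how the interface documented above could be used (the frame count of 4 and the skip count of 1 are arbitrary values chosen for illustration, not taken from this change):
// Sketch only: capture up to 4 native frames, skipping the helper frame itself.
//   address frames[4];
//   int captured = os::get_native_stack(frames, 4, 1 /* toSkip */);
//   // frames[0] .. frames[captured - 1] now hold return addresses.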
// General allocation (must be MT-safe)
static void* malloc (size_t size, MEMFLAGS flags, address caller_pc = 0);
static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0);
static void* malloc (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
static void* malloc (size_t size, MEMFLAGS flags);
static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
static void* realloc (void *memblock, size_t size, MEMFLAGS flag);
static void free (void *memblock, MEMFLAGS flags = mtNone);
static bool check_heap(bool force = false); // verify C heap integrity
static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup
......
......@@ -50,7 +50,6 @@
#include "runtime/sweeper.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
......@@ -547,10 +546,6 @@ void SafepointSynchronize::do_cleanup_tasks() {
TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
ClassLoaderDataGraph::purge_if_needed();
}
if (MemTracker::is_on()) {
MemTracker::sync();
}
}
......
......@@ -331,8 +331,7 @@ void Thread::record_stack_base_and_size() {
#if INCLUDE_NMT
// record thread's native stack, stack grows downward
address stack_low_addr = stack_base() - stack_size();
MemTracker::record_thread_stack(stack_low_addr, stack_size(), this,
CURRENT_PC);
MemTracker::record_thread_stack(stack_low_addr, stack_size());
#endif // INCLUDE_NMT
}
......@@ -350,7 +349,7 @@ Thread::~Thread() {
#if INCLUDE_NMT
if (_stack_base != NULL) {
address low_stack_addr = stack_base() - stack_size();
MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);
MemTracker::release_thread_stack(low_stack_addr, stack_size());
#ifdef ASSERT
set_stack_base(NULL);
#endif
......@@ -1442,9 +1441,6 @@ void JavaThread::initialize() {
set_monitor_chunks(NULL);
set_next(NULL);
set_thread_state(_thread_new);
#if INCLUDE_NMT
set_recorder(NULL);
#endif
_terminated = _not_terminated;
_privileged_stack_top = NULL;
_array_for_gc = NULL;
......@@ -1519,7 +1515,6 @@ JavaThread::JavaThread(bool is_attaching_via_jni) :
_jni_attach_state = _not_attaching_via_jni;
}
assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
_safepoint_visible = false;
}
bool JavaThread::reguard_stack(address cur_sp) {
......@@ -1582,7 +1577,6 @@ JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
thr_type = entry_point == &compiler_thread_entry ? os::compiler_thread :
os::java_thread;
os::create_thread(this, thr_type, stack_sz);
_safepoint_visible = false;
// The _osthread may be NULL here because we ran out of memory (too many threads active).
// We need to throw an OutOfMemoryError - however we cannot do this here because the caller
// may hold a lock and all locks must be unlocked before throwing the exception (throwing
......@@ -1600,13 +1594,6 @@ JavaThread::~JavaThread() {
tty->print_cr("terminate thread %p", this);
}
// By now, this thread should already be invisible to safepoint,
// and its per-thread recorder also collected.
assert(!is_safepoint_visible(), "wrong state");
#if INCLUDE_NMT
assert(get_recorder() == NULL, "Already collected");
#endif // INCLUDE_NMT
// JSR166 -- return the parker to the free list
Parker::Release(_parker);
_parker = NULL ;
......@@ -3370,11 +3357,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// initialize TLS
ThreadLocalStorage::init();
// Bootstrap native memory tracking, so it can start recording memory
// activities before worker thread is started. This is the first phase
// of bootstrapping, VM is currently running in single-thread mode.
MemTracker::bootstrap_single_thread();
// Initialize output stream logging
ostream_init_log();
......@@ -3425,9 +3407,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// Initialize Java-Level synchronization subsystem
ObjectMonitor::Initialize() ;
// Second phase of bootstrapping, VM is about entering multi-thread mode
MemTracker::bootstrap_multi_thread();
// Initialize global modules
jint status = init_globals();
if (status != JNI_OK) {
......@@ -3449,9 +3428,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// real raw monitor. VM is setup enough here for raw monitor enter.
JvmtiExport::transition_pending_onload_raw_monitors();
// Fully start NMT
MemTracker::start();
// Create the VMThread
{ TraceTime timer("Start VMThread", TraceStartupTime);
VMThread::create();
......@@ -4089,8 +4065,6 @@ void Threads::add(JavaThread* p, bool force_daemon) {
daemon = false;
}
p->set_safepoint_visible(true);
ThreadService::add_thread(p, daemon);
// Possible GC point.
......@@ -4136,13 +4110,6 @@ void Threads::remove(JavaThread* p) {
// to do callbacks into the safepoint code. However, the safepoint code is not aware
// of this thread since it is removed from the queue.
p->set_terminated_value();
// Now, this thread is not visible to safepoint
p->set_safepoint_visible(false);
// once the thread becomes safepoint invisible, we can not use its per-thread
// recorder. And Threads::do_threads() no longer walks this thread, so we have
// to release its per-thread recorder here.
MemTracker::thread_exiting(p);
} // unlock Threads_lock
// Since Events::log uses a lock, we grab it outside the Threads_lock
......
......@@ -43,10 +43,6 @@
#include "runtime/unhandledOops.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_NMT
#include "services/memRecorder.hpp"
#endif // INCLUDE_NMT
#include "trace/traceBackend.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/exceptions.hpp"
......@@ -1059,16 +1055,6 @@ class JavaThread: public Thread {
bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; }
void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
#if INCLUDE_NMT
// native memory tracking
inline MemRecorder* get_recorder() const { return (MemRecorder*)_recorder; }
inline void set_recorder(MemRecorder* rc) { _recorder = rc; }
private:
// per-thread memory recorder
MemRecorder* volatile _recorder;
#endif // INCLUDE_NMT
// Suspend/resume support for JavaThread
private:
void set_ext_suspended() { set_suspend_flag (_ext_suspended); }
......@@ -1511,19 +1497,6 @@ public:
return result;
}
// NMT (Native memory tracking) support.
// This flag helps NMT to determine if this JavaThread will be blocked
// at safepoint. If not, ThreadCritical is needed for writing memory records.
// JavaThread is only safepoint visible when it is in Threads' thread list,
// it is not visible until it is added to the list and becomes invisible
// once it is removed from the list.
public:
bool is_safepoint_visible() const { return _safepoint_visible; }
void set_safepoint_visible(bool visible) { _safepoint_visible = visible; }
private:
bool _safepoint_visible;
// Static operations
public:
// Returns the running thread as a JavaThread
static inline JavaThread* current();
......
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
#define SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
#include "memory/allocation.hpp"
#include "utilities/nativeCallStack.hpp"
// Allocation site represents a code path that makes a memory
// allocation
template <class E> class AllocationSite VALUE_OBJ_CLASS_SPEC {
private:
NativeCallStack _call_stack;
E e;
public:
AllocationSite(const NativeCallStack& stack) : _call_stack(stack) { }
int hash() const { return _call_stack.hash(); }
bool equals(const NativeCallStack& stack) const {
return _call_stack.equals(stack);
}
bool equals(const AllocationSite<E>& other) const {
return other.equals(_call_stack);
}
const NativeCallStack* call_stack() const {
return &_call_stack;
}
// Information regarding this allocation
E* data() { return &e; }
const E* peek() const { return &e; }
};
#endif // SHARE_VM_SERVICES_ALLOCATION_SITE_HPP
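A minimal sketch of how the AllocationSite template above can be instantiated; the ByteCounter payload type is hypothetical and exists only to show the shape of the E parameter (the real instantiation added by this change is MallocSite, later in this patch):
// Hypothetical payload for illustration only.
struct ByteCounter {
  size_t _bytes;
  ByteCounter() : _bytes(0) { }
  void add(size_t sz) { _bytes += sz; }
};
// The site copies the call stack and exposes the payload via data()/peek().
static size_t record_example(const NativeCallStack& stack, size_t sz) {
  AllocationSite<ByteCounter> site(stack);
  site.data()->add(sz);          // mutable access to the payload
  return site.peek()->_bytes;    // read-only access to the payload
}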
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "services/mallocSiteTable.hpp"
/*
* Early os::malloc() calls come from initializations of static variables, long before entering any
* VM code. Upon the arrival of the first os::malloc() call, the malloc site hashtable has to be
* initialized, along with the allocation site for the hashtable entries.
* To ensure that the malloc site hashtable can be initialized without triggering any additional
* os::malloc() call, the hashtable bucket array and the hashtable entry allocation site have to be static.
* That is not a problem for the hashtable bucket array, since it is an array of pointers: the C runtime
* just allocates a block of memory for it and zeroes that memory.
* But for the hashtable entry allocation site object, things get tricky. The C runtime not only allocates
* memory for it, but also calls its constructor at some later time. If we initialized the allocation site
* at the first os::malloc() call, the object would be reinitialized when its constructor is eventually
* called by the C runtime.
* To work around this issue, we declare a static size_t array with the size of MallocSiteHashtableEntry,
* and that memory is used to instantiate the MallocSiteHashtableEntry for the hashtable entry allocation site.
* Since it is a primitive-type array, the C runtime does nothing other than assign the memory block to the
* variable, which is exactly what we want.
* The same trick is also applied to create the NativeCallStack object used for MallocSiteHashtableEntry
* memory allocation.
*
* Note: C++ objects usually require particular alignment, depending on the compiler implementation, so we
* declare the memory as size_t arrays to ensure it is aligned to the native machine word.
*/
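The comment above describes a reusable technique: reserve raw static storage as a primitive-type array (which the C runtime merely zeroes, never constructs) and build the object into it later with placement new. A minimal standalone sketch of the same pattern, using a hypothetical Widget type:
#include <new>       // placement new
#include <stddef.h>
struct Widget {
  int _state;
  Widget() : _state(42) { }
};
// Raw storage expressed as size_t words: zero-initialized by the C runtime,
// and no constructor is ever run on it behind our back.
static size_t _widget_storage[(sizeof(Widget) + sizeof(size_t) - 1) / sizeof(size_t)];
static Widget* _widget = NULL;
static bool initialize_widget() {
  // Construct the object in the pre-reserved storage; no heap allocation occurs.
  _widget = ::new ((void*)_widget_storage) Widget();
  return _widget != NULL;
}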
// Reserve enough memory for NativeCallStack and MallocSiteHashtableEntry objects
size_t MallocSiteTable::_hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
size_t MallocSiteTable::_hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
// Malloc site hashtable buckets
MallocSiteHashtableEntry* MallocSiteTable::_table[MallocSiteTable::table_size];
// concurrent access counter
volatile int MallocSiteTable::_access_count = 0;
// Tracking hashtable contention
NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)
/*
* Initialize malloc site table.
* Hashtable entries are malloc'd, which could cause infinite recursion.
* To avoid that problem, we pre-initialize a hash entry for
* this allocation site.
* The method is called during C runtime static variable initialization,
* so it runs in single-threaded mode from the JVM's perspective.
*/
bool MallocSiteTable::initialize() {
assert(sizeof(_hash_entry_allocation_stack) >= sizeof(NativeCallStack), "Sanity Check");
assert(sizeof(_hash_entry_allocation_site) >= sizeof(MallocSiteHashtableEntry),
"Sanity Check");
assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow");
// Fake the call stack for hashtable entry allocation
assert(NMT_TrackingStackDepth > 1, "At least one tracking stack");
// Create pseudo call stack for hashtable entry allocation
address pc[3];
if (NMT_TrackingStackDepth >= 3) {
pc[2] = (address)MallocSiteTable::allocation_at;
}
if (NMT_TrackingStackDepth >= 2) {
pc[1] = (address)MallocSiteTable::lookup_or_add;
}
pc[0] = (address)MallocSiteTable::new_entry;
// Instantiate the NativeCallStack object; the placement new operator has to be used (see comments above)
NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack)
NativeCallStack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth)));
// Instantiate hash entry for hashtable entry allocation callsite
MallocSiteHashtableEntry* entry = ::new ((void*)_hash_entry_allocation_site)
MallocSiteHashtableEntry(*stack);
// Add the allocation site to hashtable.
int index = hash_to_index(stack->hash());
_table[index] = entry;
return true;
}
// Walks entries in the hashtable.
// It stops the walk if the walker returns false.
bool MallocSiteTable::walk(MallocSiteWalker* walker) {
MallocSiteHashtableEntry* head;
for (int index = 0; index < table_size; index ++) {
head = _table[index];
while (head != NULL) {
if (!walker->do_malloc_site(head->peek())) {
return false;
}
head = (MallocSiteHashtableEntry*)head->next();
}
}
return true;
}
/*
* The hashtable has no deletion policy for individual entries,
* and each linked-list node is inserted via compare-and-swap,
* so every linked list is stable; contention only happens
* at the tail of a list.
* This method should not return NULL under normal circumstances.
* If NULL is returned, it indicates either:
* 1. Out of memory: a new hash entry cannot be allocated.
* 2. Hash bucket overflow.
* In either case, the caller should handle the situation.
*/
MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
size_t* pos_idx) {
int index = hash_to_index(key.hash());
assert(index >= 0, "Negative index");
*bucket_idx = (size_t)index;
*pos_idx = 0;
// First entry for this hash bucket
if (_table[index] == NULL) {
MallocSiteHashtableEntry* entry = new_entry(key);
// OOM check
if (entry == NULL) return NULL;
// swap in the head
if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) {
return entry->data();
}
delete entry;
}
MallocSiteHashtableEntry* head = _table[index];
while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
MallocSite* site = head->data();
if (site->equals(key)) {
// found matched entry
return head->data();
}
if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
MallocSiteHashtableEntry* entry = new_entry(key);
// OOM check
if (entry == NULL) return NULL;
if (head->atomic_insert(entry)) {
(*pos_idx) ++;
return entry->data();
}
// contended, other thread won
delete entry;
}
head = (MallocSiteHashtableEntry*)head->next();
(*pos_idx) ++;
}
return NULL;
}
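For context, a hedged sketch of the caller-side handling the comment above asks for, using the public allocation_at() wrapper (declared in mallocSiteTable.hpp below) rather than this private method; in this change the real handling lives in MallocHeader::record_malloc_site, which downgrades to summary tracking via MemTracker::transition_to(NMT_summary):
// Sketch only: record an allocation against its call site, downgrading to
// summary tracking when the site table is out of memory or a bucket overflows.
static bool record_site_or_downgrade(const NativeCallStack& stack, size_t size) {
  size_t bucket_idx;
  size_t pos_idx;
  if (!MallocSiteTable::allocation_at(stack, size, &bucket_idx, &pos_idx)) {
    MemTracker::transition_to(NMT_summary);   // keep at least summary data
    return false;
  }
  return true;
}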
// Access malloc site
MallocSite* MallocSiteTable::malloc_site(size_t bucket_idx, size_t pos_idx) {
assert(bucket_idx < table_size, "Invalid bucket index");
MallocSiteHashtableEntry* head = _table[bucket_idx];
for (size_t index = 0; index < pos_idx && head != NULL;
index ++, head = (MallocSiteHashtableEntry*)head->next());
assert(head != NULL, "Invalid position index");
return head->data();
}
// Allocates a MallocSiteHashtableEntry object. The special call stack
// (the pre-installed allocation site) has to be used to avoid infinite
// recursion.
MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key) {
void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT,
*hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL);
return ::new (p) MallocSiteHashtableEntry(key);
}
void MallocSiteTable::reset() {
for (int index = 0; index < table_size; index ++) {
MallocSiteHashtableEntry* head = _table[index];
_table[index] = NULL;
delete_linked_list(head);
}
}
void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {
MallocSiteHashtableEntry* p;
while (head != NULL) {
p = head;
head = (MallocSiteHashtableEntry*)head->next();
if (p != (MallocSiteHashtableEntry*)_hash_entry_allocation_site) {
delete p;
}
}
}
void MallocSiteTable::shutdown() {
AccessLock locker(&_access_count);
locker.exclusiveLock();
reset();
}
bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {
assert(walker != NULL, "NULL walker");
AccessLock locker(&_access_count);
if (locker.sharedLock()) {
NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
return walk(walker);
}
return false;
}
void MallocSiteTable::AccessLock::exclusiveLock() {
jint target;
jint val;
assert(_lock_state != ExclusiveLock, "Can only call once");
assert(*_lock >= 0, "Can not contend exclusive lock");
// make counter negative to block out shared locks
do {
val = *_lock;
target = _MAGIC_ + *_lock;
} while (Atomic::cmpxchg(target, _lock, val) != val);
// wait for all readers to exit
while (*_lock != _MAGIC_) {
#ifdef _WINDOWS
os::naked_short_sleep(1);
#else
os::NakedYield();
#endif
}
_lock_state = ExclusiveLock;
}
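A worked example of the counter states exclusiveLock() walks through, assuming _MAGIC_ == min_jint and three readers currently holding the shared lock (the numbers are illustrative only):
//   *_lock == 3                          // three readers inside
//   writer: cmpxchg swaps 3 -> _MAGIC_ + 3  // counter becomes min_jint + 3 (negative)
//   late reader: Atomic::add(1) < 0      // sees a negative count, backs off with add(-1)
//   each reader exits: Atomic::dec()     // the AccessLock destructor decrements the counter
//   *_lock == _MAGIC_                    // all readers gone; the writer proceeds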
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
#if INCLUDE_NMT
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/allocationSite.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"
// MallocSite represents a code path that eventually calls
// os::malloc() to allocate memory
class MallocSite : public AllocationSite<MemoryCounter> {
public:
MallocSite() :
AllocationSite<MemoryCounter>(NativeCallStack::EMPTY_STACK) { }
MallocSite(const NativeCallStack& stack) :
AllocationSite<MemoryCounter>(stack) { }
void allocate(size_t size) { data()->allocate(size); }
void deallocate(size_t size) { data()->deallocate(size); }
// Memory allocated from this code path
size_t size() const { return peek()->size(); }
// The number of calls that were made
size_t count() const { return peek()->count(); }
};
// Malloc site hashtable entry
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
private:
MallocSite _malloc_site;
MallocSiteHashtableEntry* _next;
public:
MallocSiteHashtableEntry() : _next(NULL) { }
MallocSiteHashtableEntry(NativeCallStack stack):
_malloc_site(stack), _next(NULL) { }
inline const MallocSiteHashtableEntry* next() const {
return _next;
}
// Insert an entry atomically.
// Return true if the entry is inserted successfully.
// The operation can fail due to contention from other threads.
bool atomic_insert(const MallocSiteHashtableEntry* entry) {
return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
NULL) == NULL);
}
void set_callsite(const MallocSite& site) {
_malloc_site = site;
}
inline const MallocSite* peek() const { return &_malloc_site; }
inline MallocSite* data() { return &_malloc_site; }
inline long hash() const { return _malloc_site.hash(); }
inline bool equals(const NativeCallStack& stack) const {
return _malloc_site.equals(stack);
}
// Allocation/deallocation on this allocation site
inline void allocate(size_t size) { _malloc_site.allocate(size); }
inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
// Memory counters
inline size_t size() const { return _malloc_site.size(); }
inline size_t count() const { return _malloc_site.count(); }
};
// The walker walks every entry in the MallocSiteTable
class MallocSiteWalker : public StackObj {
public:
virtual bool do_malloc_site(const MallocSite* e) { return false; }
};
/*
* Native memory tracking call site table.
* The table is only needed when detail tracking is enabled.
*/
class MallocSiteTable : AllStatic {
private:
// The number of hash buckets in this hashtable. The number should
// be tuned if malloc activity changes significantly.
// The statistics data can be obtained via jcmd:
// jcmd <pid> VM.native_memory statistics.
// Currently, the (number of buckets / number of entries) ratio is
// about 1 / 6.
enum {
table_base_size = 128, // The base size is calculated from statistics to give
// table ratio around 1:6
table_size = (table_base_size * NMT_TrackingStackDepth - 1)
};
// This is a very special lock that allows multiple shared accesses (sharedLock), but
// once exclusive access (exclusiveLock) is requested, all further shared accesses are
// rejected forever.
class AccessLock : public StackObj {
enum LockState {
NoLock,
SharedLock,
ExclusiveLock
};
private:
// A very large negative number. The only possibility to "overflow"
// this number is when there are more than -min_jint threads in
// this process, which is not going to happen in the foreseeable future.
const static int _MAGIC_ = min_jint;
LockState _lock_state;
volatile int* _lock;
public:
AccessLock(volatile int* lock) :
_lock(lock), _lock_state(NoLock) {
}
~AccessLock() {
if (_lock_state == SharedLock) {
Atomic::dec((volatile jint*)_lock);
}
}
// Acquire shared lock.
// Return true if shared access is granted.
inline bool sharedLock() {
jint res = Atomic::add(1, _lock);
if (res < 0) {
Atomic::add(-1, _lock);
return false;
}
_lock_state = SharedLock;
return true;
}
// Acquire exclusive lock
void exclusiveLock();
};
public:
static bool initialize();
static void shutdown();
NOT_PRODUCT(static int access_peak_count() { return _peak_count; })
// Number of hash buckets
static inline int hash_buckets() { return (int)table_size; }
// Access and copy a call stack from this table. Shared lock should be
// acquired before accessing the entry.
static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
size_t pos_idx) {
AccessLock locker(&_access_count);
if (locker.sharedLock()) {
NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
MallocSite* site = malloc_site(bucket_idx, pos_idx);
if (site != NULL) {
stack = *site->call_stack();
return true;
}
}
return false;
}
// Record a new allocation from the specified call path.
// Return true if the allocation is recorded successfully; bucket_idx
// and pos_idx are also updated to indicate the entry where the allocation
// information was recorded.
// False is returned only under rare scenarios:
// 1. out of memory
// 2. hash bucket overflow
static inline bool allocation_at(const NativeCallStack& stack, size_t size,
size_t* bucket_idx, size_t* pos_idx) {
AccessLock locker(&_access_count);
if (locker.sharedLock()) {
NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx);
if (site != NULL) site->allocate(size);
return site != NULL;
}
return false;
}
// Record memory deallocation. bucket_idx and pos_idx indicate where the allocation
// information was recorded.
static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
AccessLock locker(&_access_count);
if (locker.sharedLock()) {
NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
MallocSite* site = malloc_site(bucket_idx, pos_idx);
if (site != NULL) {
site->deallocate(size);
return true;
}
}
return false;
}
// Walk this table.
static bool walk_malloc_site(MallocSiteWalker* walker);
private:
static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
static void reset();
// Delete a bucket linked list
static void delete_linked_list(MallocSiteHashtableEntry* head);
static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
static bool walk(MallocSiteWalker* walker);
static inline int hash_to_index(int hash) {
hash = (hash > 0) ? hash : (-hash);
return (hash % table_size);
}
static inline const NativeCallStack* hash_entry_allocation_stack() {
return (NativeCallStack*)_hash_entry_allocation_stack;
}
private:
// Counter for counting concurrent access
static volatile int _access_count;
// The callsite hashtable. It has to be a static table,
// since malloc calls can come from the C runtime linker.
static MallocSiteHashtableEntry* _table[table_size];
// Reserve enough memory for placing the objects
// The memory for hashtable entry allocation stack object
static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
// The memory for hashtable entry allocation callsite object
static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
NOT_PRODUCT(static int _peak_count;)
};
#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
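To illustrate the walker interface declared above, a hedged sketch of a MallocSiteWalker that sums the bytes attributed to every recorded site; the TotalSizeWalker name is invented for the example:
// Sketch only: add up the bytes attributed to every recorded malloc site.
class TotalSizeWalker : public MallocSiteWalker {
 private:
  size_t _total;
 public:
  TotalSizeWalker() : _total(0) { }
  virtual bool do_malloc_site(const MallocSite* site) {
    _total += site->size();
    return true;              // keep walking; returning false stops the walk
  }
  size_t total() const { return _total; }
};
// Usage: only succeeds while the table still grants shared access.
//   TotalSizeWalker w;
//   if (MallocSiteTable::walk_malloc_site(&w)) {
//     size_t bytes_tracked = w.total();
//   }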
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomic.inline.hpp"
#include "services/mallocSiteTable.hpp"
#include "services/mallocTracker.hpp"
#include "services/mallocTracker.inline.hpp"
#include "services/memTracker.hpp"
size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
// Total malloc'd memory amount
size_t MallocMemorySnapshot::total() const {
size_t amount = 0;
for (int index = 0; index < mt_number_of_types; index ++) {
amount += _malloc[index].malloc_size();
}
amount += _tracking_header.size() + total_arena();
return amount;
}
// Total malloc'd memory used by arenas
size_t MallocMemorySnapshot::total_arena() const {
size_t amount = 0;
for (int index = 0; index < mt_number_of_types; index ++) {
amount += _malloc[index].arena_size();
}
return amount;
}
void MallocMemorySnapshot::reset() {
_tracking_header.reset();
for (int index = 0; index < mt_number_of_types; index ++) {
_malloc[index].reset();
}
}
// Make adjustment by subtracting chunks used by arenas
// from total chunks to get the total free chunk size
void MallocMemorySnapshot::make_adjustment() {
size_t arena_size = total_arena();
int chunk_idx = NMTUtil::flag_to_index(mtChunk);
_malloc[chunk_idx].record_free(arena_size);
}
void MallocMemorySummary::initialize() {
assert(sizeof(_snapshot) >= sizeof(MallocMemorySnapshot), "Sanity Check");
// Uses placement new operator to initialize static area.
::new ((void*)_snapshot)MallocMemorySnapshot();
}
void MallocHeader::release() const {
// Tracking has already shut down; no housekeeping is needed anymore
if (MemTracker::tracking_level() <= NMT_minimal) return;
MallocMemorySummary::record_free(size(), flags());
MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
if (tracking_level() == NMT_detail) {
MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
}
}
bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size,
size_t* bucket_idx, size_t* pos_idx) const {
bool ret = MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx);
// Something went wrong: could be OOM or malloc site table overflow.
// We want to keep tracking data under OOM circumstances, so transition to
// summary tracking.
if (!ret) {
MemTracker::transition_to(NMT_summary);
}
return ret;
}
bool MallocHeader::get_stack(NativeCallStack& stack) const {
return MallocSiteTable::access_stack(stack, _bucket_idx, _pos_idx);
}
bool MallocTracker::initialize(NMT_TrackingLevel level) {
if (level >= NMT_summary) {
MallocMemorySummary::initialize();
}
if (level == NMT_detail) {
return MallocSiteTable::initialize();
}
return true;
}
bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
assert(from != NMT_off, "Can not transition from off state");
assert(to != NMT_off, "Can not transition to off state");
if (from == NMT_minimal) {
MallocMemorySummary::reset();
}
if (to == NMT_detail) {
assert(from == NMT_minimal || from == NMT_summary, "Just check");
return MallocSiteTable::initialize();
} else if (from == NMT_detail) {
assert(to == NMT_minimal || to == NMT_summary, "Just check");
MallocSiteTable::shutdown();
}
return true;
}
// Record a malloc memory allocation
void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
const NativeCallStack& stack, NMT_TrackingLevel level) {
void* memblock; // the address for user data
MallocHeader* header = NULL;
if (malloc_base == NULL) {
return NULL;
}
// Check malloc size: size has to be <= MAX_MALLOC_SIZE. Exceeding it is only possible on 32-bit
// systems, when malloc size >= 1GB, but it is safe to assume that won't happen.
if (size > MAX_MALLOC_SIZE) {
fatal("Should not use malloc for big memory block, use virtual memory instead");
}
// Uses the global placement new operator to initialize the malloc header
switch(level) {
case NMT_off:
return malloc_base;
case NMT_minimal: {
MallocHeader* hdr = ::new (malloc_base) MallocHeader();
break;
}
case NMT_summary: {
header = ::new (malloc_base) MallocHeader(size, flags);
break;
}
case NMT_detail: {
header = ::new (malloc_base) MallocHeader(size, flags, stack);
break;
}
default:
ShouldNotReachHere();
}
memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
// The alignment check: 8-byte alignment for 32-bit systems,
// 16-byte alignment for 64-bit systems.
assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");
// Sanity check
assert(get_memory_tracking_level(memblock) == level,
"Wrong tracking level");
#ifdef ASSERT
if (level > NMT_minimal) {
// Read back
assert(get_size(memblock) == size, "Wrong size");
assert(get_flags(memblock) == flags, "Wrong flags");
}
#endif
return memblock;
}
void* MallocTracker::record_free(void* memblock) {
// Never turned on
if (MemTracker::tracking_level() == NMT_off ||
memblock == NULL) {
return memblock;
}
MallocHeader* header = malloc_header(memblock);
header->release();
return (void*)header;
}
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
#if INCLUDE_NMT
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"
/*
* This counter class counts memory allocation and deallocation,
* records total memory allocation size and number of allocations.
* The counters are updated atomically.
*/
class MemoryCounter VALUE_OBJ_CLASS_SPEC {
private:
size_t _count;
size_t _size;
DEBUG_ONLY(size_t _peak_count;)
DEBUG_ONLY(size_t _peak_size; )
public:
MemoryCounter() : _count(0), _size(0) {
DEBUG_ONLY(_peak_count = 0;)
DEBUG_ONLY(_peak_size = 0;)
}
// Reset counters
void reset() {
_size = 0;
_count = 0;
DEBUG_ONLY(_peak_size = 0;)
DEBUG_ONLY(_peak_count = 0;)
}
inline void allocate(size_t sz) {
Atomic::add(1, (volatile MemoryCounterType*)&_count);
if (sz > 0) {
Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
}
DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
}
inline void deallocate(size_t sz) {
assert(_count > 0, "Negative counter");
assert(_size >= sz, "Negative size");
Atomic::add(-1, (volatile MemoryCounterType*)&_count);
if (sz > 0) {
Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
}
}
inline void resize(long sz) {
if (sz != 0) {
Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
}
}
inline size_t count() const { return _count; }
inline size_t size() const { return _size; }
DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
DEBUG_ONLY(inline size_t peak_size() const { return _peak_size; })
};
/*
* Malloc memory used by a particular subsystem.
* It includes the memory acquired through os::malloc()
* call and arena's backing memory.
*/
class MallocMemory VALUE_OBJ_CLASS_SPEC {
private:
MemoryCounter _malloc;
MemoryCounter _arena;
public:
MallocMemory() { }
inline void record_malloc(size_t sz) {
_malloc.allocate(sz);
}
inline void record_free(size_t sz) {
_malloc.deallocate(sz);
}
inline void record_new_arena() {
_arena.allocate(0);
}
inline void record_arena_free() {
_arena.deallocate(0);
}
inline void record_arena_size_change(long sz) {
_arena.resize(sz);
}
void reset() {
_malloc.reset();
_arena.reset();
}
inline size_t malloc_size() const { return _malloc.size(); }
inline size_t malloc_count() const { return _malloc.count();}
inline size_t arena_size() const { return _arena.size(); }
inline size_t arena_count() const { return _arena.count(); }
DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
DEBUG_ONLY(inline const MemoryCounter& arena_counter() const { return _arena; })
};
class MallocMemorySummary;
// A snapshot of malloc'd memory, including malloc memory
// usage by type and the memory used by tracking itself.
class MallocMemorySnapshot : public ResourceObj {
friend class MallocMemorySummary;
private:
MallocMemory _malloc[mt_number_of_types];
MemoryCounter _tracking_header;
public:
inline MallocMemory* by_type(MEMFLAGS flags) {
int index = NMTUtil::flag_to_index(flags);
return &_malloc[index];
}
inline MallocMemory* by_index(int index) {
assert(index >= 0, "Index out of bound");
assert(index < mt_number_of_types, "Index out of bound");
return &_malloc[index];
}
inline MemoryCounter* malloc_overhead() {
return &_tracking_header;
}
// Total malloc'd memory amount
size_t total() const;
// Total malloc'd memory used by arenas
size_t total_arena() const;
inline size_t thread_count() const {
MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
return s->by_type(mtThreadStack)->malloc_count();
}
void reset();
void copy_to(MallocMemorySnapshot* s) {
s->_tracking_header = _tracking_header;
for (int index = 0; index < mt_number_of_types; index ++) {
s->_malloc[index] = _malloc[index];
}
}
// Make adjustment by subtracting chunks used by arenas
// from total chunks to get total free chunk size
void make_adjustment();
};
/*
* This class is for collecting malloc statistics at summary level
*/
class MallocMemorySummary : AllStatic {
private:
// Reserve memory for placement of MallocMemorySnapshot object
static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
public:
static void initialize();
static inline void record_malloc(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_malloc(size);
}
static inline void record_free(size_t size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_free(size);
}
static inline void record_new_arena(MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_new_arena();
}
static inline void record_arena_free(MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_arena_free();
}
static inline void record_arena_size_change(long size, MEMFLAGS flag) {
as_snapshot()->by_type(flag)->record_arena_size_change(size);
}
static void snapshot(MallocMemorySnapshot* s) {
as_snapshot()->copy_to(s);
s->make_adjustment();
}
// Record memory used by malloc tracking header
static inline void record_new_malloc_header(size_t sz) {
as_snapshot()->malloc_overhead()->allocate(sz);
}
static inline void record_free_malloc_header(size_t sz) {
as_snapshot()->malloc_overhead()->deallocate(sz);
}
// The memory used by malloc tracking headers
static inline size_t tracking_overhead() {
return as_snapshot()->malloc_overhead()->size();
}
// Reset all counters to zero
static void reset() {
as_snapshot()->reset();
}
static MallocMemorySnapshot* as_snapshot() {
return (MallocMemorySnapshot*)_snapshot;
}
};
/*
* Malloc tracking header.
* To satisfy the malloc alignment requirement, NMT uses 2 machine words for its tracking header,
* which ensures 8-byte alignment on 32-bit systems and 16-byte alignment on 64-bit systems (product build).
*/
class MallocHeader VALUE_OBJ_CLASS_SPEC {
#ifdef _LP64
size_t _size : 62;
size_t _level : 2;
size_t _flags : 8;
size_t _pos_idx : 16;
size_t _bucket_idx: 40;
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)1 << 40)
#define MAX_BUCKET_LENGTH ((size_t)(1 << 16))
#define MAX_MALLOC_SIZE (((size_t)1 << 62) - 1)
#else
size_t _size : 30;
size_t _level : 2;
size_t _flags : 8;
size_t _pos_idx : 8;
size_t _bucket_idx: 16;
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)(1 << 16))
#define MAX_BUCKET_LENGTH ((size_t)(1 << 8))
// Max malloc size = 1GB - 1 on a 32-bit system, which has 4GB of total memory
#define MAX_MALLOC_SIZE ((size_t)(1 << 30) - 1)
#endif // _LP64
public:
// Summary tracking header
MallocHeader(size_t size, MEMFLAGS flags) {
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
"Wrong header size");
_level = NMT_summary;
_flags = flags;
set_size(size);
MallocMemorySummary::record_malloc(size, flags);
MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
}
// Detail tracking header
MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
"Wrong header size");
_level = NMT_detail;
_flags = flags;
set_size(size);
size_t bucket_idx;
size_t pos_idx;
if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
_bucket_idx = bucket_idx;
_pos_idx = pos_idx;
}
MallocMemorySummary::record_malloc(size, flags);
MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
}
// Minimal tracking header
MallocHeader() {
assert(sizeof(MallocHeader) == sizeof(void*) * 2,
"Wrong header size");
_level = (unsigned short)NMT_minimal;
}
inline NMT_TrackingLevel tracking_level() const {
return (NMT_TrackingLevel)_level;
}
inline size_t size() const { return _size; }
inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
bool get_stack(NativeCallStack& stack) const;
// Cleanup tracking information before the memory is released.
void release() const;
private:
inline void set_size(size_t size) {
assert(size <= MAX_MALLOC_SIZE, "Malloc size too large, should use virtual memory?");
_size = size;
}
bool record_malloc_site(const NativeCallStack& stack, size_t size,
size_t* bucket_idx, size_t* pos_idx) const;
};
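As a quick sanity check of the bit layout above: on 64-bit systems the first word packs 62 (_size) + 2 (_level) = 64 bits and the second word packs 8 (_flags) + 16 (_pos_idx) + 40 (_bucket_idx) = 64 bits; on 32-bit systems the words pack 30 + 2 = 32 and 8 + 8 + 16 = 32 bits. Either way the header occupies exactly two machine words, which is what the constructors assert at runtime. A hedged compile-time form of the same check (not part of this change) could look like:
// Sketch only: C++98-portable compile-time assertion that the header is two words;
// the array type has a negative size, and fails to compile, if the condition is false.
typedef char malloc_header_is_two_words[
    (sizeof(MallocHeader) == 2 * sizeof(void*)) ? 1 : -1];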
// Main class called from MemTracker to track malloc activities
class MallocTracker : AllStatic {
public:
// Initialize malloc tracker for specific tracking level
static bool initialize(NMT_TrackingLevel level);
static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
// malloc tracking header size for specific tracking level
static inline size_t malloc_header_size(NMT_TrackingLevel level) {
return (level == NMT_off) ? 0 : sizeof(MallocHeader);
}
// Parameter name convention:
// memblock : the beginning address for user data
// malloc_base: the beginning address that includes malloc tracking header
//
// The relationship:
// memblock = (char*)malloc_base + sizeof(nmt header)
//
// Record malloc on specified memory block
static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
const NativeCallStack& stack, NMT_TrackingLevel level);
// Record free on specified memory block
static void* record_free(void* memblock);
// Get tracking level of specified memory block
static inline NMT_TrackingLevel get_memory_tracking_level(void* memblock);
// Offset memory address to header address
static inline void* get_base(void* memblock);
static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
if (memblock == NULL || level == NMT_off) return memblock;
return (char*)memblock - malloc_header_size(level);
}
// Get memory size
static inline size_t get_size(void* memblock) {
MallocHeader* header = malloc_header(memblock);
assert(header->tracking_level() >= NMT_summary,
"Wrong tracking level");
return header->size();
}
// Get memory type
static inline MEMFLAGS get_flags(void* memblock) {
MallocHeader* header = malloc_header(memblock);
assert(header->tracking_level() >= NMT_summary,
"Wrong tracking level");
return header->flags();
}
// Get header size
static inline size_t get_header_size(void* memblock) {
return (memblock == NULL) ? 0 : sizeof(MallocHeader);
}
static inline void record_new_arena(MEMFLAGS flags) {
MallocMemorySummary::record_new_arena(flags);
}
static inline void record_arena_free(MEMFLAGS flags) {
MallocMemorySummary::record_arena_free(flags);
}
static inline void record_arena_size_change(int size, MEMFLAGS flags) {
MallocMemorySummary::record_arena_size_change(size, flags);
}
private:
static inline MallocHeader* malloc_header(void *memblock) {
assert(memblock != NULL, "NULL pointer");
MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
assert(header->tracking_level() >= NMT_minimal, "Bad header");
return header;
}
};
#endif // INCLUDE_NMT
#endif //SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
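To make the memblock/malloc_base naming convention above concrete, a hedged sketch of the round trip an allocator wrapper performs; the real code paths in this patch go through MemTracker (see os::malloc/os::free earlier), and tracked_malloc/tracked_free here are invented names for illustration:
// Sketch only: allocate header + user data, hand out the user pointer,
// and recover the base pointer again on free.
static void* tracked_malloc(size_t size, MEMFLAGS flags,
                            const NativeCallStack& stack) {
  NMT_TrackingLevel level = MemTracker::tracking_level();
  size_t header_size = MallocTracker::malloc_header_size(level);
  void* malloc_base = ::malloc(size + header_size);   // raw allocation
  // Writes the header (if any) and returns malloc_base + header_size.
  return MallocTracker::record_malloc(malloc_base, size, flags, stack, level);
}
static void tracked_free(void* memblock) {
  // Updates the counters and returns the original malloc_base.
  void* malloc_base = MallocTracker::record_free(memblock);
  ::free(malloc_base);
}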
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -22,21 +22,22 @@
*
*/
#include "precompiled.hpp"
#include "services/memPtr.hpp"
#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
#define SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
#include "services/mallocTracker.hpp"
#include "services/memTracker.hpp"
volatile jint SequenceGenerator::_seq_number = 1;
volatile unsigned long SequenceGenerator::_generation = 1;
NOT_PRODUCT(jint SequenceGenerator::_max_seq_number = 1;)
inline NMT_TrackingLevel MallocTracker::get_memory_tracking_level(void* memblock) {
assert(memblock != NULL, "Sanity check");
if (MemTracker::tracking_level() == NMT_off) return NMT_off;
MallocHeader* header = malloc_header(memblock);
return header->tracking_level();
}
jint SequenceGenerator::next() {
jint seq = Atomic::add(1, &_seq_number);
if (seq < 0) {
MemTracker::shutdown(MemTracker::NMT_sequence_overflow);
} else {
NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
}
return seq;
inline void* MallocTracker::get_base(void* memblock){
return get_base(memblock, MemTracker::tracking_level());
}
#endif // SHARE_VM_SERVICES_MALLOC_TRACKER_INLINE_HPP
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -25,425 +25,181 @@
#ifndef SHARE_VM_SERVICES_MEM_BASELINE_HPP
#define SHARE_VM_SERVICES_MEM_BASELINE_HPP
#if INCLUDE_NMT
#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "services/memPtr.hpp"
#include "services/memSnapshot.hpp"
#include "services/mallocSiteTable.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"
#include "services/virtualMemoryTracker.hpp"
#include "utilities/linkedlist.hpp"
// compare unsigned number
#define UNSIGNED_COMPARE(a, b) ((a > b) ? 1 : ((a == b) ? 0 : -1))
typedef LinkedListIterator<MallocSite> MallocSiteIterator;
typedef LinkedListIterator<VirtualMemoryAllocationSite> VirtualMemorySiteIterator;
typedef LinkedListIterator<ReservedMemoryRegion> VirtualMemoryAllocationIterator;
/*
* MallocCallsitePointer and VMCallsitePointer are used
* to baseline memory blocks with their callsite information.
* They are only available when detail tracking is turned
* on.
* Baseline a memory snapshot
*/
/* baselined malloc record aggregated by callsite */
class MallocCallsitePointer : public MemPointer {
private:
size_t _count; // number of malloc invocation from this callsite
size_t _amount; // total amount of memory malloc-ed from this callsite
class MemBaseline VALUE_OBJ_CLASS_SPEC {
public:
MallocCallsitePointer() {
_count = 0;
_amount = 0;
}
MallocCallsitePointer(address pc) : MemPointer(pc) {
_count = 0;
_amount = 0;
}
MallocCallsitePointer& operator=(const MallocCallsitePointer& p) {
MemPointer::operator=(p);
_count = p.count();
_amount = p.amount();
return *this;
}
inline void inc(size_t size) {
_count ++;
_amount += size;
enum BaselineThreshold {
SIZE_THRESHOLD = K // Only allocation size over this threshold will be baselined.
};
inline size_t count() const {
return _count;
}
enum BaselineType {
Not_baselined,
Summary_baselined,
Detail_baselined
};
inline size_t amount() const {
return _amount;
}
};
enum SortingOrder {
by_address, // by memory address
by_size, // by memory size
by_site // by call site where the memory is allocated from
};
// baselined virtual memory record aggregated by callsite
class VMCallsitePointer : public MemPointer {
private:
size_t _count; // number of invocation from this callsite
size_t _reserved_amount; // total reserved amount
size_t _committed_amount; // total committed amount
public:
VMCallsitePointer() {
_count = 0;
_reserved_amount = 0;
_committed_amount = 0;
}
VMCallsitePointer(address pc) : MemPointer(pc) {
_count = 0;
_reserved_amount = 0;
_committed_amount = 0;
}
VMCallsitePointer& operator=(const VMCallsitePointer& p) {
MemPointer::operator=(p);
_count = p.count();
_reserved_amount = p.reserved_amount();
_committed_amount = p.committed_amount();
return *this;
}
inline void inc(size_t reserved, size_t committed) {
_count ++;
_reserved_amount += reserved;
_committed_amount += committed;
}
// Summary information
MallocMemorySnapshot _malloc_memory_snapshot;
VirtualMemorySnapshot _virtual_memory_snapshot;
inline size_t count() const {
return _count;
}
size_t _class_count;
inline size_t reserved_amount() const {
return _reserved_amount;
}
// Allocation sites information
// Malloc allocation sites
LinkedListImpl<MallocSite> _malloc_sites;
inline size_t committed_amount() const {
return _committed_amount;
}
};
// All virtual memory allocations
LinkedListImpl<ReservedMemoryRegion> _virtual_memory_allocations;
// maps a memory type flag to readable name
typedef struct _memType2Name {
MEMFLAGS _flag;
const char* _name;
} MemType2Name;
// Virtual memory allocations by allocation sites, always in by_address
// order
LinkedListImpl<VirtualMemoryAllocationSite> _virtual_memory_sites;
SortingOrder _malloc_sites_order;
SortingOrder _virtual_memory_sites_order;
// This class aggregates malloc'd records by memory type
class MallocMem VALUE_OBJ_CLASS_SPEC {
private:
MEMFLAGS _type;
size_t _count;
size_t _amount;
BaselineType _baseline_type;
public:
MallocMem() {
_type = mtNone;
_count = 0;
_amount = 0;
// create a memory baseline
MemBaseline():
_baseline_type(Not_baselined),
_class_count(0) {
}
MallocMem(MEMFLAGS flags) {
assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
_type = FLAGS_TO_MEMORY_TYPE(flags);
_count = 0;
_amount = 0;
~MemBaseline() {
reset();
}
inline void set_type(MEMFLAGS flag) {
_type = flag;
}
bool baseline(bool summaryOnly = true);
inline void clear() {
_count = 0;
_amount = 0;
_type = mtNone;
}
BaselineType baseline_type() const { return _baseline_type; }
MallocMem& operator=(const MallocMem& m) {
assert(_type == m.type(), "different type");
_count = m.count();
_amount = m.amount();
return *this;
MallocMemorySnapshot* malloc_memory_snapshot() {
return &_malloc_memory_snapshot;
}
inline void inc(size_t amt) {
_amount += amt;
_count ++;
VirtualMemorySnapshot* virtual_memory_snapshot() {
return &_virtual_memory_snapshot;
}
inline void reduce(size_t amt) {
assert(_amount >= amt, "Just check");
_amount -= amt;
}
MallocSiteIterator malloc_sites(SortingOrder order);
VirtualMemorySiteIterator virtual_memory_sites(SortingOrder order);
inline void overwrite_counter(size_t count) {
_count = count;
// Virtual memory allocation iterator always returns in virtual memory
// base address order.
VirtualMemoryAllocationIterator virtual_memory_allocations() {
assert(!_virtual_memory_allocations.is_empty(), "Not detail baseline");
return VirtualMemoryAllocationIterator(_virtual_memory_allocations.head());
}
inline MEMFLAGS type() const {
return _type;
// Total reserved memory = total malloc'd memory + total reserved virtual
// memory
size_t total_reserved_memory() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
size_t amount = _malloc_memory_snapshot.total() +
_virtual_memory_snapshot.total_reserved();
return amount;
}
inline bool is_type(MEMFLAGS flags) const {
return FLAGS_TO_MEMORY_TYPE(flags) == _type;
// Total committed memory = total malloc'd memory + total committed
// virtual memory
size_t total_committed_memory() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
size_t amount = _malloc_memory_snapshot.total() +
_virtual_memory_snapshot.total_committed();
return amount;
}
inline size_t count() const {
return _count;
size_t total_arena_memory() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
return _malloc_memory_snapshot.total_arena();
}
inline size_t amount() const {
return _amount;
size_t malloc_tracking_overhead() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
MemBaseline* bl = const_cast<MemBaseline*>(this);
return bl->_malloc_memory_snapshot.malloc_overhead()->size();
}
};
// This class records live arena's memory usage
class ArenaMem : public MallocMem {
public:
ArenaMem(MEMFLAGS typeflag): MallocMem(typeflag) {
MallocMemory* malloc_memory(MEMFLAGS flag) {
assert(baseline_type() != Not_baselined, "Not yet baselined");
return _malloc_memory_snapshot.by_type(flag);
}
ArenaMem() { }
};
// This class aggregates virtual memory by its memory type
class VMMem VALUE_OBJ_CLASS_SPEC {
private:
MEMFLAGS _type;
size_t _count;
size_t _reserved_amount;
size_t _committed_amount;
public:
VMMem() {
_type = mtNone;
_count = 0;
_reserved_amount = 0;
_committed_amount = 0;
}
VMMem(MEMFLAGS flags) {
assert(HAS_VALID_MEMORY_TYPE(flags), "no type");
_type = FLAGS_TO_MEMORY_TYPE(flags);
_count = 0;
_reserved_amount = 0;
_committed_amount = 0;
VirtualMemory* virtual_memory(MEMFLAGS flag) {
assert(baseline_type() != Not_baselined, "Not yet baselined");
return _virtual_memory_snapshot.by_type(flag);
}
inline void clear() {
_type = mtNone;
_count = 0;
_reserved_amount = 0;
_committed_amount = 0;
}
inline void set_type(MEMFLAGS flags) {
_type = FLAGS_TO_MEMORY_TYPE(flags);
}
VMMem& operator=(const VMMem& m) {
assert(_type == m.type(), "different type");
_count = m.count();
_reserved_amount = m.reserved_amount();
_committed_amount = m.committed_amount();
return *this;
size_t class_count() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
return _class_count;
}
inline MEMFLAGS type() const {
return _type;
size_t thread_count() const {
assert(baseline_type() != Not_baselined, "Not yet baselined");
return _malloc_memory_snapshot.thread_count();
}
inline bool is_type(MEMFLAGS flags) const {
return FLAGS_TO_MEMORY_TYPE(flags) == _type;
}
inline void inc(size_t reserved_amt, size_t committed_amt) {
_reserved_amount += reserved_amt;
_committed_amount += committed_amt;
_count ++;
}
inline size_t count() const {
return _count;
}
inline size_t reserved_amount() const {
return _reserved_amount;
}
inline size_t committed_amount() const {
return _committed_amount;
}
};
#define NUMBER_OF_MEMORY_TYPE (mt_number_of_types + 1)
class BaselineReporter;
class BaselineComparisonReporter;
/*
* This class baselines current memory snapshot.
* A memory baseline summarizes memory usage by memory type,
* aggregates memory usage by callsites when detail tracking
* is on.
*/
class MemBaseline VALUE_OBJ_CLASS_SPEC {
friend class BaselineReporter;
friend class BaselineComparisonReporter;
private:
// overall summaries
size_t _total_malloced;
size_t _total_vm_reserved;
size_t _total_vm_committed;
size_t _number_of_classes;
size_t _number_of_threads;
// if it has properly baselined
bool _baselined;
// we categorize memory into three categories within the memory type
MallocMem _malloc_data[NUMBER_OF_MEMORY_TYPE];
VMMem _vm_data[NUMBER_OF_MEMORY_TYPE];
ArenaMem _arena_data[NUMBER_OF_MEMORY_TYPE];
// memory records that aggregate memory usage by callsites.
// only available when detail tracking is on.
MemPointerArray* _malloc_cs;
MemPointerArray* _vm_cs;
// virtual memory map
MemPointerArray* _vm_map;
private:
static MemType2Name MemType2NameMap[NUMBER_OF_MEMORY_TYPE];
private:
// should not use copy constructor
MemBaseline(MemBaseline& copy) { ShouldNotReachHere(); }
// check and block at a safepoint
static inline void check_safepoint(JavaThread* thr);
public:
// create a memory baseline
MemBaseline();
~MemBaseline();
inline bool baselined() const {
return _baselined;
}
MemBaseline& operator=(const MemBaseline& other);
// reset the baseline for reuse
void clear();
// baseline the snapshot
bool baseline(MemSnapshot& snapshot, bool summary_only = true);
bool baseline(const MemPointerArray* malloc_records,
const MemPointerArray* vm_records,
bool summary_only = true);
// total malloc'd memory of specified memory type
inline size_t malloc_amount(MEMFLAGS flag) const {
return _malloc_data[flag2index(flag)].amount();
}
// number of malloc'd memory blocks of specified memory type
inline size_t malloc_count(MEMFLAGS flag) const {
return _malloc_data[flag2index(flag)].count();
}
// total memory used by arenas of specified memory type
inline size_t arena_amount(MEMFLAGS flag) const {
return _arena_data[flag2index(flag)].amount();
}
// number of arenas of specified memory type
inline size_t arena_count(MEMFLAGS flag) const {
return _arena_data[flag2index(flag)].count();
}
// total reserved memory of specified memory type
inline size_t reserved_amount(MEMFLAGS flag) const {
return _vm_data[flag2index(flag)].reserved_amount();
}
// total committed memory of specified memory type
inline size_t committed_amount(MEMFLAGS flag) const {
return _vm_data[flag2index(flag)].committed_amount();
}
// total memory (malloc'd + mmap'd + arena) of specified
// memory type
inline size_t total_amount(MEMFLAGS flag) const {
int index = flag2index(flag);
return _malloc_data[index].amount() +
_vm_data[index].reserved_amount() +
_arena_data[index].amount();
}
/* overall summaries */
void reset() {
_baseline_type = Not_baselined;
_malloc_memory_snapshot.reset();
_virtual_memory_snapshot.reset();
_class_count = 0;
// total malloc'd memory in snapshot
inline size_t total_malloc_amount() const {
return _total_malloced;
_malloc_sites.clear();
_virtual_memory_sites.clear();
_virtual_memory_allocations.clear();
}
// total mmap'd memory in snapshot
inline size_t total_reserved_amount() const {
return _total_vm_reserved;
}
// total committed memory in snapshot
inline size_t total_committed_amount() const {
return _total_vm_committed;
}
// number of loaded classes
inline size_t number_of_classes() const {
return _number_of_classes;
}
// number of running threads
inline size_t number_of_threads() const {
return _number_of_threads;
}
// lookup human readable name of a memory type
static const char* type2name(MEMFLAGS type);
private:
// convert memory flag to the index to mapping table
int flag2index(MEMFLAGS flag) const;
// reset baseline values
void reset();
// Baseline summary information
bool baseline_summary();
// summarize the records in global snapshot
bool baseline_malloc_summary(const MemPointerArray* malloc_records);
bool baseline_vm_summary(const MemPointerArray* vm_records);
bool baseline_malloc_details(const MemPointerArray* malloc_records);
bool baseline_vm_details(const MemPointerArray* vm_records);
// Baseline allocation sites (detail tracking only)
bool baseline_allocation_sites();
// print a line of malloc'd memory aggregated by callsite
void print_malloc_callsite(outputStream* st, address pc, size_t size,
size_t count, int diff_amt, int diff_count) const;
// print a line of mmap'd memory aggregated by callsite
void print_vm_callsite(outputStream* st, address pc, size_t rsz,
size_t csz, int diff_rsz, int diff_csz) const;
// Aggregate virtual memory allocation by allocation sites
bool aggregate_virtual_memory_allocation_sites();
// sorting functions for raw records
static int malloc_sort_by_pc(const void* p1, const void* p2);
static int malloc_sort_by_addr(const void* p1, const void* p2);
// Sorting allocation sites in different orders
// Sort allocation sites in size order
void malloc_sites_to_size_order();
// Sort allocation sites in call site address order
void malloc_sites_to_allocation_site_order();
private:
// sorting functions for baselined records
static int bl_malloc_sort_by_size(const void* p1, const void* p2);
static int bl_vm_sort_by_size(const void* p1, const void* p2);
static int bl_malloc_sort_by_pc(const void* p1, const void* p2);
static int bl_vm_sort_by_pc(const void* p1, const void* p2);
// Sort allocation sites in reserved size order
void virtual_memory_sites_to_size_order();
// Sort allocation sites in call site address order
void virtual_memory_sites_to_reservation_site_order();
};
#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MEM_BASELINE_HPP
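The totals exposed by the baseline above are plain sums over its two snapshots: reserved memory is malloc'd memory plus reserved virtual memory, and committed memory is malloc'd memory plus committed virtual memory. A tiny self-contained sketch of that arithmetic (DemoBaseline is an invented stand-in, not MemBaseline):

#include <stdio.h>
#include <stddef.h>

struct DemoBaseline {
  size_t malloc_total;    // everything allocated through malloc/arenas
  size_t vm_reserved;     // reserved virtual memory
  size_t vm_committed;    // committed virtual memory

  size_t total_reserved()  const { return malloc_total + vm_reserved;  }
  size_t total_committed() const { return malloc_total + vm_committed; }
};

int main() {
  DemoBaseline bl = { 10 * 1024, 64 * 1024, 32 * 1024 };
  printf("reserved=%lu committed=%lu\n",
         (unsigned long) bl.total_reserved(),
         (unsigned long) bl.total_committed());
  return 0;
}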
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_SERVICES_MEM_PTR_HPP
#define SHARE_VM_SERVICES_MEM_PTR_HPP
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
/*
* global sequence generator that generates sequence numbers to serialize
* memory records.
*/
class SequenceGenerator : AllStatic {
public:
static jint next();
// peek last sequence number
static jint peek() {
return _seq_number;
}
// reset sequence number
static void reset() {
assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
_seq_number = 1;
_generation ++;
};
static unsigned long current_generation() { return _generation; }
NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; })
private:
static volatile jint _seq_number;
static volatile unsigned long _generation;
NOT_PRODUCT(static jint _max_seq_number; )
};
/*
 * The following classes are used to hold memory activity records in different stages.
* MemPointer
* |--------MemPointerRecord
* |
* |----MemPointerRecordEx
* | |
* | |-------SeqMemPointerRecordEx
* |
* |----SeqMemPointerRecord
* |
* |----VMMemRegion
* |
* |-----VMMemRegionEx
*
*
* prefix 'Seq' - sequenced, the record contains a sequence number
 * suffix 'Ex' - extension, the record contains a caller's pc
*
* per-thread recorder : SeqMemPointerRecord(Ex)
* snapshot staging : SeqMemPointerRecord(Ex)
* snapshot : MemPointerRecord(Ex) and VMMemRegion(Ex)
*
*/
/*
* class that wraps an address to a memory block,
* the memory pointer either points to a malloc'd
* memory block, or a mmap'd memory block
*/
class MemPointer VALUE_OBJ_CLASS_SPEC {
public:
MemPointer(): _addr(0) { }
MemPointer(address addr): _addr(addr) { }
MemPointer(const MemPointer& copy_from) {
_addr = copy_from.addr();
}
inline address addr() const {
return _addr;
}
inline operator address() const {
return addr();
}
inline bool operator == (const MemPointer& other) const {
return addr() == other.addr();
}
inline MemPointer& operator = (const MemPointer& other) {
_addr = other.addr();
return *this;
}
protected:
inline void set_addr(address addr) { _addr = addr; }
protected:
// memory address
address _addr;
};
/* MemPointerRecord records an activity and associated
* attributes on a memory block.
*/
class MemPointerRecord : public MemPointer {
private:
MEMFLAGS _flags;
size_t _size;
public:
/* extension of MemoryType enum
* see share/vm/memory/allocation.hpp for details.
*
* The tag values are associated to sorting orders, so be
* careful if changes are needed.
* The allocation records should be sorted ahead of tagging
* records, which in turn ahead of deallocation records
*/
enum MemPointerTags {
tag_alloc = 0x0001, // malloc or reserve record
tag_commit = 0x0002, // commit record
tag_type = 0x0003, // tag virtual memory to a memory type
tag_uncommit = 0x0004, // uncommit record
tag_release = 0x0005, // free or release record
tag_size = 0x0006, // arena size
tag_masks = 0x0007, // all tag bits
vmBit = 0x0008
};
/* helper functions to interpret the tagging flags */
inline static bool is_allocation_record(MEMFLAGS flags) {
return (flags & tag_masks) == tag_alloc;
}
inline static bool is_deallocation_record(MEMFLAGS flags) {
return (flags & tag_masks) == tag_release;
}
inline static bool is_arena_record(MEMFLAGS flags) {
return (flags & (otArena | tag_size)) == otArena;
}
inline static bool is_arena_memory_record(MEMFLAGS flags) {
return (flags & (otArena | tag_size)) == (otArena | tag_size);
}
inline static bool is_virtual_memory_record(MEMFLAGS flags) {
return (flags & vmBit) != 0;
}
inline static bool is_virtual_memory_reserve_record(MEMFLAGS flags) {
return (flags & 0x0F) == (tag_alloc | vmBit);
}
inline static bool is_virtual_memory_commit_record(MEMFLAGS flags) {
return (flags & 0x0F) == (tag_commit | vmBit);
}
inline static bool is_virtual_memory_uncommit_record(MEMFLAGS flags) {
return (flags & 0x0F) == (tag_uncommit | vmBit);
}
inline static bool is_virtual_memory_release_record(MEMFLAGS flags) {
return (flags & 0x0F) == (tag_release | vmBit);
}
inline static bool is_virtual_memory_type_record(MEMFLAGS flags) {
return (flags & 0x0F) == (tag_type | vmBit);
}
/* tagging flags */
inline static MEMFLAGS malloc_tag() { return tag_alloc; }
inline static MEMFLAGS free_tag() { return tag_release; }
inline static MEMFLAGS arena_size_tag() { return tag_size | otArena; }
inline static MEMFLAGS virtual_memory_tag() { return vmBit; }
inline static MEMFLAGS virtual_memory_reserve_tag() { return (tag_alloc | vmBit); }
inline static MEMFLAGS virtual_memory_commit_tag() { return (tag_commit | vmBit); }
inline static MEMFLAGS virtual_memory_uncommit_tag(){ return (tag_uncommit | vmBit); }
inline static MEMFLAGS virtual_memory_release_tag() { return (tag_release | vmBit); }
inline static MEMFLAGS virtual_memory_type_tag() { return (tag_type | vmBit); }
public:
MemPointerRecord(): _size(0), _flags(mtNone) { }
MemPointerRecord(address addr, MEMFLAGS memflags, size_t size = 0):
MemPointer(addr), _flags(memflags), _size(size) { }
MemPointerRecord(const MemPointerRecord& copy_from):
MemPointer(copy_from), _flags(copy_from.flags()),
_size(copy_from.size()) {
}
/* MemPointerRecord is not sequenced, it always returns
* 0 to indicate non-sequenced
*/
virtual jint seq() const { return 0; }
inline size_t size() const { return _size; }
inline void set_size(size_t size) { _size = size; }
inline MEMFLAGS flags() const { return _flags; }
inline void set_flags(MEMFLAGS flags) { _flags = flags; }
MemPointerRecord& operator= (const MemPointerRecord& ptr) {
MemPointer::operator=(ptr);
_flags = ptr.flags();
#ifdef ASSERT
if (IS_ARENA_OBJ(_flags)) {
assert(!is_vm_pointer(), "wrong flags");
assert((_flags & ot_masks) == otArena, "wrong flags");
}
#endif
_size = ptr.size();
return *this;
}
// if the pointer represents a malloc-ed memory address
inline bool is_malloced_pointer() const {
return !is_vm_pointer();
}
// if the pointer represents a virtual memory address
inline bool is_vm_pointer() const {
return is_virtual_memory_record(_flags);
}
// if this record records a 'malloc' or virtual memory
// 'reserve' call
inline bool is_allocation_record() const {
return is_allocation_record(_flags);
}
// if this record records a size information of an arena
inline bool is_arena_memory_record() const {
return is_arena_memory_record(_flags);
}
// if this pointer represents an address to an arena object
inline bool is_arena_record() const {
return is_arena_record(_flags);
}
// if this record represents a size information of specific arena
inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) {
assert(is_arena_memory_record(), "not size record");
assert(arena_rc->is_arena_record(), "not arena record");
return (arena_rc->addr() + sizeof(void*)) == addr();
}
// if this record records a 'free' or virtual memory 'free' call
inline bool is_deallocation_record() const {
return is_deallocation_record(_flags);
}
// if this record records a virtual memory 'commit' call
inline bool is_commit_record() const {
return is_virtual_memory_commit_record(_flags);
}
// if this record records a virtual memory 'uncommit' call
inline bool is_uncommit_record() const {
return is_virtual_memory_uncommit_record(_flags);
}
// if this record is a tagging record of a virtual memory block
inline bool is_type_tagging_record() const {
return is_virtual_memory_type_record(_flags);
}
// if the two memory pointer records actually represent the same
// memory block
inline bool is_same_region(const MemPointerRecord* other) const {
return (addr() == other->addr() && size() == other->size());
}
// if this memory region fully contains another one
inline bool contains_region(const MemPointerRecord* other) const {
return contains_region(other->addr(), other->size());
}
// if this memory region fully contains specified memory range
inline bool contains_region(address add, size_t sz) const {
return (addr() <= add && addr() + size() >= add + sz);
}
inline bool contains_address(address add) const {
return (addr() <= add && addr() + size() > add);
}
// if this memory region overlaps another region
inline bool overlaps_region(const MemPointerRecord* other) const {
assert(other != NULL, "Just check");
assert(size() > 0 && other->size() > 0, "empty range");
return contains_address(other->addr()) ||
contains_address(other->addr() + other->size() - 1) || // exclude end address
other->contains_address(addr()) ||
other->contains_address(addr() + size() - 1); // exclude end address
}
};
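contains_region() and overlaps_region() above are ordinary interval checks over [addr(), addr()+size()). The same arithmetic in stand-alone form, using an invented DemoRegion type:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct DemoRegion {
  uintptr_t addr;
  size_t    size;

  bool contains_address(uintptr_t a) const {
    return addr <= a && a < addr + size;
  }
  bool contains_region(uintptr_t a, size_t sz) const {
    return addr <= a && a + sz <= addr + size;
  }
  bool overlaps_region(const DemoRegion& other) const {
    // Two non-empty ranges overlap iff one contains an endpoint of the other.
    return contains_address(other.addr) ||
           contains_address(other.addr + other.size - 1) ||
           other.contains_address(addr) ||
           other.contains_address(addr + size - 1);
  }
};

int main() {
  DemoRegion a = { 0x1000, 0x100 };
  DemoRegion b = { 0x10F0, 0x100 };
  assert(a.overlaps_region(b));                  // they share [0x10F0, 0x1100)
  assert(!a.contains_region(b.addr, b.size));
  return 0;
}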
// MemPointerRecordEx also records callsite pc, from where
// the memory block is allocated
class MemPointerRecordEx : public MemPointerRecord {
private:
address _pc; // callsite pc
public:
MemPointerRecordEx(): _pc(0) { }
MemPointerRecordEx(address addr, MEMFLAGS memflags, size_t size = 0, address pc = 0):
MemPointerRecord(addr, memflags, size), _pc(pc) {}
MemPointerRecordEx(const MemPointerRecordEx& copy_from):
MemPointerRecord(copy_from), _pc(copy_from.pc()) {}
inline address pc() const { return _pc; }
void init(const MemPointerRecordEx* mpe) {
MemPointerRecord::operator=(*mpe);
_pc = mpe->pc();
}
void init(const MemPointerRecord* mp) {
MemPointerRecord::operator=(*mp);
_pc = 0;
}
};
// a virtual memory region. The region can represent a reserved
// virtual memory region or a committed memory region
class VMMemRegion : public MemPointerRecord {
public:
VMMemRegion() { }
void init(const MemPointerRecord* mp) {
assert(mp->is_vm_pointer(), "Sanity check");
_addr = mp->addr();
set_size(mp->size());
set_flags(mp->flags());
}
VMMemRegion& operator=(const VMMemRegion& other) {
MemPointerRecord::operator=(other);
return *this;
}
inline bool is_reserved_region() const {
return is_allocation_record();
}
inline bool is_committed_region() const {
return is_commit_record();
}
/* base address of this virtual memory range */
inline address base() const {
return addr();
}
/* tag this virtual memory range to the specified memory type */
inline void tag(MEMFLAGS f) {
set_flags(flags() | (f & mt_masks));
}
// expand this region to also cover specified range.
// The range has to be on either end of the memory region.
void expand_region(address addr, size_t sz) {
if (addr < base()) {
assert(addr + sz == base(), "Sanity check");
_addr = addr;
set_size(size() + sz);
} else {
assert(base() + size() == addr, "Sanity check");
set_size(size() + sz);
}
}
// exclude the specified address range from this region.
// The excluded memory range has to be on either end of this memory
// region.
inline void exclude_region(address add, size_t sz) {
assert(is_reserved_region() || is_committed_region(), "Sanity check");
assert(addr() != NULL && size() != 0, "Sanity check");
assert(add >= addr() && add < addr() + size(), "Sanity check");
assert(add == addr() || (add + sz) == (addr() + size()),
"exclude in the middle");
if (add == addr()) {
set_addr(add + sz);
set_size(size() - sz);
} else {
set_size(size() - sz);
}
}
};
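expand_region() and exclude_region() above only ever adjust a region at one of its two ends, which the assertions enforce (no holes punched in the middle). A compact sketch of that invariant, with an invented DemoVMRegion type:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct DemoVMRegion {
  uintptr_t base;
  size_t    size;

  // Grow the region by a range that touches either the low or the high end.
  void expand(uintptr_t a, size_t sz) {
    if (a < base) {
      assert(a + sz == base);
      base = a;
    } else {
      assert(base + size == a);
    }
    size += sz;
  }
  // Trim a range off either end; carving a hole in the middle is not allowed.
  void exclude(uintptr_t a, size_t sz) {
    assert(a == base || a + sz == base + size);
    if (a == base) base = a + sz;
    size -= sz;
  }
};

int main() {
  DemoVMRegion r = { 0x2000, 0x1000 };
  r.expand(0x1800, 0x800);    // grows downwards to [0x1800, 0x3000)
  r.exclude(0x2800, 0x800);   // trims the top back to [0x1800, 0x2800)
  assert(r.base == 0x1800 && r.size == 0x1000);
  return 0;
}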
class VMMemRegionEx : public VMMemRegion {
private:
jint _seq; // sequence number
public:
VMMemRegionEx(): _pc(0) { }
void init(const MemPointerRecordEx* mpe) {
VMMemRegion::init(mpe);
_pc = mpe->pc();
}
void init(const MemPointerRecord* mpe) {
VMMemRegion::init(mpe);
_pc = 0;
}
VMMemRegionEx& operator=(const VMMemRegionEx& other) {
VMMemRegion::operator=(other);
_pc = other.pc();
return *this;
}
inline address pc() const { return _pc; }
private:
address _pc;
};
/*
* Sequenced memory record
*/
class SeqMemPointerRecord : public MemPointerRecord {
private:
jint _seq; // sequence number
public:
SeqMemPointerRecord(): _seq(0){ }
SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq)
: MemPointerRecord(addr, flags, size), _seq(seq) {
}
SeqMemPointerRecord(const SeqMemPointerRecord& copy_from)
: MemPointerRecord(copy_from) {
_seq = copy_from.seq();
}
SeqMemPointerRecord& operator= (const SeqMemPointerRecord& ptr) {
MemPointerRecord::operator=(ptr);
_seq = ptr.seq();
return *this;
}
inline jint seq() const {
return _seq;
}
};
class SeqMemPointerRecordEx : public MemPointerRecordEx {
private:
jint _seq; // sequence number
public:
SeqMemPointerRecordEx(): _seq(0) { }
SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size,
jint seq, address pc):
MemPointerRecordEx(addr, flags, size, pc), _seq(seq) {
}
SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from)
: MemPointerRecordEx(copy_from) {
_seq = copy_from.seq();
}
SeqMemPointerRecordEx& operator= (const SeqMemPointerRecordEx& ptr) {
MemPointerRecordEx::operator=(ptr);
_seq = ptr.seq();
return *this;
}
inline jint seq() const {
return _seq;
}
};
#endif // SHARE_VM_SERVICES_MEM_PTR_HPP
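The tag helpers in MemPointerRecord pack the operation into the low three bits and mark virtual-memory records with the 0x8 bit, so a virtual-memory reserve decodes as (tag_alloc | vmBit). A small demo of just that bit arithmetic, outside any HotSpot types:

#include <stdio.h>

enum {
  tag_alloc   = 0x0001,   // malloc or reserve
  tag_commit  = 0x0002,   // commit
  tag_release = 0x0005,   // free or release
  tag_masks   = 0x0007,   // low three bits carry the operation
  vmBit       = 0x0008    // set for virtual-memory records
};

static bool is_vm_record(int flags)         { return (flags & vmBit) != 0; }
static bool is_vm_reserve_record(int flags) { return (flags & 0x0F) == (tag_alloc | vmBit); }
static bool is_vm_commit_record(int flags)  { return (flags & 0x0F) == (tag_commit | vmBit); }

int main() {
  int reserve = tag_alloc | vmBit;   // 0x9
  printf("vm=%d reserve=%d commit=%d\n",
         is_vm_record(reserve), is_vm_reserve_record(reserve),
         is_vm_commit_record(reserve));
  return 0;
}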
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "services/nmtCommon.hpp"
const char* NMTUtil::_memory_type_names[] = {
"Java Heap",
"Class",
"Thread",
"Thread Stack",
"Code",
"GC",
"Compiler",
"Internal",
"Other",
"Symbol",
"Native Memory Tracking",
"Shared class space",
"Arena Chunk",
"Test",
"Tracing",
"Unknown"
};
const char* NMTUtil::scale_name(size_t scale) {
switch(scale) {
case K: return "KB";
case M: return "MB";
case G: return "GB";
}
ShouldNotReachHere();
return NULL;
}
size_t NMTUtil::scale_from_name(const char* scale) {
assert(scale != NULL, "Null pointer check");
if (strncmp(scale, "KB", 2) == 0 ||
strncmp(scale, "kb", 2) == 0) {
return K;
} else if (strncmp(scale, "MB", 2) == 0 ||
strncmp(scale, "mb", 2) == 0) {
return M;
} else if (strncmp(scale, "GB", 2) == 0 ||
strncmp(scale, "gb", 2) == 0) {
return G;
} else {
return 0; // Invalid value
}
return K;
}
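For reference, a usage-style sketch of the scale helpers above (local copies of the strncmp mapping, not calls into NMTUtil), showing how a byte count would be printed in a chosen unit:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

static const size_t K = 1024;
static const size_t M = K * 1024;
static const size_t G = M * 1024;

static size_t demo_scale_from_name(const char* s) {
  if (strncmp(s, "KB", 2) == 0 || strncmp(s, "kb", 2) == 0) return K;
  if (strncmp(s, "MB", 2) == 0 || strncmp(s, "mb", 2) == 0) return M;
  if (strncmp(s, "GB", 2) == 0 || strncmp(s, "gb", 2) == 0) return G;
  return 0;   // invalid scale name
}

static const char* demo_scale_name(size_t scale) {
  return scale == K ? "KB" : scale == M ? "MB" : scale == G ? "GB" : "?";
}

int main() {
  size_t bytes = 3 * M + 512 * K;
  size_t scale = demo_scale_from_name("KB");
  if (scale != 0) {
    printf("%lu %s\n", (unsigned long)(bytes / scale), demo_scale_name(scale));  // 3584 KB
  }
  return 0;
}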
......@@ -772,6 +772,11 @@ void VMError::report(outputStream* st) {
st->cr();
}
STEP(228, "(Native Memory Tracking)" )
if (_verbose) {
MemTracker::final_report(st);
}
STEP(230, "" )
if (_verbose) {
......@@ -895,9 +900,6 @@ void VMError::report_and_die() {
static bool log_done = false; // done saving error log
static bool transmit_report_done = false; // done error reporting
// disable NMT to avoid further exceptions
MemTracker::shutdown(MemTracker::NMT_error_reporting);
if (SuppressFatalErrorMessage) {
os::abort();
}
......