Commit 81d0ad1a, authored by ctornqvi

Merge

@@ -76,4 +76,9 @@ public class WhiteBox {
   public native long g1NumFreeRegions();
   public native int g1RegionSize();
   public native Object[] parseCommandLine(String commandline, DiagnosticCommand[] args);
+
+  // NMT
+  public native boolean NMTAllocTest();
+  public native boolean NMTFreeTestMemory();
+  public native boolean NMTWaitForDataMerge();
 }
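For context, the sketch below shows how a jtreg-style whitebox test might exercise these three hooks end to end. It is not part of this change; the WhiteBox.getWhiteBox() accessor and the VM flags mentioned in the comments are assumptions based on the usual whitebox test setup (-Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI, plus -XX:NativeMemoryTracking=detail so NMT is enabled):

import sun.hotspot.WhiteBox;

public class NMTWhiteBoxSketch {
  public static void main(String[] args) {
    // Assumed accessor on the test-side WhiteBox class; requires -XX:+WhiteBoxAPI.
    WhiteBox wb = WhiteBox.getWhiteBox();

    // Allocate mtTest-typed memory inside the VM: 2 * 128k + 256k stay alive (512k total).
    if (!wb.NMTAllocTest()) {
      throw new RuntimeException("NMT is off or shutting down");
    }
    // Block until those allocations have been merged into the NMT snapshot; at this
    // point the "Test" category should show about 512k (e.g. via jcmd VM.native_memory).
    if (!wb.NMTWaitForDataMerge()) {
      throw new RuntimeException("data merge did not complete before NMT shutdown");
    }

    // Free the test allocations and wait again; the Test category should drop back to zero.
    if (!wb.NMTFreeTestMemory()) {
      throw new RuntimeException("test memory was never allocated");
    }
    wb.NMTWaitForDataMerge();
  }
}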
@@ -145,9 +145,10 @@ enum MemoryType {
   mtChunk = 0x0B00,             // chunk that holds content of arenas
   mtJavaHeap = 0x0C00,          // Java heap
   mtClassShared = 0x0D00,       // class data sharing
-  mt_number_of_types = 0x000D,  // number of memory types (mtDontTrack
+  mtTest = 0x0E00,              // Test type for verifying NMT
+  mt_number_of_types = 0x000E,  // number of memory types (mtDontTrack
                                 // is not included as validate type)
-  mtDontTrack = 0x0E00,         // memory we do not or cannot track
+  mtDontTrack = 0x0F00,         // memory we do not or cannot track
   mt_masks = 0x7F00,
   // object type mask
......
@@ -43,6 +43,10 @@
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #endif // !SERIALGC
+#ifdef INCLUDE_NMT
+#include "services/memTracker.hpp"
+#endif // INCLUDE_NMT
+
 bool WhiteBox::_used = false;

 WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
@@ -110,6 +114,60 @@ WB_ENTRY(jint, WB_G1RegionSize(JNIEnv* env, jobject o))
 WB_END
 #endif // !SERIALGC

+#ifdef INCLUDE_NMT
+// Keep track of the 3 allocations in NMTAllocTest so we can free them later
+// on and verify that they're not visible anymore
+static void* nmtMtTest1 = NULL, *nmtMtTest2 = NULL, *nmtMtTest3 = NULL;
+
+// Alloc memory using the test memory type so that we can use that to see if
+// NMT picks it up correctly
+WB_ENTRY(jboolean, WB_NMTAllocTest(JNIEnv* env))
+  void *mem;
+
+  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
+    return false;
+  }
+
+  // Allocate 2 * 128k + 256k + 1024k and free the 1024k one to make sure we track
+  // everything correctly. Total should be 512k held alive.
+  nmtMtTest1 = os::malloc(128 * 1024, mtTest);
+  mem = os::malloc(1024 * 1024, mtTest);
+  nmtMtTest2 = os::malloc(256 * 1024, mtTest);
+  os::free(mem, mtTest);
+  nmtMtTest3 = os::malloc(128 * 1024, mtTest);
+  return true;
+WB_END
+
+// Free the memory allocated by NMTAllocTest
+WB_ENTRY(jboolean, WB_NMTFreeTestMemory(JNIEnv* env))
+  if (nmtMtTest1 == NULL || nmtMtTest2 == NULL || nmtMtTest3 == NULL) {
+    return false;
+  }
+
+  os::free(nmtMtTest1, mtTest);
+  nmtMtTest1 = NULL;
+  os::free(nmtMtTest2, mtTest);
+  nmtMtTest2 = NULL;
+  os::free(nmtMtTest3, mtTest);
+  nmtMtTest3 = NULL;
+
+  return true;
+WB_END
+
+// Block until the current generation of NMT data to be merged, used to reliably test the NMT feature
+WB_ENTRY(jboolean, WB_NMTWaitForDataMerge(JNIEnv* env))
+  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
+    return false;
+  }
+  return MemTracker::wbtest_wait_for_data_merge();
+WB_END
+#endif // INCLUDE_NMT
+
 //Some convenience methods to deal with objects from java
 int WhiteBox::offset_for_field(const char* field_name, oop object,
                                Symbol* signature_symbol) {
@@ -177,6 +235,11 @@ static JNINativeMethod methods[] = {
   {CC"g1NumFreeRegions",   CC"()J",  (void*)&WB_G1NumFreeRegions  },
   {CC"g1RegionSize",       CC"()I",  (void*)&WB_G1RegionSize      },
 #endif // !SERIALGC
+#ifdef INCLUDE_NMT
+  {CC"NMTAllocTest",       CC"()Z",  (void*)&WB_NMTAllocTest      },
+  {CC"NMTFreeTestMemory",  CC"()Z",  (void*)&WB_NMTFreeTestMemory },
+  {CC"NMTWaitForDataMerge",CC"()Z",  (void*)&WB_NMTWaitForDataMerge},
+#endif // INCLUDE_NMT
 };

 #undef CC
......
@@ -40,6 +40,7 @@ MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
   {mtNMT,        "Memory Tracking"},
   {mtChunk,      "Pooled Free Chunks"},
   {mtClassShared,"Shared spaces for classes"},
+  {mtTest,       "Test"},
   {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
                              // behind
 };
......
@@ -27,8 +27,8 @@
 #include "services/memTracker.hpp"

 volatile jint SequenceGenerator::_seq_number = 1;
+volatile unsigned long SequenceGenerator::_generation = 1;
 NOT_PRODUCT(jint SequenceGenerator::_max_seq_number = 1;)
-DEBUG_ONLY(volatile unsigned long SequenceGenerator::_generation = 0;)

 jint SequenceGenerator::next() {
   jint seq = Atomic::add(1, &_seq_number);
......
@@ -47,16 +47,16 @@ class SequenceGenerator : AllStatic {
   static void reset() {
     assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
     _seq_number = 1;
-    DEBUG_ONLY(_generation ++;)
+    _generation ++;
   };

-  DEBUG_ONLY(static unsigned long current_generation() { return (unsigned long)_generation; })
+  static unsigned long current_generation() { return _generation; }
   NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; })

 private:
   static volatile jint _seq_number;
+  static volatile unsigned long _generation;
   NOT_PRODUCT(static jint _max_seq_number; )
-  DEBUG_ONLY(static volatile unsigned long _generation; )
 };

 /*
......
@@ -55,7 +55,7 @@ volatile jint MemRecorder::_instance_count = 0;
 MemRecorder::MemRecorder() {
   assert(MemTracker::is_on(), "Native memory tracking is off");
   Atomic::inc(&_instance_count);
-  debug_only(set_generation();)
+  set_generation();

   if (MemTracker::track_callsite()) {
     _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecordEx,
@@ -151,11 +151,12 @@ SequencedRecordIterator MemRecorder::pointer_itr() {
 }

-#ifdef ASSERT
 void MemRecorder::set_generation() {
   _generation = SequenceGenerator::current_generation();
 }

+#ifdef ASSERT
+
 void MemRecorder::check_dup_seq(jint seq) const {
   MemPointerArrayIteratorImpl itr(_pointer_records);
   MemPointerRecord* rc = (MemPointerRecord*)itr.current();
......
@@ -213,7 +213,7 @@ class MemRecorder : public CHeapObj<mtNMT|otNMTRecorder> {
   // used for linked list
   MemRecorder* _next;
   // active recorder can only record a certain generation data
-  debug_only(unsigned long _generation;)
+  unsigned long _generation;

 protected:
   _NOINLINE_ MemRecorder();
@@ -251,6 +251,8 @@ class MemRecorder : public CHeapObj<mtNMT|otNMTRecorder> {
   SequencedRecordIterator pointer_itr();

+  // return the generation of this recorder which it belongs to
+  unsigned long get_generation() const { return _generation; }
 protected:
   // number of MemRecorder instance
   static volatile jint _instance_count;
@@ -263,7 +265,7 @@ class MemRecorder : public CHeapObj<mtNMT|otNMTRecorder> {
   static int sort_record_fn(const void* e1, const void* e2);

   debug_only(void check_dup_seq(jint seq) const;)
-  debug_only(void set_generation();)
+  void set_generation();
 };

 #endif // SHARE_VM_SERVICES_MEM_RECORDER_HPP
@@ -91,6 +91,8 @@ void MemTrackWorker::run() {
   MemSnapshot* snapshot = MemTracker::get_snapshot();
   assert(snapshot != NULL, "Worker should not be started");
   MemRecorder* rec;
+  unsigned long processing_generation = 0;
+  bool worker_idle = false;

   while (!MemTracker::shutdown_in_progress()) {
     NOT_PRODUCT(_last_gen_in_use = generations_in_use();)
@@ -100,6 +102,12 @@ void MemTrackWorker::run() {
       rec = _gen[_head].next_recorder();
     }
     if (rec != NULL) {
+      if (rec->get_generation() != processing_generation || worker_idle) {
+        processing_generation = rec->get_generation();
+        worker_idle = false;
+        MemTracker::set_current_processing_generation(processing_generation);
+      }
+
       // merge the recorder into staging area
       if (!snapshot->merge(rec)) {
         MemTracker::shutdown(MemTracker::NMT_out_of_memory);
@@ -129,6 +137,9 @@ void MemTrackWorker::run() {
         MemTracker::shutdown(MemTracker::NMT_out_of_memory);
       }
     } else {
+      // worker thread is idle
+      worker_idle = true;
+      MemTracker::report_worker_idle();
       snapshot->wait(1000);
       ThreadCritical tc;
       // check if more data arrived
......
@@ -107,6 +107,7 @@ class MemTrackWorker : public NamedThread {
   NOT_PRODUCT(int _merge_count;)
   NOT_PRODUCT(int _last_gen_in_use;)

+  // how many generations are queued
   inline int generations_in_use() const {
     return (_tail >= _head ? (_tail - _head + 1) : (MAX_GENERATIONS - (_head - _tail) + 1));
   }
......
@@ -29,6 +29,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/threadCritical.hpp"
+#include "runtime/vm_operations.hpp"
 #include "services/memPtr.hpp"
 #include "services/memReporter.hpp"
 #include "services/memTracker.hpp"
@@ -65,6 +66,8 @@ volatile MemTracker::NMTStates MemTracker::_state = NMT_uninited;
 MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none;
 int MemTracker::_thread_count = 255;
 volatile jint MemTracker::_pooled_recorder_count = 0;
+volatile unsigned long MemTracker::_processing_generation = 0;
+volatile bool MemTracker::_worker_thread_idle = false;
 debug_only(intx MemTracker::_main_thread_tid = 0;)
 NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;)
@@ -279,7 +282,7 @@ MemRecorder* MemTracker::get_new_or_pooled_instance() {
       }
       cur_head->set_next(NULL);
       Atomic::dec(&_pooled_recorder_count);
-      debug_only(cur_head->set_generation();)
+      cur_head->set_generation();
       return cur_head;
     }
   }
@@ -570,6 +573,51 @@ bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool sum
   return false;
 }

+// Whitebox API for blocking until the current generation of NMT data has been merged
+bool MemTracker::wbtest_wait_for_data_merge() {
+  // NMT can't be shutdown while we're holding _query_lock
+  MutexLockerEx lock(_query_lock, true);
+  assert(_worker_thread != NULL, "Invalid query");
+  // the generation at query time, so NMT will spin till this generation is processed
+  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
+  unsigned long current_processing_generation = _processing_generation;
+  // if generation counter overflown
+  bool generation_overflown = (generation_at_query_time < current_processing_generation);
+  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
+  // spin
+  while (!shutdown_in_progress()) {
+    if (!generation_overflown) {
+      if (current_processing_generation > generation_at_query_time) {
+        return true;
+      }
+    } else {
+      assert(generations_to_wrap >= 0, "Sanity check");
+      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
+      assert(current_generations_to_wrap >= 0, "Sanity check");
+      // to overflow an unsigned long should take long time, so to_wrap check should be sufficient
+      if (current_generations_to_wrap > generations_to_wrap &&
+          current_processing_generation > generation_at_query_time) {
+        return true;
+      }
+    }
+
+    // if worker thread is idle, but generation is not advancing, that means
+    // there is not safepoint to let NMT advance generation, force one.
+    if (_worker_thread_idle) {
+      VM_ForceSafepoint vfs;
+      VMThread::execute(&vfs);
+    }
+    MemSnapshot* snapshot = get_snapshot();
+    if (snapshot == NULL) {
+      return false;
+    }
+    snapshot->wait(1000);
+    current_processing_generation = _processing_generation;
+  }
+  // We end up here if NMT is shutting down before our data has been merged
+  return false;
+}
+
 // compare memory usage between current snapshot and baseline
 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
   MutexLockerEx lock(_query_lock, true);
......
@@ -91,9 +91,10 @@ class MemTracker : AllStatic {
   static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
            bool summary_only = true) { }
+  static bool wbtest_wait_for_data_merge() { }

   static inline void sync() { }
   static inline void thread_exiting(JavaThread* thread) { }
 };
@@ -111,6 +112,10 @@ class MemTracker : AllStatic {
 extern bool NMT_track_callsite;

+#ifndef MAX_UNSIGNED_LONG
+#define MAX_UNSIGNED_LONG (unsigned long)(-1)
+#endif
+
 #ifdef ASSERT
   #define DEBUG_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
 #else
@@ -380,6 +385,11 @@ class MemTracker : AllStatic {
   static bool compare_memory_usage(BaselineOutputer& out, size_t unit,
            bool summary_only = true);

+  // the version for whitebox testing support, it ensures that all memory
+  // activities before this method call, are reflected in the snapshot
+  // database.
+  static bool wbtest_wait_for_data_merge();
+
   // sync is called within global safepoint to synchronize nmt data
   static void sync();
@@ -432,6 +442,15 @@ class MemTracker : AllStatic {
   static void create_record_in_recorder(address addr, MEMFLAGS type,
                                   size_t size, address pc, JavaThread* thread);

+  static void set_current_processing_generation(unsigned long generation) {
+    _worker_thread_idle = false;
+    _processing_generation = generation;
+  }
+
+  static void report_worker_idle() {
+    _worker_thread_idle = true;
+  }
+
 private:
   // global memory snapshot
   static MemSnapshot* _snapshot;
@@ -483,6 +502,11 @@ class MemTracker : AllStatic {
   static volatile enum NMTStates _state;
   // the reason for shutting down nmt
   static enum ShutdownReason _reason;
+  // the generation that NMT is processing
+  static volatile unsigned long _processing_generation;
+  // although NMT is still procesing current generation, but
+  // there is not more recorder to process, set idle state
+  static volatile bool _worker_thread_idle;
 };

 #endif // !INCLUDE_NMT
......