Commit 8482aaa3, authored by Z zgu

Merge

src/share/vm/oops/instanceKlass.cpp

@@ -161,6 +161,8 @@ HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
 #endif //  ndef DTRACE_ENABLED
 
+volatile int InstanceKlass::_total_instanceKlass_count = 0;
+
 Klass* InstanceKlass::allocate_instance_klass(ClassLoaderData* loader_data,
                                               int vtable_len,
                                               int itable_len,
@@ -204,6 +206,7 @@ Klass* InstanceKlass::allocate_instance_klass(ClassLoaderData* loader_data,
                                  access_flags, !host_klass.is_null());
   }
 
+  Atomic::inc(&_total_instanceKlass_count);
   return ik;
 }
 
@@ -2331,6 +2334,9 @@ void InstanceKlass::release_C_heap_structures() {
   if (_array_name != NULL)  _array_name->decrement_refcount();
   if (_source_file_name != NULL) _source_file_name->decrement_refcount();
   if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension, mtClass);
+
+  assert(_total_instanceKlass_count >= 1, "Sanity check");
+  Atomic::dec(&_total_instanceKlass_count);
 }
 
 void InstanceKlass::set_source_file_name(Symbol* n) {
src/share/vm/oops/instanceKlass.hpp

@@ -31,6 +31,7 @@
 #include "oops/fieldInfo.hpp"
 #include "oops/instanceOop.hpp"
 #include "oops/klassVtable.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/os.hpp"
 #include "utilities/accessFlags.hpp"
@@ -170,6 +171,11 @@ class InstanceKlass: public Klass {
     initialization_error               // error happened during initialization
   };
 
+  static int number_of_instance_classes() { return _total_instanceKlass_count; }
+
+ private:
+  static volatile int _total_instanceKlass_count;
+
  protected:
   // Protection domain.
   oop             _protection_domain;
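Taken together, the two instanceKlass hunks keep a live count of loaded InstanceKlass instances: the counter goes up when a klass is allocated, goes down when its C-heap structures are released, and number_of_instance_classes() exposes the current value. Below is a standalone sketch, not HotSpot code, that models the same pattern with std::atomic in place of HotSpot's Atomic::inc/dec on a volatile int; FakeInstanceKlass and _total_count are illustrative names only.

// Standalone sketch, not HotSpot code: models the counter pattern these hunks add,
// using std::atomic in place of HotSpot's Atomic::inc/dec on a volatile int.
#include <atomic>
#include <cassert>
#include <cstdio>

class FakeInstanceKlass {
 public:
  FakeInstanceKlass()  { ++_total_count; }          // allocate_instance_klass() path
  ~FakeInstanceKlass() {                            // release_C_heap_structures() path
    assert(_total_count.load() >= 1 && "Sanity check");
    --_total_count;
  }
  static int number_of_instance_classes() { return _total_count.load(); }
 private:
  static std::atomic<int> _total_count;
};

std::atomic<int> FakeInstanceKlass::_total_count{0};

int main() {
  FakeInstanceKlass a, b;
  // prints 2: one entry per live klass, maintained by constructor/destructor
  std::printf("loaded instance classes: %d\n", FakeInstanceKlass::number_of_instance_classes());
  return 0;
}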
src/share/vm/services/memBaseline.cpp

@@ -22,7 +22,6 @@
  *
  */
 #include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
 #include "memory/allocation.hpp"
 #include "services/memBaseline.hpp"
 #include "services/memTracker.hpp"
@@ -349,7 +348,7 @@ bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
   reset();
   _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
                baseline_vm_summary(snapshot._vm_ptrs);
-  _number_of_classes = SystemDictionary::number_of_classes();
+  _number_of_classes = snapshot.number_of_classes();
 
   if (!summary_only && MemTracker::track_callsite() && _baselined) {
     _baselined = baseline_malloc_details(snapshot._alloc_ptrs) &&
src/share/vm/services/memRecorder.cpp

@@ -84,10 +84,13 @@ MemRecorder::~MemRecorder() {
     }
     delete _pointer_records;
   }
-  if (_next != NULL) {
-    delete _next;
+  // delete all linked recorders
+  while (_next != NULL) {
+    MemRecorder* tmp = _next;
+    _next = _next->next();
+    tmp->set_next(NULL);
+    delete tmp;
   }
   Atomic::dec(&_instance_count);
 }
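The destructor change replaces "delete _next", which tears the chain down through recursive destructor calls at one stack frame per linked recorder, with a loop that detaches and deletes each node. A minimal standalone sketch of the same idea, not HotSpot code, with Node standing in for MemRecorder:

// Standalone sketch, not HotSpot code: why the destructor now walks the list
// iteratively. Deleting the head of a long chain recursively risks deep stack
// usage; detaching each node before deleting it keeps the depth at one.
#include <cstddef>

struct Node {
  Node* _next = nullptr;
  ~Node() {
    while (_next != nullptr) {   // mirrors the new MemRecorder::~MemRecorder loop
      Node* tmp = _next;
      _next = _next->_next;
      tmp->_next = nullptr;      // detach so deleting tmp does not recurse further
      delete tmp;
    }
  }
};

int main() {
  Node* head = new Node;
  Node* cur = head;
  for (int i = 0; i < 100000; i++) {   // a chain this long is what makes recursion risky
    cur->_next = new Node;
    cur = cur->_next;
  }
  delete head;                         // tears down the whole chain with constant stack depth
  return 0;
}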
src/share/vm/services/memRecorder.hpp

@@ -203,6 +203,7 @@ class MemRecorder : public CHeapObj<mtNMT|otNMTRecorder> {
   friend class MemSnapshot;
   friend class MemTracker;
   friend class MemTrackWorker;
+  friend class GenerationData;
 
  protected:
   // the array that holds memory records
src/share/vm/services/memSnapshot.cpp

@@ -384,6 +384,7 @@ MemSnapshot::MemSnapshot() {
   _staging_area.init();
   _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
   NOT_PRODUCT(_untracked_count = 0;)
+  _number_of_classes = 0;
 }
 
 MemSnapshot::~MemSnapshot() {
@@ -479,7 +480,7 @@ bool MemSnapshot::merge(MemRecorder* rec) {
 // promote data to next generation
-bool MemSnapshot::promote() {
+bool MemSnapshot::promote(int number_of_classes) {
   assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
   assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
          "Just check");
@@ -496,6 +497,7 @@ bool MemSnapshot::promote() {
   NOT_PRODUCT(check_malloc_pointers();)
   _staging_area.clear();
+  _number_of_classes = number_of_classes;
   return promoted;
 }
src/share/vm/services/memSnapshot.hpp

@@ -355,6 +355,9 @@ class MemSnapshot : public CHeapObj<mtNMT> {
   // the lock to protect this snapshot
   Monitor*  _lock;
+
+  // the number of instance classes
+  int       _number_of_classes;
 
   NOT_PRODUCT(size_t _untracked_count;)
   friend class MemBaseline;
@@ -375,8 +378,9 @@ class MemSnapshot : public CHeapObj<mtNMT> {
   // merge a per-thread memory recorder into staging area
   bool merge(MemRecorder* rec);
   // promote staged data to snapshot
-  bool promote();
+  bool promote(int number_of_classes);
+  int  number_of_classes() const { return _number_of_classes; }
 
   void wait(long timeout) {
     assert(_lock != NULL, "Just check");
src/share/vm/services/memTrackWorker.cpp

@@ -29,6 +29,16 @@
 #include "utilities/decoder.hpp"
 #include "utilities/vmError.hpp"
 
+void GenerationData::reset() {
+  _number_of_classes = 0;
+  while (_recorder_list != NULL) {
+    MemRecorder* tmp = _recorder_list;
+    _recorder_list = _recorder_list->next();
+    MemTracker::release_thread_recorder(tmp);
+  }
+}
+
 MemTrackWorker::MemTrackWorker() {
   // create thread uses cgc thread type for now. We should revisit
   //   the option, or create new thread type.
@@ -39,7 +49,7 @@ MemTrackWorker::MemTrackWorker() {
   if (!has_error()) {
     _head = _tail = 0;
     for(int index = 0; index < MAX_GENERATIONS; index ++) {
-      _gen[index] = NULL;
+      ::new ((void*)&_gen[index]) GenerationData();
     }
   }
   NOT_PRODUCT(_sync_point_count = 0;)
@@ -49,10 +59,7 @@ MemTrackWorker::MemTrackWorker() {
 MemTrackWorker::~MemTrackWorker() {
   for (int index = 0; index < MAX_GENERATIONS; index ++) {
-    MemRecorder* rc = _gen[index];
-    if (rc != NULL) {
-      delete rc;
-    }
+    _gen[index].reset();
   }
 }
@@ -90,12 +97,7 @@ void MemTrackWorker::run() {
     {
      // take a recorder from earliest generation in buffer
      ThreadCritical tc;
-      rec = _gen[_head];
-      if (rec != NULL) {
-        _gen[_head] = rec->next();
-      }
-      assert(count_recorder(_gen[_head]) <= MemRecorder::_instance_count,
-        "infinite loop after dequeue");
+      rec = _gen[_head].next_recorder();
    }
    if (rec != NULL) {
      // merge the recorder into staging area
@@ -109,16 +111,20 @@ void MemTrackWorker::run() {
      // no more recorder to merge, promote staging area
      // to snapshot
      if (_head != _tail) {
+        long number_of_classes;
        {
          ThreadCritical tc;
-          if (_gen[_head] != NULL || _head == _tail) {
+          if (_gen[_head].has_more_recorder() || _head == _tail) {
            continue;
          }
+          number_of_classes = _gen[_head].number_of_classes();
+          _gen[_head].reset();
+
          // done with this generation, increment _head pointer
          _head = (_head + 1) % MAX_GENERATIONS;
        }
        // promote this generation data to snapshot
-        if (!snapshot->promote()) {
+        if (!snapshot->promote(number_of_classes)) {
          // failed to promote, means out of memory
          MemTracker::shutdown(MemTracker::NMT_out_of_memory);
        }
@@ -126,8 +132,8 @@ void MemTrackWorker::run() {
        snapshot->wait(1000);
        ThreadCritical tc;
        // check if more data arrived
-        if (_gen[_head] == NULL) {
-          _gen[_head] = MemTracker::get_pending_recorders();
+        if (!_gen[_head].has_more_recorder()) {
+          _gen[_head].add_recorders(MemTracker::get_pending_recorders());
        }
      }
    }
@@ -147,7 +153,7 @@ void MemTrackWorker::run() {
 // 1. add all recorders in pending queue to current generation
 // 2. increase generation
-void MemTrackWorker::at_sync_point(MemRecorder* rec) {
+void MemTrackWorker::at_sync_point(MemRecorder* rec, int number_of_classes) {
   NOT_PRODUCT(_sync_point_count ++;)
   assert(count_recorder(rec) <= MemRecorder::_instance_count,
     "pending queue has infinite loop");
@@ -155,23 +161,15 @@ void MemTrackWorker::at_sync_point(MemRecorder* rec) {
   bool out_of_generation_buffer = false;
   // check shutdown state inside ThreadCritical
   if (MemTracker::shutdown_in_progress()) return;
+
+  _gen[_tail].set_number_of_classes(number_of_classes);
   // append the recorders to the end of the generation
-  if( rec != NULL) {
-    MemRecorder* cur_head = _gen[_tail];
-    if (cur_head == NULL) {
-      _gen[_tail] = rec;
-    } else {
-      while (cur_head->next() != NULL) {
-        cur_head = cur_head->next();
-      }
-      cur_head->set_next(rec);
-    }
-  }
-  assert(count_recorder(rec) <= MemRecorder::_instance_count,
+  _gen[_tail].add_recorders(rec);
+  assert(count_recorder(_gen[_tail].peek()) <= MemRecorder::_instance_count,
     "after add to current generation has infinite loop");
 
   // we have collected all recorders for this generation. If there is data,
   // we need to increment _tail to start a new generation.
-  if (_gen[_tail] != NULL || _head == _tail) {
+  if (_gen[_tail].has_more_recorder() || _head == _tail) {
     _tail = (_tail + 1) % MAX_GENERATIONS;
     out_of_generation_buffer = (_tail == _head);
   }
@@ -194,7 +192,7 @@ int MemTrackWorker::count_recorder(const MemRecorder* head) {
 int MemTrackWorker::count_pending_recorders() const {
   int count = 0;
   for (int index = 0; index < MAX_GENERATIONS; index ++) {
-    MemRecorder* head = _gen[index];
+    MemRecorder* head = _gen[index].peek();
     if (head != NULL) {
       count += count_recorder(head);
     }
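The run() loop and at_sync_point() share the circular _gen[] buffer through the _head and _tail indices: the sync point closes the generation at _tail and advances it, the worker drains and promotes the generation at _head and advances it, and the buffer is exhausted when _tail wraps around onto _head. A standalone sketch, not HotSpot code, of just that index arithmetic (MAX_GENERATIONS shrunk to 4 for brevity; the header below uses 512):

// Standalone sketch, not HotSpot code: only the _head/_tail arithmetic shared by
// at_sync_point() (producer, advances tail) and run() (consumer, advances head).
#include <cstdio>

static const int MAX_GENERATIONS = 4;   // 512 in the real header, 4 here for the demo

int main() {
  int head = 0, tail = 0;
  bool out_of_generation_buffer = false;

  // producer side: three sync points each close a generation and advance tail
  for (int i = 0; i < 3; i++) {
    tail = (tail + 1) % MAX_GENERATIONS;
    out_of_generation_buffer = (tail == head);   // true only if tail wraps onto head
  }
  std::printf("head=%d tail=%d exhausted=%d\n", head, tail, out_of_generation_buffer);

  // consumer side: the worker finishes the generation at head and advances it
  head = (head + 1) % MAX_GENERATIONS;
  std::printf("head=%d tail=%d\n", head, tail);
  return 0;
}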
src/share/vm/services/memTrackWorker.hpp

@@ -32,14 +32,55 @@
 // Maximum MAX_GENERATIONS generation data can be tracked.
 #define MAX_GENERATIONS  512
 
+class GenerationData : public _ValueObj {
+ private:
+  int           _number_of_classes;
+  MemRecorder*  _recorder_list;
+
+ public:
+  GenerationData(): _number_of_classes(0), _recorder_list(NULL) { }
+
+  inline int number_of_classes() const { return _number_of_classes; }
+  inline void set_number_of_classes(long num) { _number_of_classes = num; }
+
+  inline MemRecorder* next_recorder() {
+    if (_recorder_list == NULL) {
+      return NULL;
+    } else {
+      MemRecorder* tmp = _recorder_list;
+      _recorder_list = _recorder_list->next();
+      return tmp;
+    }
+  }
+
+  inline bool has_more_recorder() const {
+    return (_recorder_list != NULL);
+  }
+
+  // add recorders to this generation
+  void add_recorders(MemRecorder* head) {
+    if (head != NULL) {
+      if (_recorder_list == NULL) {
+        _recorder_list = head;
+      } else {
+        MemRecorder* tmp = _recorder_list;
+        for (; tmp->next() != NULL; tmp = tmp->next());
+        tmp->set_next(head);
+      }
+    }
+  }
+
+  void reset();
+
+  NOT_PRODUCT(MemRecorder* peek() const { return _recorder_list; })
+};
+
 class MemTrackWorker : public NamedThread {
  private:
-  // circular buffer. This buffer contains recorders to be merged into global
+  // circular buffer. This buffer contains generation data to be merged into global
   // snaphsot.
-  // Each slot holds a linked list of memory recorders, that contains one
-  // generation of memory data.
-  MemRecorder*    _gen[MAX_GENERATIONS];
+  // Each slot holds a generation
+  GenerationData  _gen[MAX_GENERATIONS];
   int             _head, _tail; // head and tail pointers to above circular buffer
   bool            _has_error;
 
@@ -56,7 +97,7 @@ class MemTrackWorker : public NamedThread {
   inline bool has_error() const { return _has_error; }
 
   // task at synchronization point
-  void at_sync_point(MemRecorder* pending_recorders);
+  void at_sync_point(MemRecorder* pending_recorders, int number_of_classes);
 
   // for debugging purpose, they are not thread safe.
   NOT_PRODUCT(static int count_recorder(const MemRecorder* head);)
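GenerationData bundles what used to be a bare MemRecorder* list with the class count captured for that generation. The following standalone sketch, not HotSpot code, walks the same add/drain/reset lifecycle the worker relies on; Recorder and Generation are stand-ins for MemRecorder and GenerationData, and the real class releases recorders through MemTracker rather than delete.

// Standalone sketch, not HotSpot code: the add/drain/reset lifecycle of one
// generation slot.
#include <cstdio>

struct Recorder { Recorder* next = nullptr; };

struct Generation {
  int       num_classes = 0;
  Recorder* list        = nullptr;

  void add_recorders(Recorder* head) {        // called at a sync point
    if (head == nullptr) return;
    if (list == nullptr) { list = head; return; }
    Recorder* t = list;
    while (t->next != nullptr) t = t->next;
    t->next = head;
  }
  Recorder* next_recorder() {                 // dequeued by the worker loop
    Recorder* r = list;
    if (r != nullptr) list = r->next;
    return r;
  }
  bool has_more_recorder() const { return list != nullptr; }
  void reset() {                              // drop leftovers, clear the class count
    while (Recorder* r = next_recorder()) delete r;
    num_classes = 0;
  }
};

int main() {
  Generation gen;
  Recorder* a = new Recorder;
  a->next = new Recorder;
  gen.add_recorders(a);
  gen.num_classes = 42;                               // captured at the same sync point
  while (Recorder* r = gen.next_recorder()) delete r; // worker merges, then frees, each recorder
  std::printf("promote snapshot with %d classes\n", gen.num_classes);
  gen.reset();                                        // slot is ready for the next generation
  return 0;
}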
src/share/vm/services/memTracker.cpp

@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "oops/instanceKlass.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -485,7 +486,7 @@ void MemTracker::sync() {
     }
     // check _worker_thread with lock to avoid racing condition
     if (_worker_thread != NULL) {
-      _worker_thread->at_sync_point(pending_recorders);
+      _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
     }
     assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
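With this call in place the class count travels with the memory data instead of being sampled at report time: MemTracker::sync() reads InstanceKlass::number_of_instance_classes(), at_sync_point() stores it in the current generation, the worker passes it to MemSnapshot::promote(), and MemBaseline later reads it back through snapshot.number_of_classes(). A standalone sketch of that hand-off, not HotSpot code; the names mirror the diff but every type and body here is a stand-in.

// Standalone sketch, not HotSpot code: the hand-off the changed call creates.
#include <cstdio>

static int number_of_instance_classes() { return 123; }    // stands in for InstanceKlass

struct Snapshot {
  int num_classes = 0;
  void promote(int n) { num_classes = n; }                  // MemSnapshot::promote(int)
  int  number_of_classes() const { return num_classes; }    // read later by MemBaseline
};

struct Worker {
  int pending_classes = 0;
  void at_sync_point(int n) { pending_classes = n; }        // MemTrackWorker::at_sync_point(..., int)
  void promote_generation(Snapshot& s) { s.promote(pending_classes); }
};

int main() {
  Worker w;
  Snapshot s;
  w.at_sync_point(number_of_instance_classes());            // MemTracker::sync() samples the count
  w.promote_generation(s);                                   // worker loop promotes it with the data
  std::printf("baseline sees %d classes\n", s.number_of_classes());
  return 0;
}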
src/share/vm/services/memTracker.hpp

@@ -142,6 +142,7 @@ class Thread;
  * MemTracker is the 'gate' class to native memory tracking runtime.
  */
 class MemTracker : AllStatic {
+  friend class GenerationData;
   friend class MemTrackWorker;
   friend class MemSnapshot;
   friend class SyncThreadRecorderClosure;