Commit eabcae02 authored by iklam

8014912: Restore PrintSharedSpaces functionality after NPG

Summary: Added dumping of object sizes in CDS archive, sorted by MetaspaceObj::Type
Reviewed-by: coleenp, acorn
Parent 01508cd1
@@ -60,10 +60,11 @@ void* _ValueObj::operator new [](size_t size) { ShouldNotCallThis(); return 0;
 void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }
 
 void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
-                                 size_t word_size, bool read_only, TRAPS) {
+                                 size_t word_size, bool read_only,
+                                 MetaspaceObj::Type type, TRAPS) {
   // Klass has it's own operator new
   return Metaspace::allocate(loader_data, word_size, read_only,
-                             Metaspace::NonClassType, CHECK_NULL);
+                             type, CHECK_NULL);
 }
 
 bool MetaspaceObj::is_shared() const {
@@ -268,8 +268,55 @@ class MetaspaceObj {
   bool is_shared() const;
   void print_address_on(outputStream* st) const; // nonvirtual address printing
 
+#define METASPACE_OBJ_TYPES_DO(f) \
+  f(Unknown) \
+  f(Class) \
+  f(Symbol) \
+  f(TypeArrayU1) \
+  f(TypeArrayU2) \
+  f(TypeArrayU4) \
+  f(TypeArrayU8) \
+  f(TypeArrayOther) \
+  f(Method) \
+  f(ConstMethod) \
+  f(MethodData) \
+  f(ConstantPool) \
+  f(ConstantPoolCache) \
+  f(Annotation) \
+  f(MethodCounters)
+
+#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
+#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
+
+  enum Type {
+    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
+    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
+    _number_of_types
+  };
+
+  static const char * type_name(Type type) {
+    switch(type) {
+    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
+    default:
+      ShouldNotReachHere();
+      return NULL;
+    }
+  }
+
+  static MetaspaceObj::Type array_type(size_t elem_size) {
+    switch (elem_size) {
+    case 1: return TypeArrayU1Type;
+    case 2: return TypeArrayU2Type;
+    case 4: return TypeArrayU4Type;
+    case 8: return TypeArrayU8Type;
+    default:
+      return TypeArrayOtherType;
+    }
+  }
+
   void* operator new(size_t size, ClassLoaderData* loader_data,
-                     size_t word_size, bool read_only, Thread* thread);
+                     size_t word_size, bool read_only,
+                     Type type, Thread* thread);
   // can't use TRAPS from this header file.
   void operator delete(void* p) { ShouldNotCallThis(); }
 };
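The block above is an X-macro: METASPACE_OBJ_TYPES_DO holds the master list of type names and is expanded twice, once with METASPACE_OBJ_TYPE_DECLARE to emit the enum constants and once with METASPACE_OBJ_TYPE_NAME_CASE to emit the name-lookup switch, so the enum and its printable names cannot drift out of sync. A minimal standalone sketch of the same pattern, using hypothetical Color names rather than anything from the patch:

```cpp
#include <cstdio>

// Master list: each entry is handed to a caller-supplied macro f.
#define COLOR_LIST_DO(f) f(Red) f(Green) f(Blue)

#define COLOR_DECLARE(name)   name ## Color,
#define COLOR_NAME_CASE(name) case name ## Color: return #name;

enum Color {
  COLOR_LIST_DO(COLOR_DECLARE)  // expands to: RedColor, GreenColor, BlueColor,
  _number_of_colors
};

static const char* color_name(Color c) {
  switch (c) {
  COLOR_LIST_DO(COLOR_NAME_CASE)  // case RedColor: return "Red"; ...
  default: return "?";
  }
}

int main() {
  std::printf("%s\n", color_name(GreenColor));  // prints "Green"
  return 0;
}
```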
@@ -713,6 +713,23 @@ class SpaceManager : public CHeapObj<mtClass> {
 #ifdef ASSERT
   void verify_allocated_blocks_words();
 #endif
+
+  size_t get_raw_word_size(size_t word_size) {
+    // If only the dictionary is going to be used (i.e., no
+    // indexed free list), then there is a minimum size requirement.
+    // MinChunkSize is a placeholder for the real minimum size JJJ
+    size_t byte_size = word_size * BytesPerWord;
+
+    size_t byte_size_with_overhead = byte_size + Metablock::overhead();
+
+    size_t raw_bytes_size = MAX2(byte_size_with_overhead,
+                                 Metablock::min_block_byte_size());
+    raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
+    size_t raw_word_size = raw_bytes_size / BytesPerWord;
+    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
+
+    return raw_word_size;
+  }
 };
 
 uint const SpaceManager::_small_chunk_limit = 4;
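get_raw_word_size() computes how many words an allocation actually consumes: the requested word count is converted to bytes, per-block overhead is added, the free-list minimum block size is enforced via MAX2, and the total is rounded up to arena alignment before converting back to words. A standalone sketch of that arithmetic under assumed constants (8-byte words, 8 bytes of overhead, a 16-byte minimum block, 8-byte alignment; the real values come from Metablock and ARENA_ALIGN, so the numbers below are illustrative only):

```cpp
#include <cassert>
#include <cstdio>

// Stand-ins for the HotSpot constants used by get_raw_word_size().
const size_t kBytesPerWord  = 8;   // 64-bit words (assumed)
const size_t kBlockOverhead = 8;   // stand-in for Metablock::overhead()
const size_t kMinBlockBytes = 16;  // stand-in for Metablock::min_block_byte_size()
const size_t kArenaAlign    = 8;   // stand-in for the ARENA_ALIGN granularity

size_t raw_word_size(size_t word_size) {
  size_t bytes = word_size * kBytesPerWord + kBlockOverhead;
  if (bytes < kMinBlockBytes) bytes = kMinBlockBytes;        // MAX2
  bytes = (bytes + kArenaAlign - 1) & ~(kArenaAlign - 1);    // ARENA_ALIGN
  assert(bytes % kBytesPerWord == 0 && "Size problem");
  return bytes / kBytesPerWord;
}

int main() {
  // A 1-word request consumes 2 words: 8 bytes + 8 overhead = 16, already aligned.
  std::printf("%zu\n", raw_word_size(1));  // prints 2
  return 0;
}
```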
@@ -2320,19 +2337,7 @@ Metachunk* SpaceManager::get_new_chunk(size_t word_size,
 MetaWord* SpaceManager::allocate(size_t word_size) {
   MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
 
-  // If only the dictionary is going to be used (i.e., no
-  // indexed free list), then there is a minimum size requirement.
-  // MinChunkSize is a placeholder for the real minimum size JJJ
-  size_t byte_size = word_size * BytesPerWord;
-
-  size_t byte_size_with_overhead = byte_size + Metablock::overhead();
-
-  size_t raw_bytes_size = MAX2(byte_size_with_overhead,
-                               Metablock::min_block_byte_size());
-  raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
-  size_t raw_word_size = raw_bytes_size / BytesPerWord;
-  assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");
-
+  size_t raw_word_size = get_raw_word_size(word_size);
   BlockFreelist* fl =  block_freelists();
   MetaWord* p = NULL;
   // Allocation from the dictionary is expensive in the sense that
@@ -2896,6 +2901,9 @@ void Metaspace::initialize(Mutex* lock,
   if (class_chunk != NULL) {
     class_vsm()->add_chunk(class_chunk, true);
   }
+
+  _alloc_record_head = NULL;
+  _alloc_record_tail = NULL;
 }
 
 size_t Metaspace::align_word_size_up(size_t word_size) {
@@ -3000,12 +3008,14 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
 }
 
 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
-                              bool read_only, MetadataType mdtype, TRAPS) {
+                              bool read_only, MetaspaceObj::Type type, TRAPS) {
   if (HAS_PENDING_EXCEPTION) {
     assert(false, "Should not allocate with exception pending");
     return NULL;  // caller does a CHECK_NULL too
   }
 
+  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
+
   // SSS: Should we align the allocations and make sure the sizes are aligned.
   MetaWord* result = NULL;
@@ -3015,13 +3025,13 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
   // to revisit this for application class data sharing.
   if (DumpSharedSpaces) {
-    if (read_only) {
-      result = loader_data->ro_metaspace()->allocate(word_size, NonClassType);
-    } else {
-      result = loader_data->rw_metaspace()->allocate(word_size, NonClassType);
-    }
+    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
+    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
+    result = space->allocate(word_size, NonClassType);
     if (result == NULL) {
       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
+    } else {
+      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
     }
     return Metablock::initialize(result, word_size);
   }
@@ -3056,6 +3066,38 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
   return Metablock::initialize(result, word_size);
 }
 
+void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
+  assert(DumpSharedSpaces, "sanity");
+  AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
+
+  if (_alloc_record_head == NULL) {
+    _alloc_record_head = _alloc_record_tail = rec;
+  } else {
+    _alloc_record_tail->_next = rec;
+    _alloc_record_tail = rec;
+  }
+}
+
+void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
+  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
+
+  address last_addr = (address)bottom();
+  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
+    address ptr = rec->_ptr;
+    if (last_addr < ptr) {
+      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
+    }
+    closure->doit(ptr, rec->_type, rec->_byte_size);
+    last_addr = ptr + rec->_byte_size;
+  }
+
+  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
+  if (last_addr < top) {
+    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
+  }
+}
+
 void Metaspace::purge() {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
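iterate() replays the records in allocation (hence address) order and charges any bytes not covered by a record, including the tail between the last record and the space's used top, to UnknownType, so the per-type byte counts sum exactly to the space's used bytes; the asserts in dump_stats() further down rely on this. A minimal closure against the new interface, a hypothetical sketch that assumes the HotSpot-internal types from the patched headers:

```cpp
// Hypothetical example, not part of the patch: total bytes per type.
class TotalBytesClosure : public Metaspace::AllocRecordClosure {
 public:
  int _bytes[MetaspaceObj::_number_of_types];
  TotalBytesClosure() { memset(_bytes, 0, sizeof(_bytes)); }
  virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
    _bytes[type] += byte_size;  // gaps between records arrive as UnknownType
  }
};
// Usage during dumping: loader_data->ro_metaspace()->iterate(&closure);
```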
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -127,6 +127,23 @@ class Metaspace : public CHeapObj<mtClass> {
   static VirtualSpaceList* space_list()       { return _space_list; }
   static VirtualSpaceList* class_space_list() { return _class_space_list; }
 
+  // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
+  // maintain a single list for now.
+  void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
+
+  class AllocRecord : public CHeapObj<mtClass> {
+  public:
+    AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size)
+      : _next(NULL), _ptr(ptr), _type(type), _byte_size(byte_size) {}
+    AllocRecord *_next;
+    address _ptr;
+    MetaspaceObj::Type _type;
+    int _byte_size;
+  };
+
+  AllocRecord * _alloc_record_head;
+  AllocRecord * _alloc_record_tail;
+
  public:
 
   Metaspace(Mutex* lock, MetaspaceType type);
@@ -148,8 +165,8 @@ class Metaspace : public CHeapObj<mtClass> {
   size_t used_bytes_slow(MetadataType mdtype) const;
   size_t capacity_bytes_slow(MetadataType mdtype) const;
 
-  static Metablock* allocate(ClassLoaderData* loader_data, size_t size,
-                             bool read_only, MetadataType mdtype, TRAPS);
+  static Metablock* allocate(ClassLoaderData* loader_data, size_t word_size,
+                             bool read_only, MetaspaceObj::Type type, TRAPS);
 
   void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
 
   MetaWord* expand_and_allocate(size_t size,
@@ -166,6 +183,13 @@ class Metaspace : public CHeapObj<mtClass> {
   void print_on(outputStream* st) const;
   // Debugging support
   void verify();
+
+  class AllocRecordClosure : public StackObj {
+  public:
+    virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) = 0;
+  };
+
+  void iterate(AllocRecordClosure *closure);
 };
 
 class MetaspaceAux : AllStatic {
@@ -243,6 +243,147 @@ public:
   bool reading() const { return false; }
 };
 
+// This is for dumping detailed statistics for the allocations
+// in the shared spaces.
+class DumpAllocClosure : public Metaspace::AllocRecordClosure {
+public:
+
+  // Here's poor man's enum inheritance
+#define SHAREDSPACE_OBJ_TYPES_DO(f) \
+  METASPACE_OBJ_TYPES_DO(f) \
+  f(SymbolHashentry) \
+  f(SymbolBuckets) \
+  f(Other)
+
+#define SHAREDSPACE_OBJ_TYPE_DECLARE(name) name ## Type,
+#define SHAREDSPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
+
+  enum Type {
+    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
+    SHAREDSPACE_OBJ_TYPES_DO(SHAREDSPACE_OBJ_TYPE_DECLARE)
+    _number_of_types
+  };
+
+  static const char * type_name(Type type) {
+    switch(type) {
+    SHAREDSPACE_OBJ_TYPES_DO(SHAREDSPACE_OBJ_TYPE_NAME_CASE)
+    default:
+      ShouldNotReachHere();
+      return NULL;
+    }
+  }
+
+public:
+  enum {
+    RO = 0,
+    RW = 1
+  };
+
+  int _counts[2][_number_of_types];
+  int _bytes [2][_number_of_types];
+  int _which;
+
+  DumpAllocClosure() {
+    memset(_counts, 0, sizeof(_counts));
+    memset(_bytes,  0, sizeof(_bytes));
+  };
+
+  void iterate_metaspace(Metaspace* space, int which) {
+    assert(which == RO || which == RW, "sanity");
+    _which = which;
+    space->iterate(this);
+  }
+
+  virtual void doit(address ptr, MetaspaceObj::Type type, int byte_size) {
+    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
+    _counts[_which][type] ++;
+    _bytes [_which][type] += byte_size;
+  }
+
+  void dump_stats(int ro_all, int rw_all, int md_all, int mc_all);
+};
+
+void DumpAllocClosure::dump_stats(int ro_all, int rw_all, int md_all, int mc_all) {
+  rw_all += (md_all + mc_all); // md and mc are all mapped Read/Write
+  int other_bytes = md_all + mc_all;
+
+  // Calculate size of data that was not allocated by Metaspace::allocate()
+  int symbol_count = _counts[RO][MetaspaceObj::SymbolType];
+  int symhash_bytes = symbol_count * sizeof (HashtableEntry<Symbol*, mtSymbol>);
+  int symbuck_count = SymbolTable::the_table()->table_size();
+  int symbuck_bytes = symbuck_count * sizeof(HashtableBucket<mtSymbol>);
+
+  _counts[RW][SymbolHashentryType] = symbol_count;
+  _bytes [RW][SymbolHashentryType] = symhash_bytes;
+  other_bytes -= symhash_bytes;
+
+  _counts[RW][SymbolBucketsType] = symbuck_count;
+  _bytes [RW][SymbolBucketsType] = symbuck_bytes;
+  other_bytes -= symbuck_bytes;
+
+  // TODO: count things like dictionary, vtable, etc
+  _bytes[RW][OtherType] = other_bytes;
+
+  // prevent divide-by-zero
+  if (ro_all < 1) {
+    ro_all = 1;
+  }
+  if (rw_all < 1) {
+    rw_all = 1;
+  }
+
+  int all_ro_count = 0;
+  int all_ro_bytes = 0;
+  int all_rw_count = 0;
+  int all_rw_bytes = 0;
+
+  const char *fmt = "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f";
+  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
+  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";
+
+  tty->print_cr("Detailed metadata info (rw includes md and mc):");
+  tty->print_cr(hdr);
+  tty->print_cr(sep);
+  for (int type = 0; type < int(_number_of_types); type ++) {
+    const char *name = type_name((Type)type);
+    int ro_count = _counts[RO][type];
+    int ro_bytes = _bytes [RO][type];
+    int rw_count = _counts[RW][type];
+    int rw_bytes = _bytes [RW][type];
+    int count = ro_count + rw_count;
+    int bytes = ro_bytes + rw_bytes;
+    double ro_perc = 100.0 * double(ro_bytes) / double(ro_all);
+    double rw_perc = 100.0 * double(rw_bytes) / double(rw_all);
+    double perc = 100.0 * double(bytes) / double(ro_all + rw_all);
+
+    tty->print_cr(fmt, name,
+                  ro_count, ro_bytes, ro_perc,
+                  rw_count, rw_bytes, rw_perc,
+                  count, bytes, perc);
+
+    all_ro_count += ro_count;
+    all_ro_bytes += ro_bytes;
+    all_rw_count += rw_count;
+    all_rw_bytes += rw_bytes;
+  }
+
+  int all_count = all_ro_count + all_rw_count;
+  int all_bytes = all_ro_bytes + all_rw_bytes;
+
+  double all_ro_perc = 100.0 * double(all_ro_bytes) / double(ro_all);
+  double all_rw_perc = 100.0 * double(all_rw_bytes) / double(rw_all);
+  double all_perc = 100.0 * double(all_bytes) / double(ro_all + rw_all);
+
+  tty->print_cr(sep);
+  tty->print_cr(fmt, "Total",
+                all_ro_count, all_ro_bytes, all_ro_perc,
+                all_rw_count, all_rw_bytes, all_rw_perc,
+                all_count, all_bytes, all_perc);
+
+  assert(all_ro_bytes == ro_all, "everything should have been counted");
+  assert(all_rw_bytes == rw_all, "everything should have been counted");
+}
+
 // Populate the shared space.
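Note how dump_stats() balances the books: the md and mc regions are folded into the rw column up front, then the shared symbol table's hashtable entries and buckets, which are allocated outside Metaspace::allocate() and thus never hit record_allocation(), are carved out of that extra space, and whatever remains is reported as Other. The two trailing asserts check that the per-type byte totals reconstruct ro_all and rw_all exactly.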
@@ -454,6 +595,14 @@ void VM_PopulateDumpSharedSpace::doit() {
   mapinfo->close();
 
   memmove(vtbl_list, saved_vtbl, vtbl_list_size * sizeof(void*));
+
+  if (PrintSharedSpaces) {
+    DumpAllocClosure dac;
+    dac.iterate_metaspace(_loader_data->ro_metaspace(), DumpAllocClosure::RO);
+    dac.iterate_metaspace(_loader_data->rw_metaspace(), DumpAllocClosure::RW);
+    dac.dump_stats(int(ro_bytes), int(rw_bytes), int(md_bytes), int(mc_bytes));
+  }
 }
 
 static void link_shared_classes(Klass* obj, TRAPS) {
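Since record_allocation() asserts DumpSharedSpaces, the table is printed only while the archive is being dumped; on a JDK 8 era build that would be a run along the lines of `java -Xshare:dump -XX:+PrintSharedSpaces`, with the column layout fixed by the fmt/hdr strings above.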
@@ -33,7 +33,7 @@
 
 // Allocate annotations in metadata area
 Annotations* Annotations::allocate(ClassLoaderData* loader_data, TRAPS) {
-  return new (loader_data, size(), true, THREAD) Annotations();
+  return new (loader_data, size(), true, MetaspaceObj::AnnotationType, THREAD) Annotations();
 }
 
 // helper
@@ -40,7 +40,7 @@ ConstMethod* ConstMethod::allocate(ClassLoaderData* loader_data,
                                    MethodType method_type,
                                    TRAPS) {
   int size = ConstMethod::size(byte_code_size, sizes);
-  return new (loader_data, size, true, THREAD) ConstMethod(
+  return new (loader_data, size, true, MetaspaceObj::ConstMethodType, THREAD) ConstMethod(
     byte_code_size, sizes, method_type, size);
 }
@@ -55,7 +55,7 @@ ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, T
   // the resolved_references array, which is recreated at startup time.
   // But that could be moved to InstanceKlass (although a pain to access from
   // assembly code).  Maybe it could be moved to the cpCache which is RW.
-  return new (loader_data, size, false, THREAD) ConstantPool(tags);
+  return new (loader_data, size, false, MetaspaceObj::ConstantPoolType, THREAD) ConstantPool(tags);
 }
 
 ConstantPool::ConstantPool(Array<u1>* tags) {
@@ -542,7 +542,8 @@ ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
                                      const intStack& invokedynamic_map, TRAPS) {
   int size = ConstantPoolCache::size(length);
 
-  return new (loader_data, size, false, THREAD) ConstantPoolCache(length, index_map, invokedynamic_map);
+  return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD)
+    ConstantPoolCache(length, index_map, invokedynamic_map);
 }
 
 void ConstantPoolCache::initialize(const intArray& inverse_index_map,
@@ -140,7 +140,7 @@ Method* Klass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
 
 void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) {
   return Metaspace::allocate(loader_data, word_size, /*read_only*/false,
-                             Metaspace::ClassType, CHECK_NULL);
+                             MetaspaceObj::ClassType, CHECK_NULL);
 }
 
 Klass::Klass() {
@@ -74,7 +74,7 @@ Method* Method::allocate(ClassLoaderData* loader_data,
   int size = Method::size(access_flags.is_native());
-  return new (loader_data, size, false, THREAD) Method(cm, access_flags, size);
+  return new (loader_data, size, false, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags, size);
 }
 
 Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) {
@@ -26,7 +26,7 @@
 #include "runtime/thread.inline.hpp"
 
 MethodCounters* MethodCounters::allocate(ClassLoaderData* loader_data, TRAPS) {
-  return new(loader_data, size(), false, THREAD) MethodCounters();
+  return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters();
 }
 
 void MethodCounters::clear_counters() {
@@ -388,7 +388,8 @@ void ArgInfoData::print_data_on(outputStream* st) {
 
 MethodData* MethodData::allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS) {
   int size = MethodData::compute_allocation_size_in_words(method);
-  return new (loader_data, size, false, THREAD) MethodData(method(), size, CHECK_NULL);
+  return new (loader_data, size, false, MetaspaceObj::MethodDataType, THREAD)
+    MethodData(method(), size, CHECK_NULL);
 }
 
 int MethodData::bytecode_cell_count(Bytecodes::Code code) {
@@ -55,7 +55,7 @@ void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRA
   address res;
   int alloc_size = size(len)*HeapWordSize;
   res = (address) Metaspace::allocate(loader_data, size(len), true,
-                                      Metaspace::NonClassType, CHECK_NULL);
+                                      MetaspaceObj::SymbolType, CHECK_NULL);
   return res;
 }
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -320,7 +320,7 @@ protected:
   void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) {
     size_t word_size = Array::size(length);
     return (void*) Metaspace::allocate(loader_data, word_size, read_only,
-                                       Metaspace::NonClassType, CHECK_NULL);
+                                       MetaspaceObj::array_type(sizeof(T)), CHECK_NULL);
   }
 
   static size_t byte_sizeof(int length) { return sizeof(Array<T>) + MAX2(length - 1, 0) * sizeof(T); }
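With this last change, each Array&lt;T&gt; is attributed to the TypeArray bucket matching its element size: Array&lt;u1&gt; lands in TypeArrayU1, Array&lt;u2&gt; in TypeArrayU2, four- and eight-byte elements in TypeArrayU4/U8, and any other element size falls back to TypeArrayOther via MetaspaceObj::array_type(sizeof(T)).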