提交 bed9d59a 编写于 作者: C coleenp

7196103: NPG: Unable to allocate bit map for parallel garbage collection for the requested heap size

7196103: NPG: Unable to allocate bit map for parallel garbage collection for the requested heap size
Summary: Don't allocate huge class metaspace size by default on x64
Reviewed-by: stefank, jmasa, kvn
上级 87b19080
...@@ -2779,19 +2779,22 @@ void Metaspace::initialize_class_space(ReservedSpace rs) { ...@@ -2779,19 +2779,22 @@ void Metaspace::initialize_class_space(ReservedSpace rs) {
_class_space_list = new VirtualSpaceList(rs); _class_space_list = new VirtualSpaceList(rs);
} }
// Class space probably needs a lot less than data space
const int class_space_divisor = 4;
void Metaspace::initialize(Mutex* lock, size_t initial_size) { void Metaspace::initialize(Mutex* lock, size_t initial_size) {
// Use SmallChunk size if not specified, adjust class to smaller size if so. // Use SmallChunk size if not specified. If specified, use this size for
// the data metaspace.
size_t word_size; size_t word_size;
size_t class_word_size; size_t class_word_size;
if (initial_size == 0) { if (initial_size == 0) {
word_size = (size_t) SpaceManager::SmallChunk; word_size = (size_t) SpaceManager::SmallChunk;
class_word_size = word_size; class_word_size = (size_t) SpaceManager::SmallChunk;
} else { } else {
word_size = initial_size; word_size = initial_size;
class_word_size = initial_size/class_space_divisor; // Make the first class chunk bigger than a medium chunk so it's not put
// on the medium chunk list. The next chunk will be small and progress
// from there. This size calculated by -version.
class_word_size = MIN2((size_t)SpaceManager::MediumChunk*5,
(ClassMetaspaceSize/BytesPerWord)*2);
} }
assert(space_list() != NULL, assert(space_list() != NULL,
......
...@@ -858,7 +858,7 @@ jint Universe::initialize_heap() { ...@@ -858,7 +858,7 @@ jint Universe::initialize_heap() {
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) { ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
// Add in the class metaspace area so the classes in the headers can // Add in the class metaspace area so the classes in the headers can
// be compressed the same as instances. // be compressed the same as instances.
size_t total_reserved = heap_size + ClassMetaspaceSize; size_t total_reserved = align_size_up(heap_size + ClassMetaspaceSize, alignment);
char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop); char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr); ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
......
...@@ -1427,6 +1427,16 @@ void Arguments::set_ergonomics_flags() { ...@@ -1427,6 +1427,16 @@ void Arguments::set_ergonomics_flags() {
// if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) { // if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) {
// FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true); // FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true);
// } // }
// Set the ClassMetaspaceSize to something that will not need to be
// expanded, since it cannot be expanded.
if (UseCompressedKlassPointers && FLAG_IS_DEFAULT(ClassMetaspaceSize)) {
// 100,000 classes seems like a good size, so 100M assumes around 1K
// per klass. The vtable and oopMap is embedded so we don't have a fixed
// size per klass. Eventually, this will be parameterized because it
// would also be useful to determine the optimal size of the
// systemDictionary.
FLAG_SET_ERGO(uintx, ClassMetaspaceSize, 100*M);
}
} }
// Also checks that certain machines are slower with compressed oops // Also checks that certain machines are slower with compressed oops
// in vm_version initialization code. // in vm_version initialization code.
...@@ -1965,6 +1975,9 @@ bool Arguments::check_vm_args_consistency() { ...@@ -1965,6 +1975,9 @@ bool Arguments::check_vm_args_consistency() {
status = status && verify_object_alignment(); status = status && verify_object_alignment();
status = status && verify_min_value(ClassMetaspaceSize, 1*M,
"ClassMetaspaceSize");
return status; return status;
} }
...@@ -2916,7 +2929,7 @@ void Arguments::set_shared_spaces_flags() { ...@@ -2916,7 +2929,7 @@ void Arguments::set_shared_spaces_flags() {
(UseLargePages && FLAG_IS_CMDLINE(UseLargePages)); (UseLargePages && FLAG_IS_CMDLINE(UseLargePages));
if (cannot_share) { if (cannot_share) {
if (must_share) { if (must_share) {
warning("disabling large pages%s" warning("disabling large pages %s"
"because of %s", "" LP64_ONLY("and compressed oops "), "because of %s", "" LP64_ONLY("and compressed oops "),
DumpSharedSpaces ? "-Xshare:dump" : "-Xshare:on"); DumpSharedSpaces ? "-Xshare:dump" : "-Xshare:on");
FLAG_SET_CMDLINE(bool, UseLargePages, false); FLAG_SET_CMDLINE(bool, UseLargePages, false);
......
...@@ -2993,7 +2993,7 @@ class CommandLineFlags { ...@@ -2993,7 +2993,7 @@ class CommandLineFlags {
product(uintx, MaxMetaspaceSize, max_uintx, \ product(uintx, MaxMetaspaceSize, max_uintx, \
"Maximum size of Metaspaces (in bytes)") \ "Maximum size of Metaspaces (in bytes)") \
\ \
product(uintx, ClassMetaspaceSize, NOT_LP64(1*M) LP64_ONLY(512*M), \ product(uintx, ClassMetaspaceSize, 2*M, \
"Maximum size of InstanceKlass area in Metaspace used for " \ "Maximum size of InstanceKlass area in Metaspace used for " \
"UseCompressedKlassPointers") \ "UseCompressedKlassPointers") \
\ \
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册