Commit 4c1c3163 authored by C ccheung

Merge

@@ -381,3 +381,5 @@ a09fe9d1e016c285307507a5793bc4fa6215e9c9 hs25-b50
 566db1b0e6efca31f181456e54c8911d0192410d hs25-b51
 c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
 58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
+6209b0ed51c086d4127bac0e086c8f326d1764d7 jdk8-b110
+562a3d356de67670b4172b82aca2d30743449e04 hs25-b53
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.memory;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
public class ProtectionDomainCacheEntry extends VMObject {
private static sun.jvm.hotspot.types.OopField protectionDomainField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("ProtectionDomainCacheEntry");
protectionDomainField = type.getOopField("_literal");
}
public ProtectionDomainCacheEntry(Address addr) {
super(addr);
}
public Oop protectionDomain() {
return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr));
}
}
 /*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@ import sun.jvm.hotspot.types.*;
 public class ProtectionDomainEntry extends VMObject {
   private static AddressField nextField;
-  private static sun.jvm.hotspot.types.OopField protectionDomainField;
+  private static AddressField pdCacheField;
   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -46,7 +46,7 @@ public class ProtectionDomainEntry extends VMObject {
     Type type = db.lookupType("ProtectionDomainEntry");
     nextField = type.getAddressField("_next");
-    protectionDomainField = type.getOopField("_protection_domain");
+    pdCacheField = type.getAddressField("_pd_cache");
   }
   public ProtectionDomainEntry(Address addr) {
@@ -54,10 +54,12 @@ public class ProtectionDomainEntry extends VMObject {
   }
   public ProtectionDomainEntry next() {
-    return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, addr);
+    return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, nextField.getValue(addr));
  }
   public Oop protectionDomain() {
-    return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr));
+    ProtectionDomainCacheEntry pd_cache = (ProtectionDomainCacheEntry)
+      VMObjectFactory.newObject(ProtectionDomainCacheEntry.class, pdCacheField.getValue(addr));
+    return pd_cache.protectionDomain();
   }
 }
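For orientation: after this change the SA mirrors a two-level indirection on the VM side. Each DictionaryEntry holds a linked list of ProtectionDomainEntry nodes, and each node now points at a shared ProtectionDomainCacheEntry that owns the actual protection-domain oop (see the dictionary.hpp hunk further down). A minimal sketch of walking that list with the accessors introduced in this commit (the helper itself is illustrative, not part of the patch):

// Illustrative helper, not part of the patch: visit every protection-domain
// oop reachable from one DictionaryEntry's pd_set.
static void for_each_protection_domain(ProtectionDomainEntry* pd_set,
                                       void visit(oop)) {
  for (ProtectionDomainEntry* current = pd_set;
       current != NULL;
       current = current->next()) {
    visit(current->protection_domain()); // forwards to _pd_cache->protection_domain()
  }
}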
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=53
+HS_BUILD_NUMBER=54
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
......
@@ -37,6 +37,9 @@
 #include "runtime/vframeArray.hpp"
 #include "utilities/macros.hpp"
 #include "vmreg_sparc.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif
 // Implementation of StubAssembler
@@ -912,7 +915,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         Register tmp2 = G3_scratch;
         jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
-        Label not_already_dirty, restart, refill;
+        Label not_already_dirty, restart, refill, young_card;
 #ifdef _LP64
         __ srlx(addr, CardTableModRefBS::card_shift, addr);
@@ -924,9 +927,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         __ set(rs, cardtable);         // cardtable := <card table base>
         __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
+        __ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
+
+        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+        __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
+
         assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
         __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
+        __ bind(young_card);
         // We didn't take the branch, so we're already dirty: return.
         // Use return-from-leaf
         __ retl();
......
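The SPARC C1 hunk above, the three barrier hunks that follow (SPARC macro assembler, x86 C1, x86 macro assembler) and the C++ write_ref_field_work further down all encode the same filtering sequence. A hedged C-like restatement of the control flow (the helper names are placeholders for the platform-specific operations):

// Sketch of the G1 post-barrier filter added in this commit (illustrative).
void g1_post_barrier_sketch(volatile jbyte* card) {
  if (*card == g1_young_card_val()) {
    return;                    // young-region cards never need remembered-set work
  }
  storeload_membar();          // order the reference store before re-reading the card
  if (*card == dirty_card_val()) {
    return;                    // already dirty: the card is queued or will be refined
  }
  *card = dirty_card_val();    // storing region-crossing non-NULL: dirty card and log it
  enqueue_on_dirty_card_queue(card);
}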
@@ -3752,7 +3752,7 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
 #define __ masm.
   address start = __ pc();
-  Label not_already_dirty, restart, refill;
+  Label not_already_dirty, restart, refill, young_card;
 #ifdef _LP64
   __ srlx(O0, CardTableModRefBS::card_shift, O0);
@@ -3763,9 +3763,15 @@ static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
   __ set(addrlit, O1); // O1 := <card table base>
   __ ldub(O0, O1, O2); // O2 := [O0 + O1]
+  __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
+
+  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+  __ ldub(O0, O1, O2); // O2 := [O0 + O1]
+
   assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
   __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
+  __ bind(young_card);
   // We didn't take the branch, so we're already dirty: return.
   // Use return-from-leaf
   __ retl();
......
@@ -38,6 +38,9 @@
 #include "runtime/vframeArray.hpp"
 #include "utilities/macros.hpp"
 #include "vmreg_x86.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif
 // Implementation of StubAssembler
@@ -1753,13 +1756,17 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
 #endif
-        __ cmpb(Address(card_addr, 0), 0);
+        __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
+        __ jcc(Assembler::equal, done);
+
+        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+        __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
         __ jcc(Assembler::equal, done);
         // storing region crossing non-NULL, card is clean.
         // dirty card and log.
-        __ movb(Address(card_addr, 0), 0);
+        __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
         __ cmpl(queue_index, 0);
         __ jcc(Assembler::equal, runtime);
......
@@ -3389,13 +3389,18 @@ void MacroAssembler::g1_write_barrier_post(Register store_addr,
   const Register card_addr = tmp;
   lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
 #endif
-  cmpb(Address(card_addr, 0), 0);
+  cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
   jcc(Assembler::equal, done);
+
+  membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+  cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
+  jcc(Assembler::equal, done);
+
   // storing a region crossing, non-NULL oop, card is clean.
   // dirty card and log.
-  movb(Address(card_addr, 0), 0);
+  movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
   cmpl(queue_index, 0);
   jcc(Assembler::equal, runtime);
......
@@ -53,7 +53,7 @@
 // Defines Linux-specific default values. The flags are available on all
 // platforms, but they may have different default values on other platforms.
 //
-define_pd_global(bool, UseLargePages, true);
+define_pd_global(bool, UseLargePages, false);
 define_pd_global(bool, UseLargePagesIndividualAllocation, false);
 define_pd_global(bool, UseOSErrorReporting, false);
 define_pd_global(bool, UseThreadPriorities, true) ;
......
@@ -3361,13 +3361,15 @@ bool os::Linux::setup_large_page_type(size_t page_size) {
   if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
       FLAG_IS_DEFAULT(UseSHM) &&
       FLAG_IS_DEFAULT(UseTransparentHugePages)) {
-    // If UseLargePages is specified on the command line try all methods,
-    // if it's default, then try only UseTransparentHugePages.
-    if (FLAG_IS_DEFAULT(UseLargePages)) {
-      UseTransparentHugePages = true;
-    } else {
-      UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
-    }
+
+    // The type of large pages has not been specified by the user.
+
+    // Try UseHugeTLBFS and then UseSHM.
+    UseHugeTLBFS = UseSHM = true;
+
+    // Don't try UseTransparentHugePages since there are known
+    // performance issues with it turned on. This might change in the future.
+    UseTransparentHugePages = false;
   }
   if (UseTransparentHugePages) {
@@ -3393,9 +3395,19 @@ bool os::Linux::setup_large_page_type(size_t page_size) {
 }
 void os::large_page_init() {
-  if (!UseLargePages) {
-    UseHugeTLBFS = false;
+  if (!UseLargePages &&
+      !UseTransparentHugePages &&
+      !UseHugeTLBFS &&
+      !UseSHM) {
+    // Not using large pages.
+    return;
+  }
+
+  if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
+    // The user explicitly turned off large pages.
+    // Ignore the rest of the large pages flags.
     UseTransparentHugePages = false;
+    UseHugeTLBFS = false;
     UseSHM = false;
     return;
   }
......
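Restated, the new large-page setup separates "large pages never requested" from "explicitly disabled", and stops probing transparent huge pages by default. A condensed paraphrase of the two hunks above (not the literal code):

// Paraphrase of the resulting flag resolution (illustrative only).
void large_page_policy_sketch() {
  if (!UseLargePages && !UseTransparentHugePages && !UseHugeTLBFS && !UseSHM) {
    return;  // nothing requested: large pages stay off (now the Linux default)
  }
  if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
    // -XX:-UseLargePages overrides the per-method flags.
    UseTransparentHugePages = false;
    UseHugeTLBFS = false;
    UseSHM = false;
    return;
  }
  if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM) &&
      FLAG_IS_DEFAULT(UseTransparentHugePages)) {
    // No method chosen: probe hugetlbfs, then SysV SHM; skip THP for now.
    UseHugeTLBFS = true;
    UseSHM = true;
    UseTransparentHugePages = false;
  }
}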
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/dictionary.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "utilities/hashtable.inline.hpp"
@@ -38,17 +39,21 @@ Dictionary::Dictionary(int table_size)
   : TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry)) {
   _current_class_index = 0;
   _current_class_entry = NULL;
+  _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize);
 };
 Dictionary::Dictionary(int table_size, HashtableBucket<mtClass>* t,
                        int number_of_entries)
   : TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry), t, number_of_entries) {
   _current_class_index = 0;
   _current_class_entry = NULL;
+  _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize);
 };
+ProtectionDomainCacheEntry* Dictionary::cache_get(oop protection_domain) {
+  return _pd_cache_table->get(protection_domain);
+}
 DictionaryEntry* Dictionary::new_entry(unsigned int hash, Klass* klass,
                                        ClassLoaderData* loader_data) {
@@ -105,11 +110,12 @@ bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
 }
-void DictionaryEntry::add_protection_domain(oop protection_domain) {
+void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_domain) {
   assert_locked_or_safepoint(SystemDictionary_lock);
   if (!contains_protection_domain(protection_domain)) {
+    ProtectionDomainCacheEntry* entry = dict->cache_get(protection_domain);
     ProtectionDomainEntry* new_head =
-        new ProtectionDomainEntry(protection_domain, _pd_set);
+        new ProtectionDomainEntry(entry, _pd_set);
     // Warning: Preserve store ordering.  The SystemDictionary is read
     //          without locks.  The new ProtectionDomainEntry must be
     //          complete before other threads can be allowed to see it
@@ -193,7 +199,10 @@ bool Dictionary::do_unloading() {
 void Dictionary::always_strong_oops_do(OopClosure* blk) {
-  // Follow all system classes and temporary placeholders in dictionary
+  // Follow all system classes and temporary placeholders in dictionary; only
+  // protection domain oops contain references into the heap. In a first
+  // pass over the system dictionary determine which need to be treated as
+  // strongly reachable and mark them as such.
   for (int index = 0; index < table_size(); index++) {
     for (DictionaryEntry *probe = bucket(index);
                           probe != NULL;
@@ -201,10 +210,13 @@ void Dictionary::always_strong_oops_do(OopClosure* blk) {
       Klass* e = probe->klass();
       ClassLoaderData* loader_data = probe->loader_data();
       if (is_strongly_reachable(loader_data, e)) {
-        probe->protection_domain_set_oops_do(blk);
+        probe->set_strongly_reachable();
       }
     }
   }
+  // Then iterate over the protection domain cache to apply the closure on the
+  // previously marked ones.
+  _pd_cache_table->always_strong_oops_do(blk);
 }
@@ -266,18 +278,12 @@ void Dictionary::classes_do(void f(Klass*, ClassLoaderData*)) {
   }
 }
 void Dictionary::oops_do(OopClosure* f) {
-  for (int index = 0; index < table_size(); index++) {
-    for (DictionaryEntry* probe = bucket(index);
-                          probe != NULL;
-                          probe = probe->next()) {
-      probe->protection_domain_set_oops_do(f);
-    }
-  }
+  // Only the protection domain oops contain references into the heap. Iterate
+  // over all of them.
+  _pd_cache_table->oops_do(f);
 }
 void Dictionary::methods_do(void f(Method*)) {
   for (int index = 0; index < table_size(); index++) {
     for (DictionaryEntry* probe = bucket(index);
@@ -292,6 +298,11 @@ void Dictionary::methods_do(void f(Method*)) {
   }
 }
+void Dictionary::unlink(BoolObjectClosure* is_alive) {
+  // Only the protection domain cache table may contain references to the heap
+  // that need to be unlinked.
+  _pd_cache_table->unlink(is_alive);
+}
 Klass* Dictionary::try_get_next_class() {
   while (true) {
@@ -306,7 +317,6 @@ Klass* Dictionary::try_get_next_class() {
   // never reached
 }
 // Add a loaded class to the system dictionary.
 // Readers of the SystemDictionary aren't always locked, so _buckets
 // is volatile. The store of the next field in the constructor is
@@ -396,7 +406,7 @@ void Dictionary::add_protection_domain(int index, unsigned int hash,
   assert(protection_domain() != NULL,
          "real protection domain should be present");
-  entry->add_protection_domain(protection_domain());
+  entry->add_protection_domain(this, protection_domain());
   assert(entry->contains_protection_domain(protection_domain()),
          "now protection domain should be present");
@@ -446,6 +456,146 @@ void Dictionary::reorder_dictionary() {
   }
 }
ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size)
: Hashtable<oop, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
{
}
void ProtectionDomainCacheTable::unlink(BoolObjectClosure* is_alive) {
assert(SafepointSynchronize::is_at_safepoint(), "must be");
for (int i = 0; i < table_size(); ++i) {
ProtectionDomainCacheEntry** p = bucket_addr(i);
ProtectionDomainCacheEntry* entry = bucket(i);
while (entry != NULL) {
if (is_alive->do_object_b(entry->literal())) {
p = entry->next_addr();
} else {
*p = entry->next();
free_entry(entry);
}
entry = *p;
}
}
}
void ProtectionDomainCacheTable::oops_do(OopClosure* f) {
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
probe->oops_do(f);
}
}
}
uint ProtectionDomainCacheTable::bucket_size() {
return sizeof(ProtectionDomainCacheEntry);
}
#ifndef PRODUCT
void ProtectionDomainCacheTable::print() {
tty->print_cr("Protection domain cache table (table_size=%d, classes=%d)",
table_size(), number_of_entries());
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
probe->print();
}
}
}
void ProtectionDomainCacheEntry::print() {
tty->print_cr("entry "PTR_FORMAT" value "PTR_FORMAT" strongly_reachable %d next "PTR_FORMAT,
this, (void*)literal(), _strongly_reachable, next());
}
#endif
void ProtectionDomainCacheTable::verify() {
int element_count = 0;
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
probe->verify();
element_count++;
}
}
guarantee(number_of_entries() == element_count,
"Verify of protection domain cache table failed");
debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
}
void ProtectionDomainCacheEntry::verify() {
guarantee(literal()->is_oop(), "must be an oop");
}
void ProtectionDomainCacheTable::always_strong_oops_do(OopClosure* f) {
// The caller marked the protection domain cache entries that we need to apply
// the closure on. Only process them.
for (int index = 0; index < table_size(); index++) {
for (ProtectionDomainCacheEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
if (probe->is_strongly_reachable()) {
probe->reset_strongly_reachable();
probe->oops_do(f);
}
}
}
}
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(oop protection_domain) {
unsigned int hash = compute_hash(protection_domain);
int index = hash_to_index(hash);
ProtectionDomainCacheEntry* entry = find_entry(index, protection_domain);
if (entry == NULL) {
entry = add_entry(index, hash, protection_domain);
}
return entry;
}
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, oop protection_domain) {
for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) {
if (e->protection_domain() == protection_domain) {
return e;
}
}
return NULL;
}
ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, unsigned int hash, oop protection_domain) {
assert_locked_or_safepoint(SystemDictionary_lock);
assert(index == index_for(protection_domain), "incorrect index?");
assert(find_entry(index, protection_domain) == NULL, "no double entry");
ProtectionDomainCacheEntry* p = new_entry(hash, protection_domain);
Hashtable<oop, mtClass>::add_entry(index, p);
return p;
}
void ProtectionDomainCacheTable::free(ProtectionDomainCacheEntry* to_delete) {
unsigned int hash = compute_hash(to_delete->protection_domain());
int index = hash_to_index(hash);
ProtectionDomainCacheEntry** p = bucket_addr(index);
ProtectionDomainCacheEntry* entry = bucket(index);
while (true) {
assert(entry != NULL, "sanity");
if (entry == to_delete) {
*p = entry->next();
Hashtable<oop, mtClass>::free_entry(entry);
break;
} else {
p = entry->next_addr();
entry = *p;
}
}
}
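Both unlink() and free() above rely on the pointer-to-pointer bucket walk, which splices a node out of a singly linked list without tracking a separate previous node. The idiom in isolation (Node and should_remove are placeholders, not names from the patch):

// Generic form of the unlink idiom used by ProtectionDomainCacheTable.
struct Node { Node* next; };
static void unlink_matching(Node** head, bool should_remove(Node*)) {
  Node** p = head;               // p always addresses the link to be fixed up
  while (*p != NULL) {
    if (should_remove(*p)) {
      *p = (*p)->next;           // splice the dead node out through the link
    } else {
      p = &(*p)->next;           // keep the node, advance to its next link
    }
  }
}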
 SymbolPropertyTable::SymbolPropertyTable(int table_size)
   : Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry))
 {
@@ -532,11 +682,13 @@ void Dictionary::print() {
       tty->cr();
     }
   }
+  tty->cr();
+  _pd_cache_table->print();
+  tty->cr();
 }
 #endif
 void Dictionary::verify() {
   guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
@@ -563,5 +715,7 @@ void Dictionary::verify() {
   guarantee(number_of_entries() == element_count,
             "Verify of system dictionary failed");
   debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
+  _pd_cache_table->verify();
 }
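Taken together, the dictionary now exposes three GC entry points that all funnel heap references through the small cache table. A hedged summary of how they are meant to be combined (the concrete call sites vary by collector; unlink() is wired up in systemDictionary.cpp below, and this sequence is illustrative rather than a literal call chain from the patch):

// Illustrative summary of the three GC-facing roles, not literal patch code:
void dictionary_gc_roles_sketch(Dictionary* dict,
                                BoolObjectClosure* is_alive,
                                OopClosure* closure) {
  dict->always_strong_oops_do(closure); // strong-root marking: flag reachable
                                        // cache entries, then visit only those
  dict->unlink(is_alive);               // class unloading: drop cache entries
                                        // whose protection-domain oops died
  dict->oops_do(closure);               // full oop iteration: one pass over the
                                        // cache table instead of every bucket
}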
@@ -27,11 +27,14 @@
 #include "classfile/systemDictionary.hpp"
 #include "oops/instanceKlass.hpp"
-#include "oops/oop.hpp"
+#include "oops/oop.inline.hpp"
 #include "utilities/hashtable.hpp"
 class DictionaryEntry;
 class PSPromotionManager;
+class ProtectionDomainCacheTable;
+class ProtectionDomainCacheEntry;
+class BoolObjectClosure;
 //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 // The data structure for the system dictionary (and the shared system
@@ -45,6 +48,8 @@ private:
   // pointer to the current hash table entry.
   static DictionaryEntry* _current_class_entry;
+  ProtectionDomainCacheTable* _pd_cache_table;
   DictionaryEntry* get_entry(int index, unsigned int hash,
                              Symbol* name, ClassLoaderData* loader_data);
@@ -93,6 +98,7 @@ public:
   void methods_do(void f(Method*));
+  void unlink(BoolObjectClosure* is_alive);
   // Classes loaded by the bootstrap loader are always strongly reachable.
   // If we're not doing class unloading, all classes are strongly reachable.
@@ -118,6 +124,7 @@ public:
   // Sharing support
   void reorder_dictionary();
+  ProtectionDomainCacheEntry* cache_get(oop protection_domain);
 #ifndef PRODUCT
   void print();
@@ -126,21 +133,112 @@ public:
 };
 // The following classes can be in dictionary.cpp, but we need these
-// to be in header file so that SA's vmStructs can access.
+// to be in header file so that SA's vmStructs can access them.
class ProtectionDomainCacheEntry : public HashtableEntry<oop, mtClass> {
friend class VMStructs;
private:
// Flag indicating whether this protection domain entry is strongly reachable.
// Used while iterating over the system dictionary to remember oops that need
// to be updated.
bool _strongly_reachable;
public:
oop protection_domain() { return literal(); }
void init() {
_strongly_reachable = false;
}
ProtectionDomainCacheEntry* next() {
return (ProtectionDomainCacheEntry*)HashtableEntry<oop, mtClass>::next();
}
ProtectionDomainCacheEntry** next_addr() {
return (ProtectionDomainCacheEntry**)HashtableEntry<oop, mtClass>::next_addr();
}
void oops_do(OopClosure* f) {
f->do_oop(literal_addr());
}
void set_strongly_reachable() { _strongly_reachable = true; }
bool is_strongly_reachable() { return _strongly_reachable; }
void reset_strongly_reachable() { _strongly_reachable = false; }
void print() PRODUCT_RETURN;
void verify();
};
// The ProtectionDomainCacheTable contains all protection domain oops. The system
// dictionary entries reference its entries instead of having references to oops
// directly.
// This is used to speed up system dictionary iteration: the protection domain
// oops are the only ones in the dictionary referring to the Java heap. So when
// these need to be updated, instead of going over every entry of the system
// dictionary, we only need to iterate over this set.
// The number of distinct protection domains in use is typically orders of
// magnitude smaller than the number of system dictionary entries (loaded classes).
class ProtectionDomainCacheTable : public Hashtable<oop, mtClass> {
friend class VMStructs;
private:
ProtectionDomainCacheEntry* bucket(int i) {
return (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::bucket(i);
}
// The following method is not MT-safe and must be done under lock.
ProtectionDomainCacheEntry** bucket_addr(int i) {
return (ProtectionDomainCacheEntry**) Hashtable<oop, mtClass>::bucket_addr(i);
}
ProtectionDomainCacheEntry* new_entry(unsigned int hash, oop protection_domain) {
ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::new_entry(hash, protection_domain);
entry->init();
return entry;
}
static unsigned int compute_hash(oop protection_domain) {
return (unsigned int)(protection_domain->identity_hash());
}
int index_for(oop protection_domain) {
return hash_to_index(compute_hash(protection_domain));
}
ProtectionDomainCacheEntry* add_entry(int index, unsigned int hash, oop protection_domain);
ProtectionDomainCacheEntry* find_entry(int index, oop protection_domain);
public:
ProtectionDomainCacheTable(int table_size);
ProtectionDomainCacheEntry* get(oop protection_domain);
void free(ProtectionDomainCacheEntry* entry);
void unlink(BoolObjectClosure* cl);
// GC support
void oops_do(OopClosure* f);
void always_strong_oops_do(OopClosure* f);
static uint bucket_size();
void print() PRODUCT_RETURN;
void verify();
};
 class ProtectionDomainEntry :public CHeapObj<mtClass> {
   friend class VMStructs;
  public:
   ProtectionDomainEntry* _next;
-  oop _protection_domain;
+  ProtectionDomainCacheEntry* _pd_cache;
-  ProtectionDomainEntry(oop protection_domain, ProtectionDomainEntry* next) {
-    _protection_domain = protection_domain;
+  ProtectionDomainEntry(ProtectionDomainCacheEntry* pd_cache, ProtectionDomainEntry* next) {
+    _pd_cache = pd_cache;
     _next = next;
   }
   ProtectionDomainEntry* next() { return _next; }
-  oop protection_domain() { return _protection_domain; }
+  oop protection_domain() { return _pd_cache->protection_domain(); }
 };
 // An entry in the system dictionary, this describes a class as
@@ -151,6 +249,24 @@ class DictionaryEntry : public HashtableEntry<Klass*, mtClass> {
  private:
   // Contains the set of approved protection domains that can access
   // this system dictionary entry.
+  //
+  // This protection domain set is a set of tuples:
+  //
+  //  (InstanceKlass C, initiating class loader ICL, Protection Domain PD)
+  //
+  // [Note that C.protection_domain(), which is stored in the java.lang.Class
+  // mirror of C, is NOT the same as PD]
+  //
+  // If such an entry (C, ICL, PD) exists in the table, it means that
+  // it is okay for a class Foo to reference C, where
+  //
+  //    Foo.protection_domain() == PD, and
+  //    Foo's defining class loader == ICL
+  //
+  // The usage of the PD set can be seen in SystemDictionary::validate_protection_domain()
+  // It is essentially a cache to avoid repeated Java up-calls to
+  // ClassLoader.checkPackageAccess().
+  //
   ProtectionDomainEntry* _pd_set;
   ClassLoaderData*       _loader_data;
@@ -158,7 +274,7 @@ class DictionaryEntry : public HashtableEntry<Klass*, mtClass> {
   // Tells whether a protection is in the approved set.
   bool contains_protection_domain(oop protection_domain) const;
   // Adds a protection domain to the approved set.
-  void add_protection_domain(oop protection_domain);
+  void add_protection_domain(Dictionary* dict, oop protection_domain);
   Klass* klass() const { return (Klass*)literal(); }
   Klass** klass_addr() { return (Klass**)literal_addr(); }
@@ -189,12 +305,11 @@ class DictionaryEntry : public HashtableEntry<Klass*, mtClass> {
            : contains_protection_domain(protection_domain());
   }
-  void protection_domain_set_oops_do(OopClosure* f) {
+  void set_strongly_reachable() {
     for (ProtectionDomainEntry* current = _pd_set;
                                 current != NULL;
                                 current = current->_next) {
-      f->do_oop(&(current->_protection_domain));
+      current->_pd_cache->set_strongly_reachable();
     }
   }
@@ -202,7 +317,7 @@ class DictionaryEntry : public HashtableEntry<Klass*, mtClass> {
     for (ProtectionDomainEntry* current = _pd_set;
                                 current != NULL;
                                 current = current->_next) {
-      current->_protection_domain->verify();
+      current->_pd_cache->protection_domain()->verify();
     }
   }
......
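The (C, ICL, PD) tuple comment added to DictionaryEntry describes a cache of successful package-access checks. A hedged sketch of the intended fast path (the real slow path lives in SystemDictionary::validate_protection_domain, which this hunk only references; the function below is illustrative):

// Sketch under assumptions: how the pd_set is meant to be consulted.
bool access_check_sketch(DictionaryEntry* entry, oop protection_domain) {
  if (entry->contains_protection_domain(protection_domain)) {
    return true;  // fast path: this (class, loader, PD) tuple was approved before
  }
  // Slow path (not shown here): up-call to ClassLoader.checkPackageAccess();
  // on success, record the tuple via add_protection_domain(dict, pd) so the
  // next check short-circuits.
  return false;
}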
@@ -1697,6 +1697,24 @@ int SystemDictionary::calculate_systemdictionary_size(int classcount) {
   return newsize;
 }
#ifdef ASSERT
class VerifySDReachableAndLiveClosure : public OopClosure {
private:
BoolObjectClosure* _is_alive;
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
guarantee(_is_alive->do_object_b(obj), "Oop in system dictionary must be live");
}
public:
VerifySDReachableAndLiveClosure(BoolObjectClosure* is_alive) : OopClosure(), _is_alive(is_alive) { }
virtual void do_oop(oop* p) { do_oop_work(p); }
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};
#endif
 // Assumes classes in the SystemDictionary are only unloaded at a safepoint
 // Note: anonymous classes are not in the SD.
 bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
@@ -1707,7 +1725,15 @@ bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
     unloading_occurred = dictionary()->do_unloading();
     constraints()->purge_loader_constraints();
     resolution_errors()->purge_resolution_errors();
   }
// Oops referenced by the system dictionary may get unreachable independently
// of the class loader (eg. cached protection domain oops). So we need to
// explicitly unlink them here instead of in Dictionary::do_unloading.
dictionary()->unlink(is_alive);
#ifdef ASSERT
VerifySDReachableAndLiveClosure cl(is_alive);
dictionary()->oops_do(&cl);
#endif
   return unloading_occurred;
 }
......
@@ -6035,7 +6035,11 @@ void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
   // is dirty.
   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
-  ct_bs->verify_dirty_region(mr);
+  if (hr->is_young()) {
+    ct_bs->verify_g1_young_region(mr);
+  } else {
+    ct_bs->verify_dirty_region(mr);
+  }
 }
 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
......
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "utilities/taskqueue.hpp"
@@ -134,7 +135,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
   assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
   MemRegion mr(start, end);
-  g1_barrier_set()->dirty(mr);
+  g1_barrier_set()->g1_mark_as_young(mr);
 }
 inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
......
@@ -319,10 +319,10 @@ G1CollectorPolicy::G1CollectorPolicy() :
 }
 void G1CollectorPolicy::initialize_flags() {
-  set_min_alignment(HeapRegion::GrainBytes);
+  _min_alignment = HeapRegion::GrainBytes;
   size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
-  set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
+  _max_alignment = MAX3(card_table_alignment, _min_alignment, page_size);
   if (SurvivorRatio < 1) {
     vm_exit_during_initialization("Invalid survivor ratio specified");
   }
......
@@ -70,6 +70,12 @@ bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
   if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
     return false;
   }
+
+  if (val == g1_young_gen) {
+    // the card is for a young gen region. We don't need to keep track of all pointers into young
+    return false;
+  }
+
   // Cached bit can be installed either on a clean card or on a claimed card.
   jbyte new_val = val;
   if (val == clean_card_val()) {
@@ -85,6 +91,19 @@ bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
   return true;
 }
void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
jbyte *const first = byte_for(mr.start());
jbyte *const last = byte_after(mr.last());
memset(first, g1_young_gen, last - first);
}
#ifndef PRODUCT
void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
verify_region(mr, g1_young_gen, true);
}
#endif
 G1SATBCardTableLoggingModRefBS::
 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                int max_covered_regions) :
@@ -97,7 +116,11 @@ G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
 void
 G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
                                                      oop new_val) {
-  jbyte* byte = byte_for(field);
+  volatile jbyte* byte = byte_for(field);
+  if (*byte == g1_young_gen) {
+    return;
+  }
+  OrderAccess::storeload();
   if (*byte != dirty_card) {
     *byte = dirty_card;
     Thread* thr = Thread::current();
@@ -129,7 +152,7 @@ G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
 void
 G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
-  jbyte* byte = byte_for(mr.start());
+  volatile jbyte* byte = byte_for(mr.start());
   jbyte* last_byte = byte_for(mr.last());
   Thread* thr = Thread::current();
   if (whole_heap) {
@@ -138,25 +161,35 @@ G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
       byte++;
     }
   } else {
-    // Enqueue if necessary.
-    if (thr->is_Java_thread()) {
-      JavaThread* jt = (JavaThread*)thr;
-      while (byte <= last_byte) {
-        if (*byte != dirty_card) {
-          *byte = dirty_card;
-          jt->dirty_card_queue().enqueue(byte);
-        }
-        byte++;
-      }
-    } else {
-      MutexLockerEx x(Shared_DirtyCardQ_lock,
-                      Mutex::_no_safepoint_check_flag);
-      while (byte <= last_byte) {
-        if (*byte != dirty_card) {
-          *byte = dirty_card;
-          _dcqs.shared_dirty_card_queue()->enqueue(byte);
-        }
-        byte++;
-      }
-    }
+    // skip all consecutive young cards
+    for (; byte <= last_byte && *byte == g1_young_gen; byte++);
+
+    if (byte <= last_byte) {
+      OrderAccess::storeload();
+      // Enqueue if necessary.
+      if (thr->is_Java_thread()) {
+        JavaThread* jt = (JavaThread*)thr;
+        for (; byte <= last_byte; byte++) {
+          if (*byte == g1_young_gen) {
+            continue;
+          }
+          if (*byte != dirty_card) {
+            *byte = dirty_card;
+            jt->dirty_card_queue().enqueue(byte);
+          }
+        }
+      } else {
+        MutexLockerEx x(Shared_DirtyCardQ_lock,
+                        Mutex::_no_safepoint_check_flag);
+        for (; byte <= last_byte; byte++) {
+          if (*byte == g1_young_gen) {
+            continue;
+          }
+          if (*byte != dirty_card) {
+            *byte = dirty_card;
+            _dcqs.shared_dirty_card_queue()->enqueue(byte);
+          }
+        }
+      }
+    }
   }
 }
......
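g1_mark_as_young above stamps one card byte per card over a region with a single memset. For reference, the card-table addressing it builds on (a sketch assuming HotSpot's usual 512-byte cards, i.e. card_shift == 9; byte_map_base is pre-biased so heap addresses index the map directly):

// Sketch of card addressing, assuming 512-byte cards (card_shift == 9).
inline jbyte* byte_for_sketch(jbyte* byte_map_base, const void* addr) {
  return byte_map_base + ((uintptr_t)addr >> 9);
}
// g1_mark_as_young then covers [byte_for(start), byte_after(last)) with one
// memset of g1_young_gen, i.e. about one byte written per 512 bytes of heap.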
@@ -38,7 +38,14 @@ class DirtyCardQueueSet;
 // snapshot-at-the-beginning marking.
 class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
+protected:
+  enum G1CardValues {
+    g1_young_gen = CT_MR_BS_last_reserved << 1
+  };
+
 public:
+  static int g1_young_card_val() { return g1_young_gen; }
+
   // Add "pre_val" to a set of objects that may have been disconnected from the
   // pre-marking object graph.
   static void enqueue(oop pre_val);
@@ -118,6 +125,9 @@ public:
     _byte_map[card_index] = val;
   }
+  void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
+  void g1_mark_as_young(const MemRegion& mr);
+
   bool mark_card_deferred(size_t card_index);
   bool is_card_deferred(size_t card_index) {
......
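The g1_young_gen value is deliberately allocated one bit above CT_MR_BS_last_reserved, outside the value range the base card table uses, so a single byte comparison can classify a card. A minimal illustration (only dirty_card_val() == 0 is confirmed by this commit, via the asserts in the barrier stubs above; the other base card values are not relied on here):

// Illustration: card classification stays a plain byte compare.
inline bool is_young_card_sketch(jbyte v) {
  return v == G1SATBCardTableModRefBS::g1_young_card_val();
}
inline bool is_dirty_card_sketch(jbyte v) {
  return v == 0; // dirty_card_val() == 0, asserted in the stubs above
}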
@@ -80,6 +80,10 @@ public:
   void reset() { if (_buf != NULL) _index = _sz; }
+  void enqueue(volatile void* ptr) {
+    enqueue((void*)(ptr));
+  }
+
   // Enqueues the given "obj".
   void enqueue(void* ptr) {
     if (!_active) return;
......
@@ -214,9 +214,6 @@ class VM_CollectForMetadataAllocation: public VM_GC_Operation {
     : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
       _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
   }
-  ~VM_CollectForMetadataAllocation() {
-    MetaspaceGC::set_expand_after_GC(false);
-  }
   virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
   virtual void doit();
   MetaWord* result() const { return _result; }
......
@@ -202,12 +202,6 @@ void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
       ShouldNotReachHere(); // Unexpected use of this function
   }
 }
-MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(
-                                             ClassLoaderData* loader_data,
-                                             size_t size, Metaspace::MetadataType mdtype) {
-  return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype);
-}
 void CollectedHeap::pre_initialize() {
   // Used for ReduceInitialCardMarks (when COMPILER2 is used);
......
@@ -475,11 +475,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // the context of the vm thread.
   virtual void collect_as_vm_thread(GCCause::Cause cause);
-  // Callback from VM_CollectForMetadataAllocation operation.
-  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
-                                               size_t size,
-                                               Metaspace::MetadataType mdtype);
-
   // Returns the barrier set for this heap
   BarrierSet* barrier_set() { return _barrier_set; }
......
@@ -47,85 +47,53 @@
 // CollectorPolicy methods.
-// Align down. If the aligning result in 0, return 'alignment'.
-static size_t restricted_align_down(size_t size, size_t alignment) {
-  return MAX2(alignment, align_size_down_(size, alignment));
-}
 void CollectorPolicy::initialize_flags() {
-  assert(max_alignment() >= min_alignment(),
+  assert(_max_alignment >= _min_alignment,
          err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
-                 max_alignment(), min_alignment()));
-  assert(max_alignment() % min_alignment() == 0,
+                 _max_alignment, _min_alignment));
+  assert(_max_alignment % _min_alignment == 0,
          err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
-                 max_alignment(), min_alignment()));
+                 _max_alignment, _min_alignment));
   if (MaxHeapSize < InitialHeapSize) {
     vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   }
-  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
-  // override if MaxMetaspaceSize was set on the command line or not.
-  // This information is needed later to conform to the specification of the
-  // java.lang.management.MemoryUsage API.
-  //
-  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
-  // globals.hpp to the aligned value, but this is not possible, since the
-  // alignment depends on other flags being parsed.
-  MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, max_alignment());
-  if (MetaspaceSize > MaxMetaspaceSize) {
-    MetaspaceSize = MaxMetaspaceSize;
-  }
-  MetaspaceSize = restricted_align_down(MetaspaceSize, min_alignment());
-  assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
-  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
-  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());
-  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());
-  assert(MetaspaceSize % min_alignment() == 0, "metapace alignment");
-  assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
-  if (MetaspaceSize < 256*K) {
-    vm_exit_during_initialization("Too small initial Metaspace size");
-  }
+  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment);
 }
 void CollectorPolicy::initialize_size_info() {
   // User inputs from -mx and ms must be aligned
-  set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
-  set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
-  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));
+  _min_heap_byte_size = align_size_up(Arguments::min_heap_size(), _min_alignment);
+  _initial_heap_byte_size = align_size_up(InitialHeapSize, _min_alignment);
+  _max_heap_byte_size = align_size_up(MaxHeapSize, _max_alignment);
   // Check heap parameter properties
-  if (initial_heap_byte_size() < M) {
+  if (_initial_heap_byte_size < M) {
     vm_exit_during_initialization("Too small initial heap");
   }
   // Check heap parameter properties
-  if (min_heap_byte_size() < M) {
+  if (_min_heap_byte_size < M) {
     vm_exit_during_initialization("Too small minimum heap");
   }
-  if (initial_heap_byte_size() <= NewSize) {
+  if (_initial_heap_byte_size <= NewSize) {
     // make sure there is at least some room in old space
     vm_exit_during_initialization("Too small initial heap for new size specified");
   }
-  if (max_heap_byte_size() < min_heap_byte_size()) {
+  if (_max_heap_byte_size < _min_heap_byte_size) {
     vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
   }
-  if (initial_heap_byte_size() < min_heap_byte_size()) {
+  if (_initial_heap_byte_size < _min_heap_byte_size) {
     vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
   }
-  if (max_heap_byte_size() < initial_heap_byte_size()) {
+  if (_max_heap_byte_size < _initial_heap_byte_size) {
     vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   }
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT "  Initial heap "
       SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
-      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
+      _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
   }
 }
@@ -180,15 +148,15 @@ size_t CollectorPolicy::compute_max_alignment() {
 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
   size_t x = base_size / (NewRatio+1);
-  size_t new_gen_size = x > min_alignment() ?
-                     align_size_down(x, min_alignment()) :
-                     min_alignment();
+  size_t new_gen_size = x > _min_alignment ?
+                     align_size_down(x, _min_alignment) :
+                     _min_alignment;
   return new_gen_size;
 }
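scale_by_NewRatio_aligned gives the young generation one of NewRatio + 1 equal parts of the heap, aligned down but never below _min_alignment. A worked example with illustrative numbers:

// Example: NewRatio = 2, base_size = 96 MB, _min_alignment = 1 MB.
//   x            = 96 MB / (2 + 1) = 32 MB            // one part in three
//   new_gen_size = align_size_down(32 MB, 1 MB) = 32 MB
// If base_size were tiny (x < _min_alignment), the result is clamped up to
// _min_alignment instead, so the young generation never degenerates to zero.
static size_t scale_example(size_t base_size, size_t new_ratio, size_t min_align) {
  size_t x = base_size / (new_ratio + 1);
  return x > min_align ? x & ~(min_align - 1)  // align_size_down, power-of-two align
                       : min_align;
}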
 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                  size_t maximum_size) {
-  size_t alignment = min_alignment();
+  size_t alignment = _min_alignment;
   size_t max_minus = maximum_size - alignment;
   return desired_size < max_minus ? desired_size : max_minus;
 }
@@ -207,8 +175,8 @@ void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
 void GenCollectorPolicy::initialize_flags() {
   // All sizes must be multiples of the generation granularity.
-  set_min_alignment((uintx) Generation::GenGrain);
-  set_max_alignment(compute_max_alignment());
+  _min_alignment = (uintx) Generation::GenGrain;
+  _max_alignment = compute_max_alignment();
   CollectorPolicy::initialize_flags();
@@ -218,26 +186,26 @@ void GenCollectorPolicy::initialize_flags() {
   if (NewSize > MaxNewSize) {
     MaxNewSize = NewSize;
   }
-  NewSize = align_size_down(NewSize, min_alignment());
-  MaxNewSize = align_size_down(MaxNewSize, min_alignment());
+  NewSize = align_size_down(NewSize, _min_alignment);
+  MaxNewSize = align_size_down(MaxNewSize, _min_alignment);
   // Check validity of heap flags
-  assert(NewSize % min_alignment() == 0, "eden space alignment");
-  assert(MaxNewSize % min_alignment() == 0, "survivor space alignment");
+  assert(NewSize % _min_alignment == 0, "eden space alignment");
+  assert(MaxNewSize % _min_alignment == 0, "survivor space alignment");
-  if (NewSize < 3*min_alignment()) {
+  if (NewSize < 3 * _min_alignment) {
     // make sure there room for eden and two survivor spaces
     vm_exit_during_initialization("Too small new size specified");
   }
   if (SurvivorRatio < 1 || NewRatio < 1) {
-    vm_exit_during_initialization("Invalid heap ratio specified");
+    vm_exit_during_initialization("Invalid young gen ratio specified");
   }
 }
void TwoGenerationCollectorPolicy::initialize_flags() { void TwoGenerationCollectorPolicy::initialize_flags() {
GenCollectorPolicy::initialize_flags(); GenCollectorPolicy::initialize_flags();
OldSize = align_size_down(OldSize, min_alignment()); OldSize = align_size_down(OldSize, _min_alignment);
if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) { if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
// NewRatio will be used later to set the young generation size so we use // NewRatio will be used later to set the young generation size so we use
...@@ -246,11 +214,11 @@ void TwoGenerationCollectorPolicy::initialize_flags() { ...@@ -246,11 +214,11 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
assert(NewRatio > 0, "NewRatio should have been set up earlier"); assert(NewRatio > 0, "NewRatio should have been set up earlier");
size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1); size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
calculated_heapsize = align_size_up(calculated_heapsize, max_alignment()); calculated_heapsize = align_size_up(calculated_heapsize, _max_alignment);
MaxHeapSize = calculated_heapsize; MaxHeapSize = calculated_heapsize;
InitialHeapSize = calculated_heapsize; InitialHeapSize = calculated_heapsize;
} }
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
// adjust max heap size if necessary // adjust max heap size if necessary
if (NewSize + OldSize > MaxHeapSize) { if (NewSize + OldSize > MaxHeapSize) {
...@@ -260,18 +228,18 @@ void TwoGenerationCollectorPolicy::initialize_flags() { ...@@ -260,18 +228,18 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
uintx calculated_size = NewSize + OldSize; uintx calculated_size = NewSize + OldSize;
double shrink_factor = (double) MaxHeapSize / calculated_size; double shrink_factor = (double) MaxHeapSize / calculated_size;
// align // align
NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment()); NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
// OldSize is already aligned because above we aligned MaxHeapSize to // OldSize is already aligned because above we aligned MaxHeapSize to
// max_alignment(), and we just made sure that NewSize is aligned to // _max_alignment, and we just made sure that NewSize is aligned to
// min_alignment(). In initialize_flags() we verified that max_alignment() // _min_alignment. In initialize_flags() we verified that _max_alignment
// is a multiple of min_alignment(). // is a multiple of _min_alignment.
OldSize = MaxHeapSize - NewSize; OldSize = MaxHeapSize - NewSize;
} else { } else {
MaxHeapSize = NewSize + OldSize; MaxHeapSize = NewSize + OldSize;
} }
} }
// need to do this again // need to do this again
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
// adjust max heap size if necessary // adjust max heap size if necessary
if (NewSize + OldSize > MaxHeapSize) { if (NewSize + OldSize > MaxHeapSize) {
...@@ -281,24 +249,24 @@ void TwoGenerationCollectorPolicy::initialize_flags() { ...@@ -281,24 +249,24 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
uintx calculated_size = NewSize + OldSize; uintx calculated_size = NewSize + OldSize;
double shrink_factor = (double) MaxHeapSize / calculated_size; double shrink_factor = (double) MaxHeapSize / calculated_size;
// align // align
NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment()); NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
// OldSize is already aligned because above we aligned MaxHeapSize to // OldSize is already aligned because above we aligned MaxHeapSize to
// max_alignment(), and we just made sure that NewSize is aligned to // _max_alignment, and we just made sure that NewSize is aligned to
// min_alignment(). In initialize_flags() we verified that max_alignment() // _min_alignment. In initialize_flags() we verified that _max_alignment
// is a multiple of min_alignment(). // is a multiple of _min_alignment.
OldSize = MaxHeapSize - NewSize; OldSize = MaxHeapSize - NewSize;
} else { } else {
MaxHeapSize = NewSize + OldSize; MaxHeapSize = NewSize + OldSize;
} }
} }
// need to do this again // need to do this again
MaxHeapSize = align_size_up(MaxHeapSize, max_alignment()); MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
always_do_update_barrier = UseConcMarkSweepGC; always_do_update_barrier = UseConcMarkSweepGC;
// Check validity of heap flags // Check validity of heap flags
assert(OldSize % min_alignment() == 0, "old space alignment"); assert(OldSize % _min_alignment == 0, "old space alignment");
assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment"); assert(MaxHeapSize % _max_alignment == 0, "maximum heap alignment");
} }
// Values set on the command line win over any ergonomically // Values set on the command line win over any ergonomically
...@@ -313,7 +281,7 @@ void TwoGenerationCollectorPolicy::initialize_flags() { ...@@ -313,7 +281,7 @@ void TwoGenerationCollectorPolicy::initialize_flags() {
void GenCollectorPolicy::initialize_size_info() { void GenCollectorPolicy::initialize_size_info() {
CollectorPolicy::initialize_size_info(); CollectorPolicy::initialize_size_info();
// min_alignment() is used for alignment within a generation. // _min_alignment is used for alignment within a generation.
// There is additional alignment done downstream for some // There is additional alignment done downstream for some
// collectors that sometimes causes unwanted rounding up of // collectors that sometimes causes unwanted rounding up of
// generation sizes. // generation sizes.
...@@ -322,18 +290,18 @@ void GenCollectorPolicy::initialize_size_info() { ...@@ -322,18 +290,18 @@ void GenCollectorPolicy::initialize_size_info() {
size_t max_new_size = 0; size_t max_new_size = 0;
if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) { if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
if (MaxNewSize < min_alignment()) { if (MaxNewSize < _min_alignment) {
max_new_size = min_alignment(); max_new_size = _min_alignment;
} }
if (MaxNewSize >= max_heap_byte_size()) { if (MaxNewSize >= _max_heap_byte_size) {
max_new_size = align_size_down(max_heap_byte_size() - min_alignment(), max_new_size = align_size_down(_max_heap_byte_size - _min_alignment,
min_alignment()); _min_alignment);
warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or " warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
"greater than the entire heap (" SIZE_FORMAT "k). A " "greater than the entire heap (" SIZE_FORMAT "k). A "
"new generation size of " SIZE_FORMAT "k will be used.", "new generation size of " SIZE_FORMAT "k will be used.",
MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K); MaxNewSize/K, _max_heap_byte_size/K, max_new_size/K);
} else { } else {
max_new_size = align_size_down(MaxNewSize, min_alignment()); max_new_size = align_size_down(MaxNewSize, _min_alignment);
} }
// The case for FLAG_IS_ERGO(MaxNewSize) could be treated // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
...@@ -351,7 +319,7 @@ void GenCollectorPolicy::initialize_size_info() { ...@@ -351,7 +319,7 @@ void GenCollectorPolicy::initialize_size_info() {
// just accept those choices. The choices currently made are // just accept those choices. The choices currently made are
// not always "wise". // not always "wise".
} else { } else {
max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size()); max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
// Bound the maximum size by NewSize below (since it historically // Bound the maximum size by NewSize below (since it historically
// would have been NewSize and because the NewRatio calculation could // would have been NewSize and because the NewRatio calculation could
// yield a size that is too small) and bound it by MaxNewSize above. // yield a size that is too small) and bound it by MaxNewSize above.
...@@ -364,13 +332,13 @@ void GenCollectorPolicy::initialize_size_info() { ...@@ -364,13 +332,13 @@ void GenCollectorPolicy::initialize_size_info() {
// Given the maximum gen0 size, determine the initial and // Given the maximum gen0 size, determine the initial and
// minimum gen0 sizes. // minimum gen0 sizes.
if (max_heap_byte_size() == min_heap_byte_size()) { if (_max_heap_byte_size == _min_heap_byte_size) {
// The maximum and minimum heap sizes are the same so // The maximum and minimum heap sizes are the same so
// the generation's minimum and initial sizes must be the // the generation's minimum and initial sizes must be the
// same as its maximum. // same as its maximum.
set_min_gen0_size(max_new_size); _min_gen0_size = max_new_size;
set_initial_gen0_size(max_new_size); _initial_gen0_size = max_new_size;
set_max_gen0_size(max_new_size); _max_gen0_size = max_new_size;
} else { } else {
size_t desired_new_size = 0; size_t desired_new_size = 0;
if (!FLAG_IS_DEFAULT(NewSize)) { if (!FLAG_IS_DEFAULT(NewSize)) {
...@@ -391,43 +359,37 @@ void GenCollectorPolicy::initialize_size_info() { ...@@ -391,43 +359,37 @@ void GenCollectorPolicy::initialize_size_info() {
// Use the default NewSize as the floor for these values. If // Use the default NewSize as the floor for these values. If
// NewRatio is overly large, the resulting sizes can be too // NewRatio is overly large, the resulting sizes can be too
// small. // small.
_min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()), _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
NewSize);
desired_new_size = desired_new_size =
MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()), MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
NewSize);
} }
assert(_min_gen0_size > 0, "Sanity check"); assert(_min_gen0_size > 0, "Sanity check");
set_initial_gen0_size(desired_new_size); _initial_gen0_size = desired_new_size;
set_max_gen0_size(max_new_size); _max_gen0_size = max_new_size;
// At this point the desirable initial and minimum sizes have been // At this point the desirable initial and minimum sizes have been
// determined without regard to the maximum sizes. // determined without regard to the maximum sizes.
// Bound the sizes by the corresponding overall heap sizes. // Bound the sizes by the corresponding overall heap sizes.
set_min_gen0_size( _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
bound_minus_alignment(_min_gen0_size, min_heap_byte_size())); _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
set_initial_gen0_size( _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);
bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
set_max_gen0_size(
bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));
// At this point all three sizes have been checked against the // At this point all three sizes have been checked against the
// maximum sizes but have not been checked for consistency // maximum sizes but have not been checked for consistency
// among the three. // among the three.
// Final check min <= initial <= max // Final check min <= initial <= max
set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size)); _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
set_initial_gen0_size( _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size)); _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
} }
if (PrintGCDetails && Verbose) { if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 " gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
min_gen0_size(), initial_gen0_size(), max_gen0_size()); _min_gen0_size, _initial_gen0_size, _max_gen0_size);
} }
} }
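The final consistency step above forces the three gen0 sizes into min <= initial <= max order without ever growing the maximum. A minimal sketch of the same clamping sequence, using std::min/std::max in place of MIN2/MAX2 (illustrative only):

#include <algorithm>
#include <cstddef>

// Mirrors the MIN2/MAX2 sequence: shrink min and initial toward max,
// never the other way around. The last step is defensive; after the
// second line, initial is already >= min.
static void enforce_order(size_t& min_sz, size_t& initial_sz, size_t max_sz) {
  min_sz     = std::min(min_sz, max_sz);
  initial_sz = std::max(std::min(initial_sz, max_sz), min_sz);
  min_sz     = std::min(min_sz, initial_sz);
}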
...@@ -447,19 +409,17 @@ bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr, ...@@ -447,19 +409,17 @@ bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) { if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
if ((heap_size < (*gen0_size_ptr + min_gen1_size)) && if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
(heap_size >= min_gen1_size + min_alignment())) { (heap_size >= min_gen1_size + _min_alignment)) {
// Adjust gen0 down to accommodate min_gen1_size // Adjust gen0 down to accommodate min_gen1_size
*gen0_size_ptr = heap_size - min_gen1_size; *gen0_size_ptr = heap_size - min_gen1_size;
*gen0_size_ptr = *gen0_size_ptr =
MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()), MAX2((uintx)align_size_down(*gen0_size_ptr, _min_alignment), _min_alignment);
min_alignment());
assert(*gen0_size_ptr > 0, "Min gen0 is too large"); assert(*gen0_size_ptr > 0, "Min gen0 is too large");
result = true; result = true;
} else { } else {
*gen1_size_ptr = heap_size - *gen0_size_ptr; *gen1_size_ptr = heap_size - *gen0_size_ptr;
*gen1_size_ptr = *gen1_size_ptr =
MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()), MAX2((uintx)align_size_down(*gen1_size_ptr, _min_alignment), _min_alignment);
min_alignment());
} }
} }
return result; return result;
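In words, adjust_gen0_sizes() resolves an over-committed heap by shrinking gen0 first: if the heap can still hold a minimal gen1 next to a smaller gen0, gen0 gives way; otherwise gen1 absorbs the cut. An illustrative sketch with hypothetical helper names (not the changeset's code):

#include <algorithm>
#include <cstddef>

// Align down to a power-of-two boundary, but never below one unit.
static size_t align_down_at_least(size_t x, size_t a) {
  return std::max(x & ~(a - 1), a);
}

// If gen0 + gen1 exceed heap, shrink gen0 to leave room for min_gen1
// when possible, otherwise shrink gen1 to whatever is left. Returns
// true only when gen0 was adjusted, like the original.
static bool adjust_sizes(size_t& gen0, size_t& gen1, size_t heap,
                         size_t min_gen1, size_t a) {
  if (gen0 + gen1 <= heap) return false;
  if (heap < gen0 + min_gen1 && heap >= min_gen1 + a) {
    gen0 = align_down_at_least(heap - min_gen1, a);
    return true;
  }
  gen1 = align_down_at_least(heap - gen0, a);
  return false;
}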
...@@ -480,10 +440,9 @@ void TwoGenerationCollectorPolicy::initialize_size_info() { ...@@ -480,10 +440,9 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
// The maximum gen1 size can be determined from the maximum gen0 // The maximum gen1 size can be determined from the maximum gen0
// and maximum heap size since no explicit flags exist // and maximum heap size since no explicit flags exist
// for setting the gen1 maximum. // for setting the gen1 maximum.
_max_gen1_size = max_heap_byte_size() - _max_gen0_size; _max_gen1_size = _max_heap_byte_size - _max_gen0_size;
_max_gen1_size = _max_gen1_size =
MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()), MAX2((uintx)align_size_down(_max_gen1_size, _min_alignment), _min_alignment);
min_alignment());
// If no explicit command line flag has been set for the // If no explicit command line flag has been set for the
// gen1 size, use what is left for gen1. // gen1 size, use what is left for gen1.
if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) { if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
...@@ -492,70 +451,66 @@ void TwoGenerationCollectorPolicy::initialize_size_info() { ...@@ -492,70 +451,66 @@ void TwoGenerationCollectorPolicy::initialize_size_info() {
// with the overall heap size). In either case make // with the overall heap size). In either case make
// the minimum, maximum and initial sizes consistent // the minimum, maximum and initial sizes consistent
// with the gen0 sizes and the overall heap sizes. // with the gen0 sizes and the overall heap sizes.
assert(min_heap_byte_size() > _min_gen0_size, assert(_min_heap_byte_size > _min_gen0_size,
"gen0 has an unexpected minimum size"); "gen0 has an unexpected minimum size");
set_min_gen1_size(min_heap_byte_size() - min_gen0_size()); _min_gen1_size = _min_heap_byte_size - _min_gen0_size;
set_min_gen1_size( _min_gen1_size =
MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()), MAX2((uintx)align_size_down(_min_gen1_size, _min_alignment), _min_alignment);
min_alignment())); _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size;
set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size()); _initial_gen1_size =
set_initial_gen1_size( MAX2((uintx)align_size_down(_initial_gen1_size, _min_alignment), _min_alignment);
MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
min_alignment()));
} else { } else {
// It's been explicitly set on the command line. Use the // It's been explicitly set on the command line. Use the
// OldSize and then determine the consequences. // OldSize and then determine the consequences.
set_min_gen1_size(OldSize); _min_gen1_size = OldSize;
set_initial_gen1_size(OldSize); _initial_gen1_size = OldSize;
// If the user has explicitly set an OldSize that is inconsistent // If the user has explicitly set an OldSize that is inconsistent
// with other command line flags, issue a warning. // with other command line flags, issue a warning.
// The generation minimums and the overall heap minimum should // The generation minimums and the overall heap minimum should
// be within one heap alignment. // be within one heap alignment.
if ((_min_gen1_size + _min_gen0_size + min_alignment()) < if ((_min_gen1_size + _min_gen0_size + _min_alignment) < _min_heap_byte_size) {
min_heap_byte_size()) {
warning("Inconsistency between minimum heap size and minimum " warning("Inconsistency between minimum heap size and minimum "
"generation sizes: using minimum heap = " SIZE_FORMAT, "generation sizes: using minimum heap = " SIZE_FORMAT,
min_heap_byte_size()); _min_heap_byte_size);
} }
if ((OldSize > _max_gen1_size)) { if ((OldSize > _max_gen1_size)) {
warning("Inconsistency between maximum heap size and maximum " warning("Inconsistency between maximum heap size and maximum "
"generation sizes: using maximum heap = " SIZE_FORMAT "generation sizes: using maximum heap = " SIZE_FORMAT
" -XX:OldSize flag is being ignored", " -XX:OldSize flag is being ignored",
max_heap_byte_size()); _max_heap_byte_size);
} }
// If there is an inconsistency between the OldSize and the minimum and/or // If there is an inconsistency between the OldSize and the minimum and/or
// initial size of gen0, since OldSize was explicitly set, OldSize wins. // initial size of gen0, since OldSize was explicitly set, OldSize wins.
if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
min_heap_byte_size(), OldSize)) { _min_heap_byte_size, OldSize)) {
if (PrintGCDetails && Verbose) { if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 " gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
min_gen0_size(), initial_gen0_size(), max_gen0_size()); _min_gen0_size, _initial_gen0_size, _max_gen0_size);
} }
} }
// Initial size // Initial size
if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size, if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
initial_heap_byte_size(), OldSize)) { _initial_heap_byte_size, OldSize)) {
if (PrintGCDetails && Verbose) { if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 " gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
min_gen0_size(), initial_gen0_size(), max_gen0_size()); _min_gen0_size, _initial_gen0_size, _max_gen0_size);
} }
} }
} }
// Enforce the maximum gen1 size. // Enforce the maximum gen1 size.
set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size)); _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
// Check that min gen1 <= initial gen1 <= max gen1 // Check that min gen1 <= initial gen1 <= max gen1
set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size)); _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size)); _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
if (PrintGCDetails && Verbose) { if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 " gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 "
SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT, SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT,
min_gen1_size(), initial_gen1_size(), max_gen1_size()); _min_gen1_size, _initial_gen1_size, _max_gen1_size);
} }
} }
......
...@@ -101,17 +101,12 @@ class CollectorPolicy : public CHeapObj<mtGC> { ...@@ -101,17 +101,12 @@ class CollectorPolicy : public CHeapObj<mtGC> {
// Return maximum heap alignment that may be imposed by the policy // Return maximum heap alignment that may be imposed by the policy
static size_t compute_max_alignment(); static size_t compute_max_alignment();
void set_min_alignment(size_t align) { _min_alignment = align; }
size_t min_alignment() { return _min_alignment; } size_t min_alignment() { return _min_alignment; }
void set_max_alignment(size_t align) { _max_alignment = align; }
size_t max_alignment() { return _max_alignment; } size_t max_alignment() { return _max_alignment; }
size_t initial_heap_byte_size() { return _initial_heap_byte_size; } size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; }
size_t max_heap_byte_size() { return _max_heap_byte_size; } size_t max_heap_byte_size() { return _max_heap_byte_size; }
void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; }
size_t min_heap_byte_size() { return _min_heap_byte_size; } size_t min_heap_byte_size() { return _min_heap_byte_size; }
void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; }
enum Name { enum Name {
CollectorPolicyKind, CollectorPolicyKind,
...@@ -248,12 +243,9 @@ class GenCollectorPolicy : public CollectorPolicy { ...@@ -248,12 +243,9 @@ class GenCollectorPolicy : public CollectorPolicy {
public: public:
// Accessors // Accessors
size_t min_gen0_size() { return _min_gen0_size; } size_t min_gen0_size() { return _min_gen0_size; }
void set_min_gen0_size(size_t v) { _min_gen0_size = v; }
size_t initial_gen0_size() { return _initial_gen0_size; } size_t initial_gen0_size() { return _initial_gen0_size; }
void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; } size_t max_gen0_size() { return _max_gen0_size; }
size_t max_gen0_size() { return _max_gen0_size; }
void set_max_gen0_size(size_t v) { _max_gen0_size = v; }
virtual int number_of_generations() = 0; virtual int number_of_generations() = 0;
...@@ -302,12 +294,9 @@ class TwoGenerationCollectorPolicy : public GenCollectorPolicy { ...@@ -302,12 +294,9 @@ class TwoGenerationCollectorPolicy : public GenCollectorPolicy {
public: public:
// Accessors // Accessors
size_t min_gen1_size() { return _min_gen1_size; } size_t min_gen1_size() { return _min_gen1_size; }
void set_min_gen1_size(size_t v) { _min_gen1_size = v; }
size_t initial_gen1_size() { return _initial_gen1_size; } size_t initial_gen1_size() { return _initial_gen1_size; }
void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; } size_t max_gen1_size() { return _max_gen1_size; }
size_t max_gen1_size() { return _max_gen1_size; }
void set_max_gen1_size(size_t v) { _max_gen1_size = v; }
// Inherited methods // Inherited methods
TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; } TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#define SHARE_VM_MEMORY_FILEMAP_HPP #define SHARE_VM_MEMORY_FILEMAP_HPP
#include "memory/metaspaceShared.hpp" #include "memory/metaspaceShared.hpp"
#include "memory/metaspace.hpp"
// Layout of the file: // Layout of the file:
// header: dump of archive instance plus versioning info, datestamp, etc. // header: dump of archive instance plus versioning info, datestamp, etc.
......
...@@ -29,17 +29,21 @@ ...@@ -29,17 +29,21 @@
#include "memory/collectorPolicy.hpp" #include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp" #include "memory/filemap.hpp"
#include "memory/freeList.hpp" #include "memory/freeList.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metablock.hpp" #include "memory/metablock.hpp"
#include "memory/metachunk.hpp" #include "memory/metachunk.hpp"
#include "memory/metaspace.hpp" #include "memory/metaspace.hpp"
#include "memory/metaspaceShared.hpp" #include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp" #include "memory/resourceArea.hpp"
#include "memory/universe.hpp" #include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp" #include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp" #include "runtime/java.hpp"
#include "runtime/mutex.hpp" #include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp" #include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp" #include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp" #include "utilities/copy.hpp"
#include "utilities/debug.hpp" #include "utilities/debug.hpp"
...@@ -84,13 +88,7 @@ static ChunkIndex next_chunk_index(ChunkIndex i) { ...@@ -84,13 +88,7 @@ static ChunkIndex next_chunk_index(ChunkIndex i) {
return (ChunkIndex) (i+1); return (ChunkIndex) (i+1);
} }
// Originally _capacity_until_GC was set to MetaspaceSize here but volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
// the default MetaspaceSize before argument processing was being
// used which was not the desired value. See the code
// in should_expand() to see how the initialization is handled
// now.
size_t MetaspaceGC::_capacity_until_GC = 0;
bool MetaspaceGC::_expand_after_GC = false;
uint MetaspaceGC::_shrink_factor = 0; uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false; bool MetaspaceGC::_should_concurrent_collect = false;
...@@ -293,9 +291,10 @@ class VirtualSpaceNode : public CHeapObj<mtClass> { ...@@ -293,9 +291,10 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
MetaWord* end() const { return (MetaWord*) _virtual_space.high(); } MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; } size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; }
size_t expanded_words() const { return _virtual_space.committed_size() / BytesPerWord; }
size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; } size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
bool is_pre_committed() const { return _virtual_space.special(); }
// address of next available space in _virtual_space; // address of next available space in _virtual_space;
// Accessors // Accessors
VirtualSpaceNode* next() { return _next; } VirtualSpaceNode* next() { return _next; }
...@@ -337,7 +336,7 @@ class VirtualSpaceNode : public CHeapObj<mtClass> { ...@@ -337,7 +336,7 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
// Expands/shrinks the committed space in a virtual space. Delegates // Expands/shrinks the committed space in a virtual space. Delegates
// to VirtualSpace // to VirtualSpace
bool expand_by(size_t words, bool pre_touch = false); bool expand_by(size_t min_words, size_t preferred_words);
// In preparation for deleting this node, remove all the chunks // In preparation for deleting this node, remove all the chunks
// in the node from any freelist. // in the node from any freelist.
...@@ -351,29 +350,64 @@ class VirtualSpaceNode : public CHeapObj<mtClass> { ...@@ -351,29 +350,64 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
void print_on(outputStream* st) const; void print_on(outputStream* st) const;
}; };
#define assert_is_ptr_aligned(ptr, alignment) \
assert(is_ptr_aligned(ptr, alignment), \
err_msg(PTR_FORMAT " is not aligned to " \
SIZE_FORMAT, ptr, alignment))
#define assert_is_size_aligned(size, alignment) \
assert(is_size_aligned(size, alignment), \
err_msg(SIZE_FORMAT " is not aligned to " \
SIZE_FORMAT, size, alignment))
// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
size_t words = bytes / BytesPerWord;
bool is_class = false; // We never reserve large pages for the class space.
if (MetaspaceGC::can_expand(words, is_class) &&
MetaspaceGC::allowed_expansion() >= words) {
return true;
}
}
return false;
}
// byte_size is the size of the associated virtualspace. // byte_size is the size of the associated virtualspace.
VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) { VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
// align up to vm allocation granularity assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
// This allocates memory with mmap. For DumpSharedspaces, try to reserve // This allocates memory with mmap. For DumpSharedspaces, try to reserve
// configurable address, generally at the top of the Java heap so other // configurable address, generally at the top of the Java heap so other
// memory addresses don't conflict. // memory addresses don't conflict.
if (DumpSharedSpaces) { if (DumpSharedSpaces) {
char* shared_base = (char*)SharedBaseAddress; bool large_pages = false; // No large pages when dumping the CDS archive.
_rs = ReservedSpace(byte_size, 0, false, shared_base, 0); char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
if (_rs.is_reserved()) { if (_rs.is_reserved()) {
assert(shared_base == 0 || _rs.base() == shared_base, "should match"); assert(shared_base == 0 || _rs.base() == shared_base, "should match");
} else { } else {
// Get a mmap region anywhere if the SharedBaseAddress fails. // Get a mmap region anywhere if the SharedBaseAddress fails.
_rs = ReservedSpace(byte_size); _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
} }
MetaspaceShared::set_shared_rs(&_rs); MetaspaceShared::set_shared_rs(&_rs);
} else { } else {
_rs = ReservedSpace(byte_size); bool large_pages = should_commit_large_pages_when_reserving(bytes);
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
} }
MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); if (_rs.is_reserved()) {
assert(_rs.base() != NULL, "Catch if we get a NULL address");
assert(_rs.size() != 0, "Catch if we get a 0 size");
assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
}
} }
void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
...@@ -410,8 +444,6 @@ uint VirtualSpaceNode::container_count_slow() { ...@@ -410,8 +444,6 @@ uint VirtualSpaceNode::container_count_slow() {
#endif #endif
// List of VirtualSpaces for metadata allocation. // List of VirtualSpaces for metadata allocation.
// It has a _next link for singly linked list and a MemRegion
// for total space in the VirtualSpace.
class VirtualSpaceList : public CHeapObj<mtClass> { class VirtualSpaceList : public CHeapObj<mtClass> {
friend class VirtualSpaceNode; friend class VirtualSpaceNode;
...@@ -419,16 +451,13 @@ class VirtualSpaceList : public CHeapObj<mtClass> { ...@@ -419,16 +451,13 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
VirtualSpaceSize = 256 * K VirtualSpaceSize = 256 * K
}; };
// Global list of virtual spaces
// Head of the list // Head of the list
VirtualSpaceNode* _virtual_space_list; VirtualSpaceNode* _virtual_space_list;
// virtual space currently being used for allocations // virtual space currently being used for allocations
VirtualSpaceNode* _current_virtual_space; VirtualSpaceNode* _current_virtual_space;
// Can this virtual list allocate >1 spaces? Also, used to determine // Is this VirtualSpaceList used for the compressed class space
// whether to allocate unlimited small chunks in this virtual space
bool _is_class; bool _is_class;
bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
// Sum of reserved and committed memory in the virtual spaces // Sum of reserved and committed memory in the virtual spaces
size_t _reserved_words; size_t _reserved_words;
...@@ -453,7 +482,7 @@ class VirtualSpaceList : public CHeapObj<mtClass> { ...@@ -453,7 +482,7 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
// Get another virtual space and add it to the list. This // Get another virtual space and add it to the list. This
// is typically prompted by a failed attempt to allocate a chunk // is typically prompted by a failed attempt to allocate a chunk
// and is typically followed by the allocation of a chunk. // and is typically followed by the allocation of a chunk.
bool grow_vs(size_t vs_word_size); bool create_new_virtual_space(size_t vs_word_size);
public: public:
VirtualSpaceList(size_t word_size); VirtualSpaceList(size_t word_size);
...@@ -465,12 +494,12 @@ class VirtualSpaceList : public CHeapObj<mtClass> { ...@@ -465,12 +494,12 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
size_t grow_chunks_by_words, size_t grow_chunks_by_words,
size_t medium_chunk_bunch); size_t medium_chunk_bunch);
bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false); bool expand_node_by(VirtualSpaceNode* node,
size_t min_words,
size_t preferred_words);
// Get the first chunk for a Metaspace. Used for bool expand_by(size_t min_words,
// special cases such as the boot class loader, reflection size_t preferred_words);
// class loader and anonymous class loader.
Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);
VirtualSpaceNode* current_virtual_space() { VirtualSpaceNode* current_virtual_space() {
return _current_virtual_space; return _current_virtual_space;
...@@ -478,8 +507,7 @@ class VirtualSpaceList : public CHeapObj<mtClass> { ...@@ -478,8 +507,7 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
bool is_class() const { return _is_class; } bool is_class() const { return _is_class; }
// Allocate the first virtualspace. bool initialization_succeeded() { return _virtual_space_list != NULL; }
void initialize(size_t word_size);
size_t reserved_words() { return _reserved_words; } size_t reserved_words() { return _reserved_words; }
size_t reserved_bytes() { return reserved_words() * BytesPerWord; } size_t reserved_bytes() { return reserved_words() * BytesPerWord; }
...@@ -708,6 +736,9 @@ class SpaceManager : public CHeapObj<mtClass> { ...@@ -708,6 +736,9 @@ class SpaceManager : public CHeapObj<mtClass> {
// and allocates from that chunk. // and allocates from that chunk.
MetaWord* grow_and_allocate(size_t word_size); MetaWord* grow_and_allocate(size_t word_size);
// Notify memory usage to MemoryService.
void track_metaspace_memory_usage();
// debugging support. // debugging support.
void dump(outputStream* const out) const; void dump(outputStream* const out) const;
...@@ -869,6 +900,12 @@ Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) { ...@@ -869,6 +900,12 @@ Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
MetaWord* chunk_limit = top(); MetaWord* chunk_limit = top();
assert(chunk_limit != NULL, "Not safe to call this method"); assert(chunk_limit != NULL, "Not safe to call this method");
// The virtual spaces are always expanded by the
// commit granularity to enforce the following condition.
// Without this the is_available check will not work correctly.
assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
"The committed memory doesn't match the expanded memory.");
if (!is_available(chunk_word_size)) { if (!is_available(chunk_word_size)) {
if (TraceMetadataChunkAllocation) { if (TraceMetadataChunkAllocation) {
gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size); gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
...@@ -888,14 +925,21 @@ Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) { ...@@ -888,14 +925,21 @@ Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
// Expand the virtual space (commit more of the reserved space) // Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) { bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
size_t bytes = words * BytesPerWord; size_t min_bytes = min_words * BytesPerWord;
bool result = virtual_space()->expand_by(bytes, pre_touch); size_t preferred_bytes = preferred_words * BytesPerWord;
if (TraceMetavirtualspaceAllocation && !result) {
gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed " size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
"for byte size " SIZE_FORMAT, bytes);
virtual_space()->print_on(gclog_or_tty); if (uncommitted < min_bytes) {
return false;
} }
size_t commit = MIN2(preferred_bytes, uncommitted);
bool result = virtual_space()->expand_by(commit, false);
assert(result, "Failed to commit memory");
return result; return result;
} }
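The new expand_by() contract above is: fail outright if even min_words cannot be committed from the remaining reservation, otherwise commit as much of preferred_words as fits. A hedged sketch of that min/preferred pattern in isolation (hypothetical names, not the VM API):

#include <algorithm>
#include <cstddef>
#include <functional>

// commit(bytes) commits part of a reservation and reports OS failure.
static bool expand_between(size_t reserved_bytes, size_t committed_bytes,
                           size_t min_bytes, size_t preferred_bytes,
                           const std::function<bool(size_t)>& commit) {
  size_t uncommitted = reserved_bytes - committed_bytes;
  if (uncommitted < min_bytes) {
    return false;  // cannot satisfy even the minimum: fail fast
  }
  // Take all of preferred_bytes if available, else whatever is left.
  return commit(std::min(preferred_bytes, uncommitted));
}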
...@@ -914,12 +958,23 @@ bool VirtualSpaceNode::initialize() { ...@@ -914,12 +958,23 @@ bool VirtualSpaceNode::initialize() {
return false; return false;
} }
// An allocation out of this Virtualspace that is larger // These are necessary restrictions to make sure that the virtual space always
// than an initial commit size can waste that initial committed // grows in steps of Metaspace::commit_alignment(). If both base and size are
// space. // aligned only the middle alignment of the VirtualSpace is used.
size_t committed_byte_size = 0; assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
bool result = virtual_space()->initialize(_rs, committed_byte_size); assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
// ReservedSpaces marked as special will have the entire memory
// pre-committed. Setting a committed size will make sure that
// committed_size and actual_committed_size agree.
size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
Metaspace::commit_alignment());
if (result) { if (result) {
assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
"Checking that the pre-committed memory was registered by the VirtualSpace");
set_top((MetaWord*)virtual_space()->low()); set_top((MetaWord*)virtual_space()->low());
set_reserved(MemRegion((HeapWord*)_rs.base(), set_reserved(MemRegion((HeapWord*)_rs.base(),
(HeapWord*)(_rs.base() + _rs.size()))); (HeapWord*)(_rs.base() + _rs.size())));
...@@ -976,13 +1031,23 @@ void VirtualSpaceList::dec_reserved_words(size_t v) { ...@@ -976,13 +1031,23 @@ void VirtualSpaceList::dec_reserved_words(size_t v) {
_reserved_words = _reserved_words - v; _reserved_words = _reserved_words - v;
} }
#define assert_committed_below_limit() \
assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
" limit (MaxMetaspaceSize): " SIZE_FORMAT, \
MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
void VirtualSpaceList::inc_committed_words(size_t v) { void VirtualSpaceList::inc_committed_words(size_t v) {
assert_lock_strong(SpaceManager::expand_lock()); assert_lock_strong(SpaceManager::expand_lock());
_committed_words = _committed_words + v; _committed_words = _committed_words + v;
assert_committed_below_limit();
} }
void VirtualSpaceList::dec_committed_words(size_t v) { void VirtualSpaceList::dec_committed_words(size_t v) {
assert_lock_strong(SpaceManager::expand_lock()); assert_lock_strong(SpaceManager::expand_lock());
_committed_words = _committed_words - v; _committed_words = _committed_words - v;
assert_committed_below_limit();
} }
void VirtualSpaceList::inc_virtual_space_count() { void VirtualSpaceList::inc_virtual_space_count() {
...@@ -1025,8 +1090,8 @@ void VirtualSpaceList::purge(ChunkManager* chunk_manager) { ...@@ -1025,8 +1090,8 @@ void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
if (vsl->container_count() == 0 && vsl != current_virtual_space()) { if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
// Unlink it from the list // Unlink it from the list
if (prev_vsl == vsl) { if (prev_vsl == vsl) {
// This is the case of the current note being the first note. // This is the case of the current node being the first node.
assert(vsl == virtual_space_list(), "Expected to be the first note"); assert(vsl == virtual_space_list(), "Expected to be the first node");
set_virtual_space_list(vsl->next()); set_virtual_space_list(vsl->next());
} else { } else {
prev_vsl->set_next(vsl->next()); prev_vsl->set_next(vsl->next());
...@@ -1054,7 +1119,7 @@ void VirtualSpaceList::purge(ChunkManager* chunk_manager) { ...@@ -1054,7 +1119,7 @@ void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
#endif #endif
} }
VirtualSpaceList::VirtualSpaceList(size_t word_size ) : VirtualSpaceList::VirtualSpaceList(size_t word_size) :
_is_class(false), _is_class(false),
_virtual_space_list(NULL), _virtual_space_list(NULL),
_current_virtual_space(NULL), _current_virtual_space(NULL),
...@@ -1063,9 +1128,7 @@ VirtualSpaceList::VirtualSpaceList(size_t word_size ) : ...@@ -1063,9 +1128,7 @@ VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
_virtual_space_count(0) { _virtual_space_count(0) {
MutexLockerEx cl(SpaceManager::expand_lock(), MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag); Mutex::_no_safepoint_check_flag);
bool initialization_succeeded = grow_vs(word_size); create_new_virtual_space(word_size);
assert(initialization_succeeded,
" VirtualSpaceList initialization should not fail");
} }
VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
...@@ -1079,8 +1142,9 @@ VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : ...@@ -1079,8 +1142,9 @@ VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
Mutex::_no_safepoint_check_flag); Mutex::_no_safepoint_check_flag);
VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
bool succeeded = class_entry->initialize(); bool succeeded = class_entry->initialize();
assert(succeeded, " VirtualSpaceList initialization should not fail"); if (succeeded) {
link_vs(class_entry); link_vs(class_entry);
}
} }
size_t VirtualSpaceList::free_bytes() { size_t VirtualSpaceList::free_bytes() {
...@@ -1088,14 +1152,24 @@ size_t VirtualSpaceList::free_bytes() { ...@@ -1088,14 +1152,24 @@ size_t VirtualSpaceList::free_bytes() {
} }
// Allocate another meta virtual space and add it to the list. // Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::grow_vs(size_t vs_word_size) { bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
assert_lock_strong(SpaceManager::expand_lock()); assert_lock_strong(SpaceManager::expand_lock());
if (is_class()) {
assert(false, "We currently don't support more than one VirtualSpace for"
" the compressed class space. The initialization of the"
" CCS uses another code path and should not hit this path.");
return false;
}
if (vs_word_size == 0) { if (vs_word_size == 0) {
assert(false, "vs_word_size should always be at least _reserve_alignment large.");
return false; return false;
} }
// Reserve the space // Reserve the space
size_t vs_byte_size = vs_word_size * BytesPerWord; size_t vs_byte_size = vs_word_size * BytesPerWord;
assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned"); assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
// Allocate the meta virtual space and initialize it. // Allocate the meta virtual space and initialize it.
VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size); VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
...@@ -1103,7 +1177,8 @@ bool VirtualSpaceList::grow_vs(size_t vs_word_size) { ...@@ -1103,7 +1177,8 @@ bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
delete new_entry; delete new_entry;
return false; return false;
} else { } else {
assert(new_entry->reserved_words() == vs_word_size, "Must be"); assert(new_entry->reserved_words() == vs_word_size,
"Reserved memory size differs from requested memory size");
// ensure lock-free iteration sees fully initialized node // ensure lock-free iteration sees fully initialized node
OrderAccess::storestore(); OrderAccess::storestore();
link_vs(new_entry); link_vs(new_entry);
...@@ -1130,20 +1205,67 @@ void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) { ...@@ -1130,20 +1205,67 @@ void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
} }
} }
bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) { bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
size_t min_words,
size_t preferred_words) {
size_t before = node->committed_words(); size_t before = node->committed_words();
bool result = node->expand_by(word_size, pre_touch); bool result = node->expand_by(min_words, preferred_words);
size_t after = node->committed_words(); size_t after = node->committed_words();
// after and before can be the same if the memory was pre-committed. // after and before can be the same if the memory was pre-committed.
assert(after >= before, "Must be"); assert(after >= before, "Inconsistency");
inc_committed_words(after - before); inc_committed_words(after - before);
return result; return result;
} }
bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
assert(min_words <= preferred_words, "Invalid arguments");
if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
return false;
}
size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
if (allowed_expansion_words < min_words) {
return false;
}
size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
// Commit more memory from the current virtual space.
bool vs_expanded = expand_node_by(current_virtual_space(),
min_words,
max_expansion_words);
if (vs_expanded) {
return true;
}
// Get another virtual space.
size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
if (create_new_virtual_space(grow_vs_words)) {
if (current_virtual_space()->is_pre_committed()) {
// The memory was pre-committed, so we are done here.
assert(min_words <= current_virtual_space()->committed_words(),
"The new VirtualSpace was pre-committed, so it"
"should be large enough to fit the alloc request.");
return true;
}
return expand_node_by(current_virtual_space(),
min_words,
max_expansion_words);
}
return false;
}
Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size, Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
size_t grow_chunks_by_words, size_t grow_chunks_by_words,
size_t medium_chunk_bunch) { size_t medium_chunk_bunch) {
...@@ -1151,63 +1273,27 @@ Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size, ...@@ -1151,63 +1273,27 @@ Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
// Allocate a chunk out of the current virtual space. // Allocate a chunk out of the current virtual space.
Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
if (next == NULL) { if (next != NULL) {
// Not enough room in current virtual space. Try to commit return next;
// more space.
size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
grow_chunks_by_words);
size_t page_size_words = os::vm_page_size() / BytesPerWord;
size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
page_size_words);
bool vs_expanded =
expand_by(current_virtual_space(), aligned_expand_vs_by_words);
if (!vs_expanded) {
// Should the capacity of the metaspaces be expanded for
// this allocation? If it's the virtual space for classes and is
// being used for CompressedHeaders, don't allocate a new virtualspace.
if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
// Get another virtual space.
size_t allocation_aligned_expand_words =
align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
size_t grow_vs_words =
MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
if (grow_vs(grow_vs_words)) {
// Got it. It's on the list now. Get a chunk from it.
assert(current_virtual_space()->expanded_words() == 0,
"New virtual space nodes should not have expanded");
size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
page_size_words);
// We probably want to expand by aligned_expand_vs_by_words here.
expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
}
} else {
// Allocation will fail and induce a GC
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
" Fail instead of expand the metaspace");
}
}
} else {
// The virtual space expanded, get a new chunk
next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
assert(next != NULL, "Just expanded, should succeed");
}
} }
assert(next == NULL || (next->next() == NULL && next->prev() == NULL), // The expand amount is currently only determined by the requested sizes
"New chunk is still on some list"); // and not how much committed memory is left in the current virtual space.
return next;
}
Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size, size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
size_t chunk_bunch) { size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words());
// Get a chunk from the chunk freelist if (min_word_size >= preferred_word_size) {
Metachunk* new_chunk = get_new_chunk(chunk_word_size, // Can happen when humongous chunks are allocated.
chunk_word_size, preferred_word_size = min_word_size;
chunk_bunch); }
return new_chunk;
bool expanded = expand_by(min_word_size, preferred_word_size);
if (expanded) {
next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
assert(next != NULL, "The allocation was expected to succeed after the expansion");
}
return next;
} }
void VirtualSpaceList::print_on(outputStream* st) const { void VirtualSpaceList::print_on(outputStream* st) const {
...@@ -1256,96 +1342,96 @@ bool VirtualSpaceList::contains(const void *ptr) { ...@@ -1256,96 +1342,96 @@ bool VirtualSpaceList::contains(const void *ptr) {
// Calculate the amount to increase the high water mark (HWM). // Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that // Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon. If that is not // another expansion is not requested too soon. If that is not
// enough to satisfy the allocation (i.e. big enough for a word_size // enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// allocation), increase by MaxMetaspaceExpansion. If that is still // If that is still not enough, expand by the size of the allocation
// not enough, expand by the size of the allocation (word_size) plus // plus some.
// some. size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) { size_t min_delta = MinMetaspaceExpansion;
size_t before_inc = MetaspaceGC::capacity_until_GC(); size_t max_delta = MaxMetaspaceExpansion;
size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord; size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
size_t page_size_words = os::vm_page_size() / BytesPerWord; if (delta <= min_delta) {
size_t size_delta_words = align_size_up(word_size, page_size_words); delta = min_delta;
size_t delta_words = MAX2(size_delta_words, min_delta_words); } else if (delta <= max_delta) {
if (delta_words > min_delta_words) {
// Don't want to hit the high water mark on the next // Don't want to hit the high water mark on the next
// allocation so make the delta greater than just enough // allocation so make the delta greater than just enough
// for this allocation. // for this allocation.
delta_words = MAX2(delta_words, max_delta_words); delta = max_delta;
if (delta_words > max_delta_words) { } else {
// This allocation is large but the next ones are probably not // This allocation is large but the next ones are probably not
// so increase by the minimum. // so increase by the minimum.
delta_words = delta_words + min_delta_words; delta = delta + min_delta;
}
} }
return delta_words;
assert_is_size_aligned(delta, Metaspace::commit_alignment());
return delta;
}
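To make the rewritten high-water-mark policy concrete, a worked sketch under assumed flag values (MinMetaspaceExpansion = 256K, MaxMetaspaceExpansion = 4M, 64K commit alignment; all hypothetical):

#include <cstddef>

static size_t delta_capacity(size_t bytes) {
  const size_t K = 1024;
  const size_t min_delta = 256 * K, max_delta = 4 * 1024 * K, align = 64 * K;
  size_t delta = (bytes + align - 1) & ~(align - 1);  // align_size_up
  if (delta <= min_delta) {
    delta = min_delta;        // small request: take at least the floor
  } else if (delta <= max_delta) {
    delta = max_delta;        // medium request: jump straight to the ceiling
  } else {
    delta += min_delta;       // huge request: the allocation size plus some
  }
  return delta;
}

// delta_capacity(100 * 1024)       == 256K (minimum expansion)
// delta_capacity(1024 * 1024)      == 4M   (maximum expansion)
// delta_capacity(10 * 1024 * 1024) == 10M + 256K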
size_t MetaspaceGC::capacity_until_GC() {
size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
assert(value >= MetaspaceSize, "Not initialized properly?");
return value;
} }
bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
assert_is_size_aligned(v, Metaspace::commit_alignment());
// If the user wants a limit, impose one. return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
// The reason for someone using this flag is to limit reserved space. So }
// for non-class virtual space, compare against virtual spaces that are reserved.
// For class virtual space, we only compare against the committed space, not size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
// reserved space, because this is a larger space prereserved for compressed assert_is_size_aligned(v, Metaspace::commit_alignment());
// class pointers.
if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) { return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType); }
size_t class_allocated = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
size_t real_allocated = nonclass_allocated + class_allocated; bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
if (real_allocated >= MaxMetaspaceSize) { // Check if the compressed class space is full.
if (is_class && Metaspace::using_class_space()) {
size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
return false; return false;
} }
} }
// Class virtual space should always be expanded. Call GC for the other // Check if the user has imposed a limit on the metaspace memory.
// metadata virtual space. size_t committed_bytes = MetaspaceAux::committed_bytes();
if (Metaspace::using_class_space() && if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
(vsl == Metaspace::class_space_list())) return true; return false;
}
return true;
}
size_t MetaspaceGC::allowed_expansion() {
size_t committed_bytes = MetaspaceAux::committed_bytes();
// If this is part of an allocation after a GC, expand size_t left_until_max = MaxMetaspaceSize - committed_bytes;
// unconditionally.
if (MetaspaceGC::expand_after_GC()) { // Always grant expansion if we are initializing the JVM,
return true; // or if the GC_locker is preventing GCs.
if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
return left_until_max / BytesPerWord;
} }
size_t capacity_until_gc = capacity_until_GC();
// If the capacity is below the minimum capacity, allow the if (capacity_until_gc <= committed_bytes) {
// expansion. Also set the high-water-mark (capacity_until_GC) return 0;
// to that minimum capacity so that a GC will not be induced
// until that minimum capacity is exceeded.
size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
size_t metaspace_size_bytes = MetaspaceSize;
if (committed_capacity_bytes < metaspace_size_bytes ||
capacity_until_GC() == 0) {
set_capacity_until_GC(metaspace_size_bytes);
return true;
} else {
if (committed_capacity_bytes < capacity_until_GC()) {
return true;
} else {
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print_cr(" allocation request size " SIZE_FORMAT
" capacity_until_GC " SIZE_FORMAT
" allocated_capacity_bytes " SIZE_FORMAT,
word_size,
capacity_until_GC(),
MetaspaceAux::allocated_capacity_bytes());
}
return false;
}
} }
}
size_t left_until_GC = capacity_until_gc - committed_bytes;
size_t left_to_commit = MIN2(left_until_GC, left_until_max);
return left_to_commit / BytesPerWord;
}
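Editor's note — to make the allowed_expansion() arithmetic above concrete: with 60 MB committed, a 70 MB high-water mark, and MaxMetaspaceSize at 64 MB, the grant is min(70-60, 64-60) = 4 MB worth of words. A minimal standalone sketch of that calculation, assuming a 64-bit word size; the function and constant names are illustrative, not HotSpot API:

#include <algorithm>
#include <cassert>
#include <cstddef>

const size_t BYTES_PER_WORD = 8;  // assumption: 64-bit HotSpot word size

size_t allowed_expansion_words(size_t committed, size_t capacity_until_gc,
                               size_t max_metaspace) {
  size_t left_until_max = max_metaspace - committed;
  if (capacity_until_gc <= committed) {
    return 0;  // already at/over the GC threshold: expansion must wait for a GC
  }
  size_t left_until_gc = capacity_until_gc - committed;
  return std::min(left_until_gc, left_until_max) / BYTES_PER_WORD;
}

int main() {
  const size_t M = 1024 * 1024;
  // min(70M - 60M, 64M - 60M) = 4M -> 524288 eight-byte words.
  assert(allowed_expansion_words(60 * M, 70 * M, 64 * M) == 4 * M / BYTES_PER_WORD);
  return 0;
}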
void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

-  // Until a faster way of calculating the "used" quantity is implemented,
-  // use "capacity".
  const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

@@ -1377,9 +1463,10 @@ void MetaspaceGC::compute_new_size() {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
+   expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
-     MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
+     MetaspaceGC::inc_capacity_until_GC(expand_bytes);
    }
    if (PrintGCDetails && Verbose) {
      size_t new_capacity_until_GC = capacity_until_GC;

@@ -1436,6 +1523,9 @@ void MetaspaceGC::compute_new_size() {
  // on the third call, and 100% by the fourth call.  But if we recompute
  // size without shrinking, it goes back to 0%.
  shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
+ shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
+
  assert(shrink_bytes <= max_shrink_bytes,
         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
                 shrink_bytes, max_shrink_bytes));

@@ -1467,7 +1557,7 @@ void MetaspaceGC::compute_new_size() {
  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
-   MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
+   MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
  }
}
@@ -1700,7 +1790,6 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
    assert(free_list != NULL, "Sanity check");

    chunk = free_list->head();
-   debug_only(Metachunk* debug_head = chunk;)

    if (chunk == NULL) {
      return NULL;

@@ -1709,9 +1798,6 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
    // Remove the chunk as the head of the list.
    free_list->remove_chunk(chunk);

-   // Chunk is being removed from the chunks free list.
-   dec_free_chunks_total(chunk->capacity_word_size());

    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,

@@ -1722,21 +1808,22 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
                                      word_size,
                                      FreeBlockDictionary<Metachunk>::atLeast);
-   if (chunk != NULL) {
-     if (TraceMetadataHumongousAllocation) {
-       size_t waste = chunk->word_size() - word_size;
-       gclog_or_tty->print_cr("Free list allocate humongous chunk size "
-                              SIZE_FORMAT " for requested size " SIZE_FORMAT
-                              " waste " SIZE_FORMAT,
-                              chunk->word_size(), word_size, waste);
-     }
-     // Chunk is being removed from the chunks free list.
-     dec_free_chunks_total(chunk->capacity_word_size());
-   } else {
-     return NULL;
-   }
+   if (chunk == NULL) {
+     return NULL;
+   }
+
+   if (TraceMetadataHumongousAllocation) {
+     size_t waste = chunk->word_size() - word_size;
+     gclog_or_tty->print_cr("Free list allocate humongous chunk size "
+                            SIZE_FORMAT " for requested size " SIZE_FORMAT
+                            " waste " SIZE_FORMAT,
+                            chunk->word_size(), word_size, waste);
+   }
  }

+ // Chunk is being removed from the chunks free list.
+ dec_free_chunks_total(chunk->capacity_word_size());
+
  // Remove it from the links to this freelist
  chunk->set_next(NULL);
  chunk->set_prev(NULL);
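Editor's note — the hunk above inverts the null-check so that both the ordinary and humongous chunk paths fall through to a single accounting point, instead of duplicating dec_free_chunks_total() in each branch. A minimal sketch of the resulting shape (container and field names hypothetical):

#include <cstddef>

struct Chunk { size_t capacity; Chunk* next; };

struct FreeList {
  Chunk* head;
  size_t total_capacity;

  Chunk* get() {
    Chunk* chunk = head;
    if (chunk == nullptr) {
      return nullptr;                 // early return: nothing to account for
    }
    head = chunk->next;
    total_capacity -= chunk->capacity; // single accounting point for all success paths
    chunk->next = nullptr;
    return chunk;
  }
};

int main() {
  Chunk c = {64, nullptr};
  FreeList fl = {&c, 64};
  return fl.get() == &c ? 0 : 1;
}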
@@ -1977,6 +2064,15 @@ size_t SpaceManager::calc_chunk_size(size_t word_size) {
  return chunk_word_size;
}

+void SpaceManager::track_metaspace_memory_usage() {
+  if (is_init_completed()) {
+    if (is_class()) {
+      MemoryService::track_compressed_class_memory_usage();
+    }
+    MemoryService::track_metaspace_memory_usage();
+  }
+}
+
MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  assert(vs_list()->current_virtual_space() != NULL,
         "Should have been set");

@@ -2002,15 +2098,24 @@ MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  size_t grow_chunks_by_words = calc_chunk_size(word_size);
  Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);

- if (next != NULL) {
-   Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
- }
+ MetaWord* mem = NULL;

  // If a chunk was available, add it to the in-use chunk list
  // and do an allocation from it.
  if (next != NULL) {
+   Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
    // Add to this manager's list of chunks in use.
    add_chunk(next, false);
-   return next->allocate(word_size);
+   mem = next->allocate(word_size);
  }
- return NULL;
+
+ // Track metaspace memory usage statistic.
+ track_metaspace_memory_usage();
+
+ return mem;
}

void SpaceManager::print_on(outputStream* st) const {
@@ -2366,6 +2471,7 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) {
    inc_used_metrics(word_size);
    return current_chunk()->allocate(word_size); // caller handles null result
  }
+
  if (current_chunk() != NULL) {
    result = current_chunk()->allocate(word_size);
  }

@@ -2373,7 +2479,8 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) {
  if (result == NULL) {
    result = grow_and_allocate(word_size);
  }
- if (result != 0) {
+
+ if (result != NULL) {
    inc_used_metrics(word_size);
    assert(result != (MetaWord*) chunks_in_use(MediumIndex),
           "Head of the list is being allocated");
@@ -2639,24 +2746,26 @@ void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
void MetaspaceAux::print_on(outputStream* out) {
  Metaspace::MetadataType nct = Metaspace::NonClassType;

- out->print_cr(" Metaspace total "
-               SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-               " reserved " SIZE_FORMAT "K",
-               allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
-
- out->print_cr("  data space     "
-               SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-               " reserved " SIZE_FORMAT "K",
-               allocated_capacity_bytes(nct)/K,
-               allocated_used_bytes(nct)/K,
-               reserved_bytes(nct)/K);
+ out->print_cr(" Metaspace       "
+               "used "      SIZE_FORMAT "K, "
+               "capacity "  SIZE_FORMAT "K, "
+               "committed " SIZE_FORMAT "K, "
+               "reserved "  SIZE_FORMAT "K",
+               allocated_used_bytes()/K,
+               allocated_capacity_bytes()/K,
+               committed_bytes()/K,
+               reserved_bytes()/K);
+
  if (Metaspace::using_class_space()) {
    Metaspace::MetadataType ct = Metaspace::ClassType;
    out->print_cr("  class space    "
-                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                 " reserved " SIZE_FORMAT "K",
-                 allocated_capacity_bytes(ct)/K,
+                 "used "      SIZE_FORMAT "K, "
+                 "capacity "  SIZE_FORMAT "K, "
+                 "committed " SIZE_FORMAT "K, "
+                 "reserved "  SIZE_FORMAT "K",
                  allocated_used_bytes(ct)/K,
+                 allocated_capacity_bytes(ct)/K,
+                 committed_bytes(ct)/K,
                  reserved_bytes(ct)/K);
  }
}
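Editor's note — with the reworked format, one summary line carries all four metrics in used/capacity/committed/reserved order. An illustrative sample of the resulting output (the numbers are invented, not taken from this changeset):

 Metaspace       used 2425K, capacity 4498K, committed 4864K, reserved 1056768K
  class space    used 262K, capacity 386K, committed 512K, reserved 1048576K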
@@ -2808,6 +2917,9 @@ void MetaspaceAux::verify_metrics() {
size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

+size_t Metaspace::_commit_alignment = 0;
+size_t Metaspace::_reserve_alignment = 0;
+
Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
  initialize(lock, type);
}
@@ -2869,21 +2981,30 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
         "Metaspace size is too big");
+ assert_is_ptr_aligned(requested_addr, _reserve_alignment);
+ assert_is_ptr_aligned(cds_base, _reserve_alignment);
+ assert_is_size_aligned(class_metaspace_size(), _reserve_alignment);
+
+ // Don't use large pages for the class space.
+ bool large_pages = false;

  ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
-                                            os::vm_allocation_granularity(),
-                                            false, requested_addr, 0);
+                                            _reserve_alignment,
+                                            large_pages,
+                                            requested_addr, 0);
  if (!metaspace_rs.is_reserved()) {
    if (UseSharedSpaces) {
+     size_t increment = align_size_up(1*G, _reserve_alignment);
+
      // Keep trying to allocate the metaspace, increasing the requested_addr
      // by 1GB each time, until we reach an address that will no longer allow
      // use of CDS with compressed klass pointers.
      char *addr = requested_addr;
-     while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
-            can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
-       addr = addr + 1*G;
+     while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
+            can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
+       addr = addr + increment;
        metaspace_rs = ReservedSpace(class_metaspace_size(),
-                                    os::vm_allocation_granularity(), false, addr, 0);
+                                    _reserve_alignment, large_pages, addr, 0);
      }
    }

@@ -2894,7 +3015,7 @@ void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, a
    // So, UseCompressedClassPointers cannot be turned off at this point.
    if (!metaspace_rs.is_reserved()) {
      metaspace_rs = ReservedSpace(class_metaspace_size(),
-                                  os::vm_allocation_granularity(), false);
+                                  _reserve_alignment, large_pages);
      if (!metaspace_rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
                                              class_metaspace_size()));
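Editor's note — the retry loop above walks candidate base addresses upward in reservation-aligned 1 GB steps, and the `addr + increment > addr` test doubles as an overflow guard at the top of the address space. A standalone sketch of that probing pattern under stated assumptions (reserve_at is a hypothetical stand-in for the ReservedSpace constructor, and the "fails below 4 GB" rule is invented for the demo):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in: pretend only addresses at or above 4 GB can be reserved.
static bool reserve_at(char* addr, size_t size) {
  (void)size;
  return (uintptr_t)addr >= (uintptr_t)4 * 1024 * 1024 * 1024;
}

static char* probe_upward(char* requested, size_t size, size_t increment) {
  char* addr = requested;
  // Stop on success or when addr + increment would wrap around.
  while (!reserve_at(addr, size) && (addr + increment > addr)) {
    addr += increment;
  }
  return reserve_at(addr, size) ? addr : nullptr;
}

int main() {
  const size_t G = 1024 * 1024 * 1024;
  char* base = probe_upward((char*)G, 128 * 1024 * 1024, G);
  std::printf("reserved at %p\n", (void*)base);
  return 0;
}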
@@ -2933,34 +3054,96 @@ void Metaspace::initialize_class_space(ReservedSpace rs) {
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
+
+ if (!_class_space_list->initialization_succeeded()) {
+   vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
+ }
}

#endif

+// Align down. If aligning results in 0, return 'alignment'.
+static size_t restricted_align_down(size_t size, size_t alignment) {
+  return MAX2(alignment, align_size_down_(size, alignment));
+}
+
+void Metaspace::ergo_initialize() {
+  if (DumpSharedSpaces) {
+    // Using large pages when dumping the shared archive is currently not implemented.
+    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
+  }
+
+  size_t page_size = os::vm_page_size();
+  if (UseLargePages && UseLargePagesInMetaspace) {
+    page_size = os::large_page_size();
+  }
+
+  _commit_alignment  = page_size;
+  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
+
+  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
+  // override whether MaxMetaspaceSize was set on the command line or not.
+  // This information is needed later to conform to the specification of the
+  // java.lang.management.MemoryUsage API.
+  //
+  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
+  // globals.hpp to the aligned value, but this is not possible, since the
+  // alignment depends on other flags being parsed.
+  MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment);
+
+  if (MetaspaceSize > MaxMetaspaceSize) {
+    MetaspaceSize = MaxMetaspaceSize;
+  }
+
+  MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment);
+
+  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
+
+  if (MetaspaceSize < 256*K) {
+    vm_exit_during_initialization("Too small initial Metaspace size");
+  }
+
+  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment);
+  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
+
+  CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
+  set_class_metaspace_size(CompressedClassSpaceSize);
+}
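Editor's note — restricted_align_down rounds a size down to the given granule but never below one granule, so a flag that would align to 0 is clamped up instead. A minimal standalone restatement with concrete values (align_down mimics HotSpot's align_size_down_ for power-of-two alignments; not the VM's own code):

#include <algorithm>
#include <cassert>
#include <cstddef>

static size_t align_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);  // alignment must be a power of two
}

static size_t restricted_align_down(size_t size, size_t alignment) {
  return std::max(alignment, align_down(size, alignment));
}

int main() {
  const size_t M = 1024 * 1024;
  assert(restricted_align_down(10 * M, 4 * M) == 8 * M);  // ordinary align-down
  assert(restricted_align_down(3 * M, 4 * M) == 4 * M);   // would be 0, clamped to one granule
  return 0;
}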
void Metaspace::global_initialize() {
  // Initialize the alignment for shared spaces.
  int max_alignment = os::vm_page_size();
  size_t cds_total = 0;

- set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
-                                        os::vm_allocation_granularity()));
-
  MetaspaceShared::set_max_alignment(max_alignment);

  if (DumpSharedSpaces) {
    SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
    SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
    SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment);

    // Initialize with the sum of the shared space sizes.  The read-only
    // and read write metaspace chunks will be allocated out of this and the
    // remainder is the misc code and data chunks.
    cds_total = FileMapInfo::shared_spaces_size();
+   cds_total = align_size_up(cds_total, _reserve_alignment);
    _space_list = new VirtualSpaceList(cds_total/wordSize);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

+   if (!_space_list->initialization_succeeded()) {
+     vm_exit_during_initialization("Unable to dump shared archive.", NULL);
+   }
+
#ifdef _LP64
+   if (cds_total + class_metaspace_size() > (uint64_t)max_juint) {
+     vm_exit_during_initialization("Unable to dump shared archive.",
+         err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
+                 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
+                 "klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(),
+                 cds_total + class_metaspace_size(), (size_t)max_juint));
+   }
+
    // Set the compressed klass pointer base so that decoding of these pointers works
    // properly when creating the shared archive.
    assert(UseCompressedOops && UseCompressedClassPointers,

@@ -2971,9 +3154,6 @@ void Metaspace::global_initialize() {
                   _space_list->current_virtual_space()->bottom());
    }

-   // Set the shift to zero.
-   assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
-          "CDS region is too large");
    Universe::set_narrow_klass_shift(0);
#endif

@@ -2992,12 +3172,12 @@ void Metaspace::global_initialize() {
      // Map in spaces now also
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        FileMapInfo::set_current_info(mapinfo);
-       cds_total = FileMapInfo::shared_spaces_size();
-       cds_address = (address)mapinfo->region_base(0);
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
+
+     cds_total = FileMapInfo::shared_spaces_size();
+     cds_address = (address)mapinfo->region_base(0);
    }

#ifdef _LP64
@@ -3005,7 +3185,9 @@ void Metaspace::global_initialize() {
    // above the heap and above the CDS area (if it exists).
    if (using_class_space()) {
      if (UseSharedSpaces) {
-       allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
+       char* cds_end = (char*)(cds_address + cds_total);
+       cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
+       allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
      } else {
        allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
      }

@@ -3023,11 +3205,19 @@ void Metaspace::global_initialize() {
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
-   size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
+   size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
+   word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
+
    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
+
+   if (!_space_list->initialization_succeeded()) {
+     vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
+   }
  }
+
+ MetaspaceGC::initialize();
}
Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,

@@ -3039,7 +3229,7 @@ Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
    return chunk;
  }

- return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
+ return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
}

void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
@@ -3112,19 +3302,18 @@ MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
}

MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
- MetaWord* result;
- MetaspaceGC::set_expand_after_GC(true);
- size_t before_inc = MetaspaceGC::capacity_until_GC();
- size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
- MetaspaceGC::inc_capacity_until_GC(delta_bytes);
+ size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
+ assert(delta_bytes > 0, "Must be");
+
+ size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
+ size_t before_inc = after_inc - delta_bytes;
+
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
-                          " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
+                          " to " SIZE_FORMAT, before_inc, after_inc);
  }
- result = allocate(word_size, mdtype);
- return result;
+
+ return allocate(word_size, mdtype);
}
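Editor's note — the rewrite above works because inc_capacity_until_GC() now returns the post-increment value, so the caller derives the pre-increment value by subtraction instead of re-reading a field that a concurrent thread may have changed in between. A minimal sketch of that idea with standard C++ atomics (names hypothetical, not the HotSpot API):

#include <atomic>
#include <cstdint>
#include <cstdio>

std::atomic<intptr_t> capacity_until_gc{64 * 1024 * 1024};

// Returns the value *after* the increment: fetch_add yields the old value, so add v.
intptr_t inc_capacity_until_gc(intptr_t v) {
  return capacity_until_gc.fetch_add(v, std::memory_order_acq_rel) + v;
}

int main() {
  intptr_t delta = 4 * 1024 * 1024;
  intptr_t after = inc_capacity_until_gc(delta);
  intptr_t before = after - delta;  // race-free: derived, not re-read
  std::printf("Increase capacity to GC from %ld to %ld\n", (long)before, (long)after);
  return 0;
}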
// Space allocated in the Metaspace.  This may

@@ -3206,6 +3395,7 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  }
}

Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                               bool read_only, MetaspaceObj::Type type, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {

@@ -3213,20 +3403,16 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
    return NULL;  // caller does a CHECK_NULL too
  }

- MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
-
- // SSS: Should we align the allocations and make sure the sizes are aligned.
- MetaWord* result = NULL;
-
  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");
+
  // Allocate in metaspaces without taking out a lock, because it deadlocks
  // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
  // to revisit this for application class data sharing.
  if (DumpSharedSpaces) {
    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
-   result = space->allocate(word_size, NonClassType);
+   MetaWord* result = space->allocate(word_size, NonClassType);
    if (result == NULL) {
      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
    } else {

@@ -3235,42 +3421,64 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
    return Metablock::initialize(result, word_size);
  }

- result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
+ MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
+
+ // Try to allocate metadata.
+ MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
-   // Try to clean out some memory and retry.
-   result =
-     Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
-       loader_data, word_size, mdtype);
-
-   // If result is still null, we are out of memory.
-   if (result == NULL) {
-     if (Verbose && TraceMetadataChunkAllocation) {
-       gclog_or_tty->print_cr("Metaspace allocation failed for size "
-                              SIZE_FORMAT, word_size);
-       if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
-       MetaspaceAux::dump(gclog_or_tty);
-     }
-     // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
-     const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
-                                                                    "Metadata space";
-     report_java_out_of_memory(space_string);
-
-     if (JvmtiExport::should_post_resource_exhausted()) {
-       JvmtiExport::post_resource_exhausted(
-           JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
-           space_string);
-     }
-     if (is_class_space_allocation(mdtype)) {
-       THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
-     } else {
-       THROW_OOP_0(Universe::out_of_memory_error_metaspace());
-     }
-   }
+   // Allocation failed.
+   if (is_init_completed()) {
+     // Only start a GC if the bootstrapping has completed.
+
+     // Try to clean out some memory and retry.
+     result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
+         loader_data, word_size, mdtype);
+   }
  }
+
+ if (result == NULL) {
+   report_metadata_oome(loader_data, word_size, mdtype, THREAD);
+   // Will not reach here.
+   return NULL;
+ }
+
  return Metablock::initialize(result, word_size);
}
+void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
+  // If result is still null, we are out of memory.
+  if (Verbose && TraceMetadataChunkAllocation) {
+    gclog_or_tty->print_cr("Metaspace allocation failed for size "
+                           SIZE_FORMAT, word_size);
+    if (loader_data->metaspace_or_null() != NULL) {
+      loader_data->dump(gclog_or_tty);
+    }
+    MetaspaceAux::dump(gclog_or_tty);
+  }
+
+  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
+  const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
+                                                                 "Metadata space";
+  report_java_out_of_memory(space_string);
+
+  if (JvmtiExport::should_post_resource_exhausted()) {
+    JvmtiExport::post_resource_exhausted(
+        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
+        space_string);
+  }
+
+  if (!is_init_completed()) {
+    vm_exit_during_initialization("OutOfMemoryError", space_string);
+  }
+
+  if (is_class_space_allocation(mdtype)) {
+    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
+  } else {
+    THROW_OOP(Universe::out_of_memory_error_metaspace());
+  }
+}
+
void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");
...
@@ -87,9 +87,10 @@ class Metaspace : public CHeapObj<mtClass> {
  friend class MetaspaceAux;

 public:
- enum MetadataType {ClassType = 0,
-                    NonClassType = ClassType + 1,
-                    MetadataTypeCount = ClassType + 2
+ enum MetadataType {
+   ClassType,
+   NonClassType,
+   MetadataTypeCount
  };
  enum MetaspaceType {
    StandardMetaspaceType,

@@ -103,6 +104,9 @@ class Metaspace : public CHeapObj<mtClass> {
 private:
  void initialize(Mutex* lock, MetaspaceType type);

+ // Get the first chunk for a Metaspace.  Used for
+ // special cases such as the boot class loader, reflection
+ // class loader and anonymous class loader.
  Metachunk* get_initialization_chunk(MetadataType mdtype,
                                      size_t chunk_word_size,
                                      size_t chunk_bunch);

@@ -123,6 +127,9 @@ class Metaspace : public CHeapObj<mtClass> {
  static size_t _first_chunk_word_size;
  static size_t _first_class_chunk_word_size;

+ static size_t _commit_alignment;
+ static size_t _reserve_alignment;
+
  SpaceManager* _vsm;
  SpaceManager* vsm() const { return _vsm; }

@@ -191,12 +198,17 @@ class Metaspace : public CHeapObj<mtClass> {
  Metaspace(Mutex* lock, MetaspaceType type);
  ~Metaspace();

- // Initialize globals for Metaspace
+ static void ergo_initialize();
  static void global_initialize();

  static size_t first_chunk_word_size() { return _first_chunk_word_size; }
  static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }

+ static size_t reserve_alignment()       { return _reserve_alignment; }
+ static size_t reserve_alignment_words() { return _reserve_alignment / BytesPerWord; }
+ static size_t commit_alignment()        { return _commit_alignment; }
+ static size_t commit_alignment_words()  { return _commit_alignment / BytesPerWord; }
+
  char*  bottom() const;
  size_t used_words_slow(MetadataType mdtype) const;
  size_t free_words_slow(MetadataType mdtype) const;

@@ -219,6 +231,9 @@ class Metaspace : public CHeapObj<mtClass> {
  static void purge(MetadataType mdtype);
  static void purge();

+ static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size,
+                                  MetadataType mdtype, TRAPS);
+
  void print_on(outputStream* st) const;
  // Debugging support
  void verify();
@@ -352,17 +367,10 @@ class MetaspaceAux : AllStatic {
class MetaspaceGC : AllStatic {

- // The current high-water-mark for inducing a GC.  When
- // the capacity of all space in the virtual lists reaches this value,
- // a GC is induced and the value is increased.  This should be changed
- // to the space actually used for allocations to avoid affects of
- // fragmentation losses to partially used chunks.  Size is in words.
- static size_t _capacity_until_GC;
-
- // After a GC is done any allocation that fails should try to expand
- // the capacity of the Metaspaces.  This flag is set during attempts
- // to allocate in the VMGCOperation that does the GC.
- static bool _expand_after_GC;
+ // The current high-water-mark for inducing a GC.
+ // When committed memory of all metaspaces reaches this value,
+ // a GC is induced and the value is increased.  Size is in bytes.
+ static volatile intptr_t _capacity_until_GC;

  // For a CMS collection, signal that a concurrent collection should
  // be started.

@@ -370,20 +378,16 @@ class MetaspaceGC : AllStatic {
  static uint _shrink_factor;

- static void set_capacity_until_GC(size_t v) { _capacity_until_GC = v; }
-
  static size_t shrink_factor() { return _shrink_factor; }
  void set_shrink_factor(uint v) { _shrink_factor = v; }

 public:
- static size_t capacity_until_GC() { return _capacity_until_GC; }
- static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; }
- static void dec_capacity_until_GC(size_t v) {
-   _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0;
- }
- static bool expand_after_GC()           { return _expand_after_GC; }
- static void set_expand_after_GC(bool v) { _expand_after_GC = v; }
+ static void initialize() { _capacity_until_GC = MetaspaceSize; }
+
+ static size_t capacity_until_GC();
+ static size_t inc_capacity_until_GC(size_t v);
+ static size_t dec_capacity_until_GC(size_t v);

  static bool should_concurrent_collect() { return _should_concurrent_collect; }
  static void set_should_concurrent_collect(bool v) {

@@ -391,11 +395,14 @@ class MetaspaceGC : AllStatic {
  }

  // The amount to increase the high-water-mark (_capacity_until_GC)
- static size_t delta_capacity_until_GC(size_t word_size);
+ static size_t delta_capacity_until_GC(size_t bytes);
+
+ // Tells if we can expand metaspace without hitting set limits.
+ static bool can_expand(size_t words, bool is_class);
+
+ // Returns the amount that we can expand without hitting a GC,
+ // measured in words.
+ static size_t allowed_expansion();

- // It is expected that this will be called when the current capacity
- // has been used and a GC should be considered.
- static bool should_expand(VirtualSpaceList* vsl, size_t word_size);
-
  // Calculate the new high-water mark at which to induce
  // a GC.
...
@@ -3713,7 +3713,8 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
  Node* no_base = __ top();
  float likely = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);
- Node* zero = __ ConI(0);
+ Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
+ Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
  Node* zeroX = __ ConX(0);

  // Get the alias_index for raw card-mark memory

@@ -3769,8 +3770,16 @@ void GraphKit::g1_write_barrier_post(Node* oop_store,
        // load the original value of the card
        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

-       __ if_then(card_val, BoolTest::ne, zero); {
-         g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
+       __ if_then(card_val, BoolTest::ne, young_card); {
+         sync_kit(ideal);
+         // Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier.
+         insert_mem_bar(Op_MemBarVolatile, oop_store);
+         __ sync_kit(this);
+
+         Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
+         __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
+           g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
+         } __ end_if();
        } __ end_if();
      } __ end_if();
    } __ end_if();
...
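Editor's note — in plain control flow, the generated G1 post-barrier now filters on the card value twice, with a StoreLoad-style fence between the reference store and the second read: young-region cards are skipped outright, and already-dirty cards are not re-marked. A hedged C++ sketch of that logic (the card encodings and helper are hypothetical stand-ins for the emitted IR, not HotSpot code):

#include <atomic>
#include <cstdint>

enum : uint8_t { kYoungCard = 0x02, kDirtyCard = 0x00 };  // hypothetical encodings

void g1_post_barrier(std::atomic<uint8_t>* card) {
  // Skip cards covering young-region objects entirely.
  if (card->load(std::memory_order_relaxed) != kYoungCard) {
    // StoreLoad: order the preceding reference store before the re-read.
    std::atomic_thread_fence(std::memory_order_seq_cst);
    // Only dirty/enqueue the card if another thread has not already done so.
    if (card->load(std::memory_order_relaxed) != kDirtyCard) {
      card->store(kDirtyCard, std::memory_order_relaxed);  // stands in for g1_mark_card
    }
  }
}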
@@ -2657,16 +2657,16 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
      FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
    // -Xmn for compatibility with other JVM vendors
    } else if (match_option(option, "-Xmn", &tail)) {
-     julong long_initial_eden_size = 0;
-     ArgsRange errcode = parse_memory_size(tail, &long_initial_eden_size, 1);
+     julong long_initial_young_size = 0;
+     ArgsRange errcode = parse_memory_size(tail, &long_initial_young_size, 1);
      if (errcode != arg_in_range) {
        jio_fprintf(defaultStream::error_stream(),
-                   "Invalid initial eden size: %s\n", option->optionString);
+                   "Invalid initial young generation size: %s\n", option->optionString);
        describe_range_error(errcode);
        return JNI_EINVAL;
      }
-     FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_eden_size);
-     FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_eden_size);
+     FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_young_size);
+     FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_young_size);
    // -Xms
    } else if (match_option(option, "-Xms", &tail)) {
      julong long_initial_heap_size = 0;

@@ -3666,6 +3666,9 @@ jint Arguments::apply_ergo() {
  assert(verify_serial_gc_flags(), "SerialGC unset");
#endif // INCLUDE_ALL_GCS

+ // Initialize Metaspace flags and alignments.
+ Metaspace::ergo_initialize();
+
  // Set bytecode rewriting flags
  set_bytecode_flags();
...
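Editor's note — the rename above reflects what -Xmn has always done: it sets both the initial and maximum young generation size, not just eden. For example (illustrative command line):

java -Xmn256m -Xms1g -Xmx1g MyApp    # sets NewSize = MaxNewSize = 256 MB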
...@@ -481,21 +481,21 @@ class CommandLineFlags { ...@@ -481,21 +481,21 @@ class CommandLineFlags {
#define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product) \ #define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product) \
\ \
lp64_product(bool, UseCompressedOops, false, \ lp64_product(bool, UseCompressedOops, false, \
"Use 32-bit object references in 64-bit VM " \ "Use 32-bit object references in 64-bit VM. " \
"lp64_product means flag is always constant in 32 bit VM") \ "lp64_product means flag is always constant in 32 bit VM") \
\ \
lp64_product(bool, UseCompressedClassPointers, false, \ lp64_product(bool, UseCompressedClassPointers, false, \
"Use 32-bit class pointers in 64-bit VM " \ "Use 32-bit class pointers in 64-bit VM. " \
"lp64_product means flag is always constant in 32 bit VM") \ "lp64_product means flag is always constant in 32 bit VM") \
\ \
notproduct(bool, CheckCompressedOops, true, \ notproduct(bool, CheckCompressedOops, true, \
"generate checks in encoding/decoding code in debug VM") \ "Generate checks in encoding/decoding code in debug VM") \
\ \
product_pd(uintx, HeapBaseMinAddress, \ product_pd(uintx, HeapBaseMinAddress, \
"OS specific low limit for heap base address") \ "OS specific low limit for heap base address") \
\ \
diagnostic(bool, PrintCompressedOopsMode, false, \ diagnostic(bool, PrintCompressedOopsMode, false, \
"Print compressed oops base address and encoding mode") \ "Print compressed oops base address and encoding mode") \
\ \
lp64_product(intx, ObjectAlignmentInBytes, 8, \ lp64_product(intx, ObjectAlignmentInBytes, 8, \
"Default object alignment in bytes, 8 is minimum") \ "Default object alignment in bytes, 8 is minimum") \
...@@ -517,7 +517,7 @@ class CommandLineFlags { ...@@ -517,7 +517,7 @@ class CommandLineFlags {
"Use lwsync instruction if true, else use slower sync") \ "Use lwsync instruction if true, else use slower sync") \
\ \
develop(bool, CleanChunkPoolAsync, falseInEmbedded, \ develop(bool, CleanChunkPoolAsync, falseInEmbedded, \
"Whether to clean the chunk pool asynchronously") \ "Clean the chunk pool asynchronously") \
\ \
/* Temporary: See 6948537 */ \ /* Temporary: See 6948537 */ \
experimental(bool, UseMemSetInBOT, true, \ experimental(bool, UseMemSetInBOT, true, \
...@@ -527,10 +527,12 @@ class CommandLineFlags { ...@@ -527,10 +527,12 @@ class CommandLineFlags {
"Enable normal processing of flags relating to field diagnostics")\ "Enable normal processing of flags relating to field diagnostics")\
\ \
experimental(bool, UnlockExperimentalVMOptions, false, \ experimental(bool, UnlockExperimentalVMOptions, false, \
"Enable normal processing of flags relating to experimental features")\ "Enable normal processing of flags relating to experimental " \
"features") \
\ \
product(bool, JavaMonitorsInStackTrace, true, \ product(bool, JavaMonitorsInStackTrace, true, \
"Print info. about Java monitor locks when the stacks are dumped")\ "Print information about Java monitor locks when the stacks are" \
"dumped") \
\ \
product_pd(bool, UseLargePages, \ product_pd(bool, UseLargePages, \
"Use large page memory") \ "Use large page memory") \
...@@ -541,8 +543,12 @@ class CommandLineFlags { ...@@ -541,8 +543,12 @@ class CommandLineFlags {
develop(bool, LargePagesIndividualAllocationInjectError, false, \ develop(bool, LargePagesIndividualAllocationInjectError, false, \
"Fail large pages individual allocation") \ "Fail large pages individual allocation") \
\ \
product(bool, UseLargePagesInMetaspace, false, \
"Use large page memory in metaspace. " \
"Only used if UseLargePages is enabled.") \
\
develop(bool, TracePageSizes, false, \ develop(bool, TracePageSizes, false, \
"Trace page size selection and usage.") \ "Trace page size selection and usage") \
\ \
product(bool, UseNUMA, false, \ product(bool, UseNUMA, false, \
"Use NUMA if available") \ "Use NUMA if available") \
...@@ -557,12 +563,12 @@ class CommandLineFlags { ...@@ -557,12 +563,12 @@ class CommandLineFlags {
"Force NUMA optimizations on single-node/UMA systems") \ "Force NUMA optimizations on single-node/UMA systems") \
\ \
product(uintx, NUMAChunkResizeWeight, 20, \ product(uintx, NUMAChunkResizeWeight, 20, \
"Percentage (0-100) used to weigh the current sample when " \ "Percentage (0-100) used to weigh the current sample when " \
"computing exponentially decaying average for " \ "computing exponentially decaying average for " \
"AdaptiveNUMAChunkSizing") \ "AdaptiveNUMAChunkSizing") \
\ \
product(uintx, NUMASpaceResizeRate, 1*G, \ product(uintx, NUMASpaceResizeRate, 1*G, \
"Do not reallocate more that this amount per collection") \ "Do not reallocate more than this amount per collection") \
\ \
product(bool, UseAdaptiveNUMAChunkSizing, true, \ product(bool, UseAdaptiveNUMAChunkSizing, true, \
"Enable adaptive chunk sizing for NUMA") \ "Enable adaptive chunk sizing for NUMA") \
...@@ -579,17 +585,17 @@ class CommandLineFlags { ...@@ -579,17 +585,17 @@ class CommandLineFlags {
product(intx, UseSSE, 99, \ product(intx, UseSSE, 99, \
"Highest supported SSE instructions set on x86/x64") \ "Highest supported SSE instructions set on x86/x64") \
\ \
product(bool, UseAES, false, \ product(bool, UseAES, false, \
"Control whether AES instructions can be used on x86/x64") \ "Control whether AES instructions can be used on x86/x64") \
\ \
product(uintx, LargePageSizeInBytes, 0, \ product(uintx, LargePageSizeInBytes, 0, \
"Large page size (0 to let VM choose the page size") \ "Large page size (0 to let VM choose the page size)") \
\ \
product(uintx, LargePageHeapSizeThreshold, 128*M, \ product(uintx, LargePageHeapSizeThreshold, 128*M, \
"Use large pages if max heap is at least this big") \ "Use large pages if maximum heap is at least this big") \
\ \
product(bool, ForceTimeHighResolution, false, \ product(bool, ForceTimeHighResolution, false, \
"Using high time resolution(For Win32 only)") \ "Using high time resolution (for Win32 only)") \
\ \
develop(bool, TraceItables, false, \ develop(bool, TraceItables, false, \
"Trace initialization and use of itables") \ "Trace initialization and use of itables") \
...@@ -605,10 +611,10 @@ class CommandLineFlags { ...@@ -605,10 +611,10 @@ class CommandLineFlags {
\ \
develop(bool, TraceLongCompiles, false, \ develop(bool, TraceLongCompiles, false, \
"Print out every time compilation is longer than " \ "Print out every time compilation is longer than " \
"a given threashold") \ "a given threshold") \
\ \
develop(bool, SafepointALot, false, \ develop(bool, SafepointALot, false, \
"Generates a lot of safepoints. Works with " \ "Generate a lot of safepoints. This works with " \
"GuaranteedSafepointInterval") \ "GuaranteedSafepointInterval") \
\ \
product_pd(bool, BackgroundCompilation, \ product_pd(bool, BackgroundCompilation, \
...@@ -616,13 +622,13 @@ class CommandLineFlags { ...@@ -616,13 +622,13 @@ class CommandLineFlags {
"compilation") \ "compilation") \
\ \
product(bool, PrintVMQWaitTime, false, \ product(bool, PrintVMQWaitTime, false, \
"Prints out the waiting time in VM operation queue") \ "Print out the waiting time in VM operation queue") \
\ \
develop(bool, NoYieldsInMicrolock, false, \ develop(bool, NoYieldsInMicrolock, false, \
"Disable yields in microlock") \ "Disable yields in microlock") \
\ \
develop(bool, TraceOopMapGeneration, false, \ develop(bool, TraceOopMapGeneration, false, \
"Shows oopmap generation") \ "Show OopMapGeneration") \
\ \
product(bool, MethodFlushing, true, \ product(bool, MethodFlushing, true, \
"Reclamation of zombie and not-entrant methods") \ "Reclamation of zombie and not-entrant methods") \
...@@ -631,10 +637,11 @@ class CommandLineFlags { ...@@ -631,10 +637,11 @@ class CommandLineFlags {
"Verify stack of each thread when it is entering a runtime call") \ "Verify stack of each thread when it is entering a runtime call") \
\ \
diagnostic(bool, ForceUnreachable, false, \ diagnostic(bool, ForceUnreachable, false, \
"Make all non code cache addresses to be unreachable with forcing use of 64bit literal fixups") \ "Make all non code cache addresses to be unreachable by " \
"forcing use of 64bit literal fixups") \
\ \
notproduct(bool, StressDerivedPointers, false, \ notproduct(bool, StressDerivedPointers, false, \
"Force scavenge when a derived pointers is detected on stack " \ "Force scavenge when a derived pointer is detected on stack " \
"after rtm call") \ "after rtm call") \
\ \
develop(bool, TraceDerivedPointers, false, \ develop(bool, TraceDerivedPointers, false, \
...@@ -653,86 +660,86 @@ class CommandLineFlags { ...@@ -653,86 +660,86 @@ class CommandLineFlags {
"Use Inline Caches for virtual calls ") \ "Use Inline Caches for virtual calls ") \
\ \
develop(bool, InlineArrayCopy, true, \ develop(bool, InlineArrayCopy, true, \
"inline arraycopy native that is known to be part of " \ "Inline arraycopy native that is known to be part of " \
"base library DLL") \ "base library DLL") \
\ \
develop(bool, InlineObjectHash, true, \ develop(bool, InlineObjectHash, true, \
"inline Object::hashCode() native that is known to be part " \ "Inline Object::hashCode() native that is known to be part " \
"of base library DLL") \ "of base library DLL") \
\ \
develop(bool, InlineNatives, true, \ develop(bool, InlineNatives, true, \
"inline natives that are known to be part of base library DLL") \ "Inline natives that are known to be part of base library DLL") \
\ \
develop(bool, InlineMathNatives, true, \ develop(bool, InlineMathNatives, true, \
"inline SinD, CosD, etc.") \ "Inline SinD, CosD, etc.") \
\ \
develop(bool, InlineClassNatives, true, \ develop(bool, InlineClassNatives, true, \
"inline Class.isInstance, etc") \ "Inline Class.isInstance, etc") \
\ \
develop(bool, InlineThreadNatives, true, \ develop(bool, InlineThreadNatives, true, \
"inline Thread.currentThread, etc") \ "Inline Thread.currentThread, etc") \
\ \
develop(bool, InlineUnsafeOps, true, \ develop(bool, InlineUnsafeOps, true, \
"inline memory ops (native methods) from sun.misc.Unsafe") \ "Inline memory ops (native methods) from sun.misc.Unsafe") \
\ \
product(bool, CriticalJNINatives, true, \ product(bool, CriticalJNINatives, true, \
"check for critical JNI entry points") \ "Check for critical JNI entry points") \
\ \
notproduct(bool, StressCriticalJNINatives, false, \ notproduct(bool, StressCriticalJNINatives, false, \
"Exercise register saving code in critical natives") \ "Exercise register saving code in critical natives") \
\ \
product(bool, UseSSE42Intrinsics, false, \ product(bool, UseSSE42Intrinsics, false, \
"SSE4.2 versions of intrinsics") \ "SSE4.2 versions of intrinsics") \
\ \
product(bool, UseAESIntrinsics, false, \ product(bool, UseAESIntrinsics, false, \
"use intrinsics for AES versions of crypto") \ "Use intrinsics for AES versions of crypto") \
\ \
product(bool, UseCRC32Intrinsics, false, \ product(bool, UseCRC32Intrinsics, false, \
"use intrinsics for java.util.zip.CRC32") \ "use intrinsics for java.util.zip.CRC32") \
\ \
develop(bool, TraceCallFixup, false, \ develop(bool, TraceCallFixup, false, \
"traces all call fixups") \ "Trace all call fixups") \
\ \
develop(bool, DeoptimizeALot, false, \ develop(bool, DeoptimizeALot, false, \
"deoptimize at every exit from the runtime system") \ "Deoptimize at every exit from the runtime system") \
\ \
notproduct(ccstrlist, DeoptimizeOnlyAt, "", \ notproduct(ccstrlist, DeoptimizeOnlyAt, "", \
"a comma separated list of bcis to deoptimize at") \ "A comma separated list of bcis to deoptimize at") \
\ \
product(bool, DeoptimizeRandom, false, \ product(bool, DeoptimizeRandom, false, \
"deoptimize random frames on random exit from the runtime system")\ "Deoptimize random frames on random exit from the runtime system")\
\ \
notproduct(bool, ZombieALot, false, \ notproduct(bool, ZombieALot, false, \
"creates zombies (non-entrant) at exit from the runt. system") \ "Create zombies (non-entrant) at exit from the runtime system") \
\ \
product(bool, UnlinkSymbolsALot, false, \ product(bool, UnlinkSymbolsALot, false, \
"unlink unreferenced symbols from the symbol table at safepoints")\ "Unlink unreferenced symbols from the symbol table at safepoints")\
\ \
notproduct(bool, WalkStackALot, false, \ notproduct(bool, WalkStackALot, false, \
"trace stack (no print) at every exit from the runtime system") \ "Trace stack (no print) at every exit from the runtime system") \
\ \
product(bool, Debugging, false, \ product(bool, Debugging, false, \
"set when executing debug methods in debug.ccp " \ "Set when executing debug methods in debug.cpp " \
"(to prevent triggering assertions)") \ "(to prevent triggering assertions)") \
\ \
notproduct(bool, StrictSafepointChecks, trueInDebug, \ notproduct(bool, StrictSafepointChecks, trueInDebug, \
"Enable strict checks that safepoints cannot happen for threads " \ "Enable strict checks that safepoints cannot happen for threads " \
"that used No_Safepoint_Verifier") \ "that use No_Safepoint_Verifier") \
\ \
notproduct(bool, VerifyLastFrame, false, \ notproduct(bool, VerifyLastFrame, false, \
"Verify oops on last frame on entry to VM") \ "Verify oops on last frame on entry to VM") \
\ \
develop(bool, TraceHandleAllocation, false, \ develop(bool, TraceHandleAllocation, false, \
"Prints out warnings when suspicious many handles are allocated") \ "Print out warnings when suspiciously many handles are allocated")\
\ \
product(bool, UseCompilerSafepoints, true, \ product(bool, UseCompilerSafepoints, true, \
"Stop at safepoints in compiled code") \ "Stop at safepoints in compiled code") \
\ \
product(bool, FailOverToOldVerifier, true, \ product(bool, FailOverToOldVerifier, true, \
"fail over to old verifier when split verifier fails") \ "Fail over to old verifier when split verifier fails") \
\ \
develop(bool, ShowSafepointMsgs, false, \ develop(bool, ShowSafepointMsgs, false, \
"Show msg. about safepoint synch.") \ "Show message about safepoint synchronization") \
\ \
product(bool, SafepointTimeout, false, \ product(bool, SafepointTimeout, false, \
"Time out and warn or fail after SafepointTimeoutDelay " \ "Time out and warn or fail after SafepointTimeoutDelay " \
...@@ -756,19 +763,19 @@ class CommandLineFlags { ...@@ -756,19 +763,19 @@ class CommandLineFlags {
"Trace external suspend wait failures") \ "Trace external suspend wait failures") \
\ \
product(bool, MaxFDLimit, true, \ product(bool, MaxFDLimit, true, \
"Bump the number of file descriptors to max in solaris.") \ "Bump the number of file descriptors to maximum in Solaris") \
\ \
diagnostic(bool, LogEvents, true, \ diagnostic(bool, LogEvents, true, \
"Enable the various ring buffer event logs") \ "Enable the various ring buffer event logs") \
\ \
diagnostic(uintx, LogEventsBufferEntries, 10, \ diagnostic(uintx, LogEventsBufferEntries, 10, \
"Enable the various ring buffer event logs") \ "Number of ring buffer event logs") \
\ \
product(bool, BytecodeVerificationRemote, true, \ product(bool, BytecodeVerificationRemote, true, \
"Enables the Java bytecode verifier for remote classes") \ "Enable the Java bytecode verifier for remote classes") \
\ \
product(bool, BytecodeVerificationLocal, false, \ product(bool, BytecodeVerificationLocal, false, \
"Enables the Java bytecode verifier for local classes") \ "Enable the Java bytecode verifier for local classes") \
\ \
develop(bool, ForceFloatExceptions, trueInDebug, \ develop(bool, ForceFloatExceptions, trueInDebug, \
"Force exceptions on FP stack under/overflow") \ "Force exceptions on FP stack under/overflow") \
...@@ -780,7 +787,7 @@ class CommandLineFlags { ...@@ -780,7 +787,7 @@ class CommandLineFlags {
"Trace java language assertions") \ "Trace java language assertions") \
\ \
notproduct(bool, CheckAssertionStatusDirectives, false, \ notproduct(bool, CheckAssertionStatusDirectives, false, \
"temporary - see javaClasses.cpp") \ "Temporary - see javaClasses.cpp") \
\ \
notproduct(bool, PrintMallocFree, false, \ notproduct(bool, PrintMallocFree, false, \
"Trace calls to C heap malloc/free allocation") \ "Trace calls to C heap malloc/free allocation") \
@@ -799,16 +806,16 @@ class CommandLineFlags {
"entering the VM") \ "entering the VM") \
\ \
notproduct(bool, CheckOopishValues, false, \ notproduct(bool, CheckOopishValues, false, \
"Warn if value contains oop ( requires ZapDeadLocals)") \ "Warn if value contains oop (requires ZapDeadLocals)") \
\ \
develop(bool, UseMallocOnly, false, \ develop(bool, UseMallocOnly, false, \
"use only malloc/free for allocation (no resource area/arena)") \ "Use only malloc/free for allocation (no resource area/arena)") \
\ \
develop(bool, PrintMalloc, false, \ develop(bool, PrintMalloc, false, \
"print all malloc/free calls") \ "Print all malloc/free calls") \
\ \
develop(bool, PrintMallocStatistics, false, \ develop(bool, PrintMallocStatistics, false, \
"print malloc/free statistics") \ "Print malloc/free statistics") \
\ \
develop(bool, ZapResourceArea, trueInDebug, \ develop(bool, ZapResourceArea, trueInDebug, \
"Zap freed resource/arena space with 0xABABABAB") \ "Zap freed resource/arena space with 0xABABABAB") \
@@ -820,7 +827,7 @@ class CommandLineFlags {
"Zap freed JNI handle space with 0xFEFEFEFE") \ "Zap freed JNI handle space with 0xFEFEFEFE") \
\ \
notproduct(bool, ZapStackSegments, trueInDebug, \ notproduct(bool, ZapStackSegments, trueInDebug, \
"Zap allocated/freed Stack segments with 0xFADFADED") \ "Zap allocated/freed stack segments with 0xFADFADED") \
\ \
develop(bool, ZapUnusedHeapArea, trueInDebug, \ develop(bool, ZapUnusedHeapArea, trueInDebug, \
"Zap unused heap space with 0xBAADBABE") \ "Zap unused heap space with 0xBAADBABE") \
@@ -835,7 +842,7 @@ class CommandLineFlags {
"Zap filler objects with 0xDEAFBABE") \ "Zap filler objects with 0xDEAFBABE") \
\ \
develop(bool, PrintVMMessages, true, \ develop(bool, PrintVMMessages, true, \
"Print vm messages on console") \ "Print VM messages on console") \
\ \
product(bool, PrintGCApplicationConcurrentTime, false, \ product(bool, PrintGCApplicationConcurrentTime, false, \
"Print the time the application has been running") \ "Print the time the application has been running") \
@@ -844,21 +851,21 @@ class CommandLineFlags {
"Print the time the application has been stopped") \ "Print the time the application has been stopped") \
\ \
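The two application-time flags above are commonly combined with GC logging to gauge safepoint overhead; an illustrative invocation (log path and class name are placeholders):

    java -XX:+PrintGCApplicationConcurrentTime \
         -XX:+PrintGCApplicationStoppedTime \
         -Xloggc:/tmp/gc.log Main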
diagnostic(bool, VerboseVerification, false, \ diagnostic(bool, VerboseVerification, false, \
"Display detailed verification details") \ "Display detailed verification details") \
\ \
notproduct(uintx, ErrorHandlerTest, 0, \ notproduct(uintx, ErrorHandlerTest, 0, \
"If > 0, provokes an error after VM initialization; the value" \ "If > 0, provokes an error after VM initialization; the value " \
"determines which error to provoke. See test_error_handler()" \ "determines which error to provoke. See test_error_handler() " \
"in debug.cpp.") \ "in debug.cpp.") \
\ \
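ErrorHandlerTest is a notproduct flag, so it exists only in non-product (debug) builds, where it provokes a chosen error at startup to exercise the error reporter; a sketch, assuming a fastdebug build (the value-to-error mapping is in test_error_handler() in debug.cpp):

    java -XX:ErrorHandlerTest=2 -version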
develop(bool, Verbose, false, \ develop(bool, Verbose, false, \
"Prints additional debugging information from other modes") \ "Print additional debugging information from other modes") \
\ \
develop(bool, PrintMiscellaneous, false, \ develop(bool, PrintMiscellaneous, false, \
"Prints uncategorized debugging information (requires +Verbose)") \ "Print uncategorized debugging information (requires +Verbose)") \
\ \
develop(bool, WizardMode, false, \ develop(bool, WizardMode, false, \
"Prints much more debugging information") \ "Print much more debugging information") \
\ \
product(bool, ShowMessageBoxOnError, false, \ product(bool, ShowMessageBoxOnError, false, \
"Keep process alive on VM fatal error") \ "Keep process alive on VM fatal error") \
@@ -870,7 +877,7 @@ class CommandLineFlags {
"Let VM fatal error propagate to the OS (ie. WER on Windows)") \ "Let VM fatal error propagate to the OS (ie. WER on Windows)") \
\ \
product(bool, SuppressFatalErrorMessage, false, \ product(bool, SuppressFatalErrorMessage, false, \
"Do NO Fatal Error report [Avoid deadlock]") \ "Report NO fatal error message (avoid deadlock)") \
\ \
product(ccstrlist, OnError, "", \ product(ccstrlist, OnError, "", \
"Run user-defined commands on fatal error; see VMError.cpp " \ "Run user-defined commands on fatal error; see VMError.cpp " \
@@ -880,17 +887,17 @@ class CommandLineFlags {
"Run user-defined commands on first java.lang.OutOfMemoryError") \ "Run user-defined commands on first java.lang.OutOfMemoryError") \
\ \
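Both error hooks substitute %p with the VM's process id; the commands below are illustrative only (Main is a placeholder):

    java -XX:OnError="gcore %p; gdb -p %p" Main
    java -XX:OnOutOfMemoryError="kill -3 %p" Main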
manageable(bool, HeapDumpBeforeFullGC, false, \ manageable(bool, HeapDumpBeforeFullGC, false, \
"Dump heap to file before any major stop-world GC") \ "Dump heap to file before any major stop-the-world GC") \
\ \
manageable(bool, HeapDumpAfterFullGC, false, \ manageable(bool, HeapDumpAfterFullGC, false, \
"Dump heap to file after any major stop-world GC") \ "Dump heap to file after any major stop-the-world GC") \
\ \
manageable(bool, HeapDumpOnOutOfMemoryError, false, \ manageable(bool, HeapDumpOnOutOfMemoryError, false, \
"Dump heap to file when java.lang.OutOfMemoryError is thrown") \ "Dump heap to file when java.lang.OutOfMemoryError is thrown") \
\ \
manageable(ccstr, HeapDumpPath, NULL, \ manageable(ccstr, HeapDumpPath, NULL, \
"When HeapDumpOnOutOfMemoryError is on, the path (filename or" \ "When HeapDumpOnOutOfMemoryError is on, the path (filename or " \
"directory) of the dump file (defaults to java_pid<pid>.hprof" \ "directory) of the dump file (defaults to java_pid<pid>.hprof " \
"in the working directory)") \ "in the working directory)") \
\ \
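A typical pairing of the heap dump flags (the dump directory is a placeholder):

    java -XX:+HeapDumpOnOutOfMemoryError \
         -XX:HeapDumpPath=/var/dumps Main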
develop(uintx, SegmentedHeapDumpThreshold, 2*G, \ develop(uintx, SegmentedHeapDumpThreshold, 2*G, \
@@ -904,10 +911,10 @@ class CommandLineFlags {
"Execute breakpoint upon encountering VM warning") \ "Execute breakpoint upon encountering VM warning") \
\ \
develop(bool, TraceVMOperation, false, \ develop(bool, TraceVMOperation, false, \
"Trace vm operations") \ "Trace VM operations") \
\ \
develop(bool, UseFakeTimers, false, \ develop(bool, UseFakeTimers, false, \
"Tells whether the VM should use system time or a fake timer") \ "Tell whether the VM should use system time or a fake timer") \
\ \
product(ccstr, NativeMemoryTracking, "off", \ product(ccstr, NativeMemoryTracking, "off", \
"Native memory tracking options") \ "Native memory tracking options") \
@@ -917,7 +924,7 @@ class CommandLineFlags {
\ \
diagnostic(bool, AutoShutdownNMT, true, \ diagnostic(bool, AutoShutdownNMT, true, \
"Automatically shutdown native memory tracking under stress " \ "Automatically shutdown native memory tracking under stress " \
"situation. When set to false, native memory tracking tries to " \ "situations. When set to false, native memory tracking tries to " \
"stay alive at the expense of JVM performance") \ "stay alive at the expense of JVM performance") \
\ \
diagnostic(bool, LogCompilation, false, \ diagnostic(bool, LogCompilation, false, \
@@ -927,12 +934,12 @@ class CommandLineFlags {
"Print compilations") \ "Print compilations") \
\ \
diagnostic(bool, TraceNMethodInstalls, false, \ diagnostic(bool, TraceNMethodInstalls, false, \
"Trace nmethod intallation") \ "Trace nmethod installation") \
\ \
diagnostic(intx, ScavengeRootsInCode, 2, \ diagnostic(intx, ScavengeRootsInCode, 2, \
"0: do not allow scavengable oops in the code cache; " \ "0: do not allow scavengable oops in the code cache; " \
"1: allow scavenging from the code cache; " \ "1: allow scavenging from the code cache; " \
"2: emit as many constants as the compiler can see") \ "2: emit as many constants as the compiler can see") \
\ \
product(bool, AlwaysRestoreFPU, false, \ product(bool, AlwaysRestoreFPU, false, \
"Restore the FPU control word after every JNI call (expensive)") \ "Restore the FPU control word after every JNI call (expensive)") \
@@ -953,7 +960,7 @@ class CommandLineFlags {
"Print assembly code (using external disassembler.so)") \ "Print assembly code (using external disassembler.so)") \
\ \
diagnostic(ccstr, PrintAssemblyOptions, NULL, \ diagnostic(ccstr, PrintAssemblyOptions, NULL, \
"Options string passed to disassembler.so") \ "Print options string passed to disassembler.so") \
\ \
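PrintAssembly is a diagnostic flag, so it must be unlocked first, and it emits output only when the hsdis disassembler plugin is on the library path; an illustrative invocation:

    java -XX:+UnlockDiagnosticVMOptions -XX:+PrintAssembly \
         -XX:PrintAssemblyOptions=intel Main   # Intel syntax, if hsdis supports it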
diagnostic(bool, PrintNMethods, false, \ diagnostic(bool, PrintNMethods, false, \
"Print assembly code for nmethods when generated") \ "Print assembly code for nmethods when generated") \
@@ -974,20 +981,21 @@ class CommandLineFlags {
"Print exception handler tables for all nmethods when generated") \ "Print exception handler tables for all nmethods when generated") \
\ \
develop(bool, StressCompiledExceptionHandlers, false, \ develop(bool, StressCompiledExceptionHandlers, false, \
"Exercise compiled exception handlers") \ "Exercise compiled exception handlers") \
\ \
develop(bool, InterceptOSException, false, \ develop(bool, InterceptOSException, false, \
"Starts debugger when an implicit OS (e.g., NULL) " \ "Start debugger when an implicit OS (e.g. NULL) " \
"exception happens") \ "exception happens") \
\ \
product(bool, PrintCodeCache, false, \ product(bool, PrintCodeCache, false, \
"Print the code cache memory usage when exiting") \ "Print the code cache memory usage when exiting") \
\ \
develop(bool, PrintCodeCache2, false, \ develop(bool, PrintCodeCache2, false, \
"Print detailed usage info on the code cache when exiting") \ "Print detailed usage information on the code cache when exiting")\
\ \
product(bool, PrintCodeCacheOnCompilation, false, \ product(bool, PrintCodeCacheOnCompilation, false, \
"Print the code cache memory usage each time a method is compiled") \ "Print the code cache memory usage each time a method is " \
"compiled") \
\ \
diagnostic(bool, PrintStubCode, false, \ diagnostic(bool, PrintStubCode, false, \
"Print generated stub code") \ "Print generated stub code") \
@@ -999,40 +1007,40 @@ class CommandLineFlags {
"Omit backtraces for some 'hot' exceptions in optimized code") \ "Omit backtraces for some 'hot' exceptions in optimized code") \
\ \
product(bool, ProfilerPrintByteCodeStatistics, false, \ product(bool, ProfilerPrintByteCodeStatistics, false, \
"Prints byte code statictics when dumping profiler output") \ "Print bytecode statistics when dumping profiler output") \
\ \
product(bool, ProfilerRecordPC, false, \ product(bool, ProfilerRecordPC, false, \
"Collects tick for each 16 byte interval of compiled code") \ "Collect ticks for each 16 byte interval of compiled code") \
\ \
product(bool, ProfileVM, false, \ product(bool, ProfileVM, false, \
"Profiles ticks that fall within VM (either in the VM Thread " \ "Profile ticks that fall within VM (either in the VM Thread " \
"or VM code called through stubs)") \ "or VM code called through stubs)") \
\ \
product(bool, ProfileIntervals, false, \ product(bool, ProfileIntervals, false, \
"Prints profiles for each interval (see ProfileIntervalsTicks)") \ "Print profiles for each interval (see ProfileIntervalsTicks)") \
\ \
notproduct(bool, ProfilerCheckIntervals, false, \ notproduct(bool, ProfilerCheckIntervals, false, \
"Collect and print info on spacing of profiler ticks") \ "Collect and print information on spacing of profiler ticks") \
\ \
develop(bool, PrintJVMWarnings, false, \ develop(bool, PrintJVMWarnings, false, \
"Prints warnings for unimplemented JVM functions") \ "Print warnings for unimplemented JVM functions") \
\ \
product(bool, PrintWarnings, true, \ product(bool, PrintWarnings, true, \
"Prints JVM warnings to output stream") \ "Print JVM warnings to output stream") \
\ \
notproduct(uintx, WarnOnStalledSpinLock, 0, \ notproduct(uintx, WarnOnStalledSpinLock, 0, \
"Prints warnings for stalled SpinLocks") \ "Print warnings for stalled SpinLocks") \
\ \
product(bool, RegisterFinalizersAtInit, true, \ product(bool, RegisterFinalizersAtInit, true, \
"Register finalizable objects at end of Object.<init> or " \ "Register finalizable objects at end of Object.<init> or " \
"after allocation") \ "after allocation") \
\ \
develop(bool, RegisterReferences, true, \ develop(bool, RegisterReferences, true, \
"Tells whether the VM should register soft/weak/final/phantom " \ "Tell whether the VM should register soft/weak/final/phantom " \
"references") \ "references") \
\ \
develop(bool, IgnoreRewrites, false, \ develop(bool, IgnoreRewrites, false, \
"Supress rewrites of bytecodes in the oopmap generator. " \ "Suppress rewrites of bytecodes in the oopmap generator. " \
"This is unsafe!") \ "This is unsafe!") \
\ \
develop(bool, PrintCodeCacheExtension, false, \ develop(bool, PrintCodeCacheExtension, false, \
@@ -1042,8 +1050,7 @@ class CommandLineFlags {
"Enable the security JVM functions") \ "Enable the security JVM functions") \
\ \
develop(bool, ProtectionDomainVerification, true, \ develop(bool, ProtectionDomainVerification, true, \
"Verifies protection domain before resolution in system " \ "Verify protection domain before resolution in system dictionary")\
"dictionary") \
\ \
product(bool, ClassUnloading, true, \ product(bool, ClassUnloading, true, \
"Do unloading of classes") \ "Do unloading of classes") \
@@ -1056,14 +1063,14 @@ class CommandLineFlags {
"Write memory usage profiling to log file") \ "Write memory usage profiling to log file") \
\ \
notproduct(bool, PrintSystemDictionaryAtExit, false, \ notproduct(bool, PrintSystemDictionaryAtExit, false, \
"Prints the system dictionary at exit") \ "Print the system dictionary at exit") \
\ \
experimental(intx, PredictedLoadedClassCount, 0, \ experimental(intx, PredictedLoadedClassCount, 0, \
"Experimental: Tune loaded class cache starting size.") \ "Experimental: Tune loaded class cache starting size") \
\ \
diagnostic(bool, UnsyncloadClass, false, \ diagnostic(bool, UnsyncloadClass, false, \
"Unstable: VM calls loadClass unsynchronized. Custom " \ "Unstable: VM calls loadClass unsynchronized. Custom " \
"class loader must call VM synchronized for findClass " \ "class loader must call VM synchronized for findClass " \
"and defineClass.") \ "and defineClass.") \
\ \
product(bool, AlwaysLockClassLoader, false, \ product(bool, AlwaysLockClassLoader, false, \
@@ -1079,22 +1086,22 @@ class CommandLineFlags {
"Call loadClassInternal() rather than loadClass()") \ "Call loadClassInternal() rather than loadClass()") \
\ \
product_pd(bool, DontYieldALot, \ product_pd(bool, DontYieldALot, \
"Throw away obvious excess yield calls (for SOLARIS only)") \ "Throw away obvious excess yield calls (for Solaris only)") \
\ \
product_pd(bool, ConvertSleepToYield, \ product_pd(bool, ConvertSleepToYield, \
"Converts sleep(0) to thread yield " \ "Convert sleep(0) to thread yield " \
"(may be off for SOLARIS to improve GUI)") \ "(may be off for Solaris to improve GUI)") \
\ \
product(bool, ConvertYieldToSleep, false, \ product(bool, ConvertYieldToSleep, false, \
"Converts yield to a sleep of MinSleepInterval to simulate Win32 "\ "Convert yield to a sleep of MinSleepInterval to simulate Win32 " \
"behavior (SOLARIS only)") \ "behavior (Solaris only)") \
\ \
product(bool, UseBoundThreads, true, \ product(bool, UseBoundThreads, true, \
"Bind user level threads to kernel threads (for SOLARIS only)") \ "Bind user level threads to kernel threads (for Solaris only)") \
\ \
develop(bool, UseDetachedThreads, true, \ develop(bool, UseDetachedThreads, true, \
"Use detached threads that are recycled upon termination " \ "Use detached threads that are recycled upon termination " \
"(for SOLARIS only)") \ "(for Solaris only)") \
\ \
product(bool, UseLWPSynchronization, true, \ product(bool, UseLWPSynchronization, true, \
"Use LWP-based instead of libthread-based synchronization " \ "Use LWP-based instead of libthread-based synchronization " \
@@ -1104,41 +1111,43 @@ class CommandLineFlags {
"(Unstable) Various monitor synchronization tunables") \ "(Unstable) Various monitor synchronization tunables") \
\ \
product(intx, EmitSync, 0, \ product(intx, EmitSync, 0, \
"(Unsafe,Unstable) " \ "(Unsafe, Unstable) " \
" Controls emission of inline sync fast-path code") \ "Control emission of inline sync fast-path code") \
\ \
product(intx, MonitorBound, 0, "Bound Monitor population") \ product(intx, MonitorBound, 0, "Bound Monitor population") \
\ \
product(bool, MonitorInUseLists, false, "Track Monitors for Deflation") \ product(bool, MonitorInUseLists, false, "Track Monitors for Deflation") \
\ \
product(intx, SyncFlags, 0, "(Unsafe,Unstable) Experimental Sync flags" ) \ product(intx, SyncFlags, 0, "(Unsafe, Unstable) Experimental Sync flags") \
\ \
product(intx, SyncVerbose, 0, "(Unstable)" ) \ product(intx, SyncVerbose, 0, "(Unstable)") \
\ \
product(intx, ClearFPUAtPark, 0, "(Unsafe,Unstable)" ) \ product(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)") \
\ \
product(intx, hashCode, 5, \ product(intx, hashCode, 5, \
"(Unstable) select hashCode generation algorithm" ) \ "(Unstable) select hashCode generation algorithm") \
\ \
product(intx, WorkAroundNPTLTimedWaitHang, 1, \ product(intx, WorkAroundNPTLTimedWaitHang, 1, \
"(Unstable, Linux-specific)" \ "(Unstable, Linux-specific) " \
" avoid NPTL-FUTEX hang pthread_cond_timedwait" ) \ "avoid NPTL-FUTEX hang pthread_cond_timedwait") \
\ \
product(bool, FilterSpuriousWakeups, true, \ product(bool, FilterSpuriousWakeups, true, \
"Prevent spurious or premature wakeups from object.wait " \ "Prevent spurious or premature wakeups from object.wait " \
"(Solaris only)") \ "(Solaris only)") \
\ \
product(intx, NativeMonitorTimeout, -1, "(Unstable)" ) \ product(intx, NativeMonitorTimeout, -1, "(Unstable)") \
product(intx, NativeMonitorFlags, 0, "(Unstable)" ) \ \
product(intx, NativeMonitorSpinLimit, 20, "(Unstable)" ) \ product(intx, NativeMonitorFlags, 0, "(Unstable)") \
\
product(intx, NativeMonitorSpinLimit, 20, "(Unstable)") \
\ \
develop(bool, UsePthreads, false, \ develop(bool, UsePthreads, false, \
"Use pthread-based instead of libthread-based synchronization " \ "Use pthread-based instead of libthread-based synchronization " \
"(SPARC only)") \ "(SPARC only)") \
\ \
product(bool, AdjustConcurrency, false, \ product(bool, AdjustConcurrency, false, \
"call thr_setconcurrency at thread create time to avoid " \ "Call thr_setconcurrency at thread creation time to avoid " \
"LWP starvation on MP systems (For Solaris Only)") \ "LWP starvation on MP systems (for Solaris Only)") \
\ \
product(bool, ReduceSignalUsage, false, \ product(bool, ReduceSignalUsage, false, \
"Reduce the use of OS signals in Java and/or the VM") \ "Reduce the use of OS signals in Java and/or the VM") \
@@ -1147,13 +1156,14 @@ class CommandLineFlags {
"Share vtable stubs (smaller code but worse branch prediction") \ "Share vtable stubs (smaller code but worse branch prediction") \
\ \
develop(bool, LoadLineNumberTables, true, \ develop(bool, LoadLineNumberTables, true, \
"Tells whether the class file parser loads line number tables") \ "Tell whether the class file parser loads line number tables") \
\ \
develop(bool, LoadLocalVariableTables, true, \ develop(bool, LoadLocalVariableTables, true, \
"Tells whether the class file parser loads local variable tables")\ "Tell whether the class file parser loads local variable tables") \
\ \
develop(bool, LoadLocalVariableTypeTables, true, \ develop(bool, LoadLocalVariableTypeTables, true, \
"Tells whether the class file parser loads local variable type tables")\ "Tell whether the class file parser loads local variable type" \
"tables") \
\ \
product(bool, AllowUserSignalHandlers, false, \ product(bool, AllowUserSignalHandlers, false, \
"Do not complain if the application installs signal handlers " \ "Do not complain if the application installs signal handlers " \
@@ -1184,10 +1194,12 @@ class CommandLineFlags {
\ \
product(bool, EagerXrunInit, false, \ product(bool, EagerXrunInit, false, \
"Eagerly initialize -Xrun libraries; allows startup profiling, " \ "Eagerly initialize -Xrun libraries; allows startup profiling, " \
" but not all -Xrun libraries may support the state of the VM at this time") \ "but not all -Xrun libraries may support the state of the VM " \
"at this time") \
\ \
product(bool, PreserveAllAnnotations, false, \ product(bool, PreserveAllAnnotations, false, \
"Preserve RuntimeInvisibleAnnotations as well as RuntimeVisibleAnnotations") \ "Preserve RuntimeInvisibleAnnotations as well " \
"as RuntimeVisibleAnnotations") \
\ \
develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \ develop(uintx, PreallocatedOutOfMemoryErrorCount, 4, \
"Number of OutOfMemoryErrors preallocated with backtrace") \ "Number of OutOfMemoryErrors preallocated with backtrace") \
@@ -1262,7 +1274,7 @@ class CommandLineFlags {
"Trace level for JVMTI RedefineClasses") \ "Trace level for JVMTI RedefineClasses") \
\ \
develop(bool, StressMethodComparator, false, \ develop(bool, StressMethodComparator, false, \
"run the MethodComparator on all loaded methods") \ "Run the MethodComparator on all loaded methods") \
\ \
/* change to false by default sometime after Mustang */ \ /* change to false by default sometime after Mustang */ \
product(bool, VerifyMergedCPBytecodes, true, \ product(bool, VerifyMergedCPBytecodes, true, \
@@ -1296,7 +1308,7 @@ class CommandLineFlags {
"Trace dependencies") \ "Trace dependencies") \
\ \
develop(bool, VerifyDependencies, trueInDebug, \ develop(bool, VerifyDependencies, trueInDebug, \
"Exercise and verify the compilation dependency mechanism") \ "Exercise and verify the compilation dependency mechanism") \
\ \
develop(bool, TraceNewOopMapGeneration, false, \ develop(bool, TraceNewOopMapGeneration, false, \
"Trace OopMapGeneration") \ "Trace OopMapGeneration") \
@@ -1314,7 +1326,7 @@ class CommandLineFlags {
"Trace monitor matching failures during OopMapGeneration") \ "Trace monitor matching failures during OopMapGeneration") \
\ \
develop(bool, TraceOopMapRewrites, false, \ develop(bool, TraceOopMapRewrites, false, \
"Trace rewritting of method oops during oop map generation") \ "Trace rewriting of method oops during oop map generation") \
\ \
develop(bool, TraceSafepoint, false, \ develop(bool, TraceSafepoint, false, \
"Trace safepoint operations") \ "Trace safepoint operations") \
@@ -1332,10 +1344,10 @@ class CommandLineFlags {
"Trace setup time") \ "Trace setup time") \
\ \
develop(bool, TraceProtectionDomainVerification, false, \ develop(bool, TraceProtectionDomainVerification, false, \
"Trace protection domain verifcation") \ "Trace protection domain verification") \
\ \
develop(bool, TraceClearedExceptions, false, \ develop(bool, TraceClearedExceptions, false, \
"Prints when an exception is forcibly cleared") \ "Print when an exception is forcibly cleared") \
\ \
product(bool, TraceClassResolution, false, \ product(bool, TraceClassResolution, false, \
"Trace all constant pool resolutions (for debugging)") \ "Trace all constant pool resolutions (for debugging)") \
@@ -1349,7 +1361,7 @@ class CommandLineFlags {
/* gc */ \ /* gc */ \
\ \
product(bool, UseSerialGC, false, \ product(bool, UseSerialGC, false, \
"Use the serial garbage collector") \ "Use the Serial garbage collector") \
\ \
product(bool, UseG1GC, false, \ product(bool, UseG1GC, false, \
"Use the Garbage-First garbage collector") \ "Use the Garbage-First garbage collector") \
@@ -1368,16 +1380,16 @@ class CommandLineFlags {
"The collection count for the first maximum compaction") \ "The collection count for the first maximum compaction") \
\ \
product(bool, UseMaximumCompactionOnSystemGC, true, \ product(bool, UseMaximumCompactionOnSystemGC, true, \
"In the Parallel Old garbage collector maximum compaction for " \ "Use maximum compaction in the Parallel Old garbage collector " \
"a system GC") \ "for a system GC") \
\ \
product(uintx, ParallelOldDeadWoodLimiterMean, 50, \ product(uintx, ParallelOldDeadWoodLimiterMean, 50, \
"The mean used by the par compact dead wood" \ "The mean used by the parallel compact dead wood " \
"limiter (a number between 0-100).") \ "limiter (a number between 0-100)") \
\ \
product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \ product(uintx, ParallelOldDeadWoodLimiterStdDev, 80, \
"The standard deviation used by the par compact dead wood" \ "The standard deviation used by the parallel compact dead wood " \
"limiter (a number between 0-100).") \ "limiter (a number between 0-100)") \
\ \
product(uintx, ParallelGCThreads, 0, \ product(uintx, ParallelGCThreads, 0, \
"Number of parallel threads parallel gc will use") \ "Number of parallel threads parallel gc will use") \
@@ -1387,7 +1399,7 @@ class CommandLineFlags {
"parallel gc will use") \ "parallel gc will use") \
\ \
diagnostic(bool, ForceDynamicNumberOfGCThreads, false, \ diagnostic(bool, ForceDynamicNumberOfGCThreads, false, \
"Force dynamic selection of the number of" \ "Force dynamic selection of the number of " \
"parallel threads parallel gc will use to aid debugging") \ "parallel threads parallel gc will use to aid debugging") \
\ \
product(uintx, HeapSizePerGCThread, ScaleForWordSize(64*M), \ product(uintx, HeapSizePerGCThread, ScaleForWordSize(64*M), \
@@ -1398,7 +1410,7 @@ class CommandLineFlags {
"Trace the dynamic GC thread usage") \ "Trace the dynamic GC thread usage") \
\ \
develop(bool, ParallelOldGCSplitALot, false, \ develop(bool, ParallelOldGCSplitALot, false, \
"Provoke splitting (copying data from a young gen space to" \ "Provoke splitting (copying data from a young gen space to " \
"multiple destination spaces)") \ "multiple destination spaces)") \
\ \
develop(uintx, ParallelOldGCSplitInterval, 3, \ develop(uintx, ParallelOldGCSplitInterval, 3, \
@@ -1408,19 +1420,19 @@ class CommandLineFlags {
"Number of threads concurrent gc will use") \ "Number of threads concurrent gc will use") \
\ \
product(uintx, YoungPLABSize, 4096, \ product(uintx, YoungPLABSize, 4096, \
"Size of young gen promotion labs (in HeapWords)") \ "Size of young gen promotion LAB's (in HeapWords)") \
\ \
product(uintx, OldPLABSize, 1024, \ product(uintx, OldPLABSize, 1024, \
"Size of old gen promotion labs (in HeapWords)") \ "Size of old gen promotion LAB's (in HeapWords)") \
\ \
product(uintx, GCTaskTimeStampEntries, 200, \ product(uintx, GCTaskTimeStampEntries, 200, \
"Number of time stamp entries per gc worker thread") \ "Number of time stamp entries per gc worker thread") \
\ \
product(bool, AlwaysTenure, false, \ product(bool, AlwaysTenure, false, \
"Always tenure objects in eden. (ParallelGC only)") \ "Always tenure objects in eden (ParallelGC only)") \
\ \
product(bool, NeverTenure, false, \ product(bool, NeverTenure, false, \
"Never tenure objects in eden, May tenure on overflow " \ "Never tenure objects in eden, may tenure on overflow " \
"(ParallelGC only)") \ "(ParallelGC only)") \
\ \
product(bool, ScavengeBeforeFullGC, true, \ product(bool, ScavengeBeforeFullGC, true, \
@@ -1428,14 +1440,14 @@ class CommandLineFlags {
"used with UseParallelGC") \ "used with UseParallelGC") \
\ \
develop(bool, ScavengeWithObjectsInToSpace, false, \ develop(bool, ScavengeWithObjectsInToSpace, false, \
"Allow scavenges to occur when to_space contains objects.") \ "Allow scavenges to occur when to-space contains objects") \
\ \
product(bool, UseConcMarkSweepGC, false, \ product(bool, UseConcMarkSweepGC, false, \
"Use Concurrent Mark-Sweep GC in the old generation") \ "Use Concurrent Mark-Sweep GC in the old generation") \
\ \
product(bool, ExplicitGCInvokesConcurrent, false, \ product(bool, ExplicitGCInvokesConcurrent, false, \
"A System.gc() request invokes a concurrent collection;" \ "A System.gc() request invokes a concurrent collection; " \
" (effective only when UseConcMarkSweepGC)") \ "(effective only when UseConcMarkSweepGC)") \
\ \
product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \ product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \
"A System.gc() request invokes a concurrent collection and " \ "A System.gc() request invokes a concurrent collection and " \
@@ -1443,19 +1455,19 @@ class CommandLineFlags {
"(effective only when UseConcMarkSweepGC)") \ "(effective only when UseConcMarkSweepGC)") \
\ \
product(bool, GCLockerInvokesConcurrent, false, \ product(bool, GCLockerInvokesConcurrent, false, \
"The exit of a JNI CS necessitating a scavenge also" \ "The exit of a JNI critical section necessitating a scavenge, " \
" kicks off a bkgrd concurrent collection") \ "also kicks off a background concurrent collection") \
\ \
product(uintx, GCLockerEdenExpansionPercent, 5, \ product(uintx, GCLockerEdenExpansionPercent, 5, \
"How much the GC can expand the eden by while the GC locker " \ "How much the GC can expand the eden by while the GC locker " \
"is active (as a percentage)") \ "is active (as a percentage)") \
\ \
diagnostic(intx, GCLockerRetryAllocationCount, 2, \ diagnostic(intx, GCLockerRetryAllocationCount, 2, \
"Number of times to retry allocations when" \ "Number of times to retry allocations when " \
" blocked by the GC locker") \ "blocked by the GC locker") \
\ \
develop(bool, UseCMSAdaptiveFreeLists, true, \ develop(bool, UseCMSAdaptiveFreeLists, true, \
"Use Adaptive Free Lists in the CMS generation") \ "Use adaptive free lists in the CMS generation") \
\ \
develop(bool, UseAsyncConcMarkSweepGC, true, \ develop(bool, UseAsyncConcMarkSweepGC, true, \
"Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\ "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\
@@ -1470,44 +1482,46 @@ class CommandLineFlags {
"Use passing of collection from background to foreground") \ "Use passing of collection from background to foreground") \
\ \
product(bool, UseParNewGC, false, \ product(bool, UseParNewGC, false, \
"Use parallel threads in the new generation.") \ "Use parallel threads in the new generation") \
\ \
product(bool, ParallelGCVerbose, false, \ product(bool, ParallelGCVerbose, false, \
"Verbose output for parallel GC.") \ "Verbose output for parallel gc") \
\ \
product(uintx, ParallelGCBufferWastePct, 10, \ product(uintx, ParallelGCBufferWastePct, 10, \
"Wasted fraction of parallel allocation buffer.") \ "Wasted fraction of parallel allocation buffer") \
\ \
diagnostic(bool, ParallelGCRetainPLAB, false, \ diagnostic(bool, ParallelGCRetainPLAB, false, \
"Retain parallel allocation buffers across scavenges; " \ "Retain parallel allocation buffers across scavenges; " \
" -- disabled because this currently conflicts with " \ "it is disabled because this currently conflicts with " \
" parallel card scanning under certain conditions ") \ "parallel card scanning under certain conditions.") \
\ \
product(uintx, TargetPLABWastePct, 10, \ product(uintx, TargetPLABWastePct, 10, \
"Target wasted space in last buffer as percent of overall " \ "Target wasted space in last buffer as percent of overall " \
"allocation") \ "allocation") \
\ \
product(uintx, PLABWeight, 75, \ product(uintx, PLABWeight, 75, \
"Percentage (0-100) used to weight the current sample when" \ "Percentage (0-100) used to weigh the current sample when " \
"computing exponentially decaying average for ResizePLAB.") \ "computing exponentially decaying average for ResizePLAB") \
\ \
product(bool, ResizePLAB, true, \ product(bool, ResizePLAB, true, \
"Dynamically resize (survivor space) promotion labs") \ "Dynamically resize (survivor space) promotion LAB's") \
\ \
product(bool, PrintPLAB, false, \ product(bool, PrintPLAB, false, \
"Print (survivor space) promotion labs sizing decisions") \ "Print (survivor space) promotion LAB's sizing decisions") \
\ \
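A sketch of how the PLAB flags above compose (PLAB sizes are in heap words; the values shown are illustrative, not recommendations):

    java -XX:+UseConcMarkSweepGC -XX:+UseParNewGC \
         -XX:YoungPLABSize=4096 -XX:+ResizePLAB -XX:+PrintPLAB Main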
product(intx, ParGCArrayScanChunk, 50, \ product(intx, ParGCArrayScanChunk, 50, \
"Scan a subset and push remainder, if array is bigger than this") \ "Scan a subset of object array and push remainder, if array is " \
"bigger than this") \
\ \
product(bool, ParGCUseLocalOverflow, false, \ product(bool, ParGCUseLocalOverflow, false, \
"Instead of a global overflow list, use local overflow stacks") \ "Instead of a global overflow list, use local overflow stacks") \
\ \
product(bool, ParGCTrimOverflow, true, \ product(bool, ParGCTrimOverflow, true, \
"Eagerly trim the local overflow lists (when ParGCUseLocalOverflow") \ "Eagerly trim the local overflow lists " \
"(when ParGCUseLocalOverflow)") \
\ \
notproduct(bool, ParGCWorkQueueOverflowALot, false, \ notproduct(bool, ParGCWorkQueueOverflowALot, false, \
"Whether we should simulate work queue overflow in ParNew") \ "Simulate work queue overflow in ParNew") \
\ \
notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \ notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \
"An `interval' counter that determines how frequently " \ "An `interval' counter that determines how frequently " \
@@ -1525,43 +1539,46 @@ class CommandLineFlags {
"during card table scanning") \ "during card table scanning") \
\ \
product(uintx, CMSParPromoteBlocksToClaim, 16, \ product(uintx, CMSParPromoteBlocksToClaim, 16, \
"Number of blocks to attempt to claim when refilling CMS LAB for "\ "Number of blocks to attempt to claim when refilling CMS LAB's " \
"parallel GC.") \ "for parallel GC") \
\ \
product(uintx, OldPLABWeight, 50, \ product(uintx, OldPLABWeight, 50, \
"Percentage (0-100) used to weight the current sample when" \ "Percentage (0-100) used to weight the current sample when " \
"computing exponentially decaying average for resizing CMSParPromoteBlocksToClaim.") \ "computing exponentially decaying average for resizing " \
"CMSParPromoteBlocksToClaim") \
\ \
product(bool, ResizeOldPLAB, true, \ product(bool, ResizeOldPLAB, true, \
"Dynamically resize (old gen) promotion labs") \ "Dynamically resize (old gen) promotion LAB's") \
\ \
product(bool, PrintOldPLAB, false, \ product(bool, PrintOldPLAB, false, \
"Print (old gen) promotion labs sizing decisions") \ "Print (old gen) promotion LAB's sizing decisions") \
\ \
product(uintx, CMSOldPLABMin, 16, \ product(uintx, CMSOldPLABMin, 16, \
"Min size of CMS gen promotion lab caches per worker per blksize")\ "Minimum size of CMS gen promotion LAB caches per worker " \
"per block size") \
\ \
product(uintx, CMSOldPLABMax, 1024, \ product(uintx, CMSOldPLABMax, 1024, \
"Max size of CMS gen promotion lab caches per worker per blksize")\ "Maximum size of CMS gen promotion LAB caches per worker " \
"per block size") \
\ \
product(uintx, CMSOldPLABNumRefills, 4, \ product(uintx, CMSOldPLABNumRefills, 4, \
"Nominal number of refills of CMS gen promotion lab cache" \ "Nominal number of refills of CMS gen promotion LAB cache " \
" per worker per block size") \ "per worker per block size") \
\ \
product(bool, CMSOldPLABResizeQuicker, false, \ product(bool, CMSOldPLABResizeQuicker, false, \
"Whether to react on-the-fly during a scavenge to a sudden" \ "React on-the-fly during a scavenge to a sudden " \
" change in block demand rate") \ "change in block demand rate") \
\ \
product(uintx, CMSOldPLABToleranceFactor, 4, \ product(uintx, CMSOldPLABToleranceFactor, 4, \
"The tolerance of the phase-change detector for on-the-fly" \ "The tolerance of the phase-change detector for on-the-fly " \
" PLAB resizing during a scavenge") \ "PLAB resizing during a scavenge") \
\ \
product(uintx, CMSOldPLABReactivityFactor, 2, \ product(uintx, CMSOldPLABReactivityFactor, 2, \
"The gain in the feedback loop for on-the-fly PLAB resizing" \ "The gain in the feedback loop for on-the-fly PLAB resizing " \
" during a scavenge") \ "during a scavenge") \
\ \
product(bool, AlwaysPreTouch, false, \ product(bool, AlwaysPreTouch, false, \
"It forces all freshly committed pages to be pre-touched.") \ "Force all freshly committed pages to be pre-touched") \
\ \
product_pd(uintx, CMSYoungGenPerWorker, \ product_pd(uintx, CMSYoungGenPerWorker, \
"The maximum size of young gen chosen by default per GC worker " \ "The maximum size of young gen chosen by default per GC worker " \
@@ -1571,64 +1588,67 @@ class CommandLineFlags {
"Whether CMS GC should operate in \"incremental\" mode") \ "Whether CMS GC should operate in \"incremental\" mode") \
\ \
product(uintx, CMSIncrementalDutyCycle, 10, \ product(uintx, CMSIncrementalDutyCycle, 10, \
"CMS incremental mode duty cycle (a percentage, 0-100). If" \ "Percentage (0-100) of CMS incremental mode duty cycle. If " \
"CMSIncrementalPacing is enabled, then this is just the initial" \ "CMSIncrementalPacing is enabled, then this is just the initial " \
"value") \ "value.") \
\ \
product(bool, CMSIncrementalPacing, true, \ product(bool, CMSIncrementalPacing, true, \
"Whether the CMS incremental mode duty cycle should be " \ "Whether the CMS incremental mode duty cycle should be " \
"automatically adjusted") \ "automatically adjusted") \
\ \
product(uintx, CMSIncrementalDutyCycleMin, 0, \ product(uintx, CMSIncrementalDutyCycleMin, 0, \
"Lower bound on the duty cycle when CMSIncrementalPacing is " \ "Minimum percentage (0-100) of the CMS incremental duty cycle " \
"enabled (a percentage, 0-100)") \ "used when CMSIncrementalPacing is enabled") \
\ \
product(uintx, CMSIncrementalSafetyFactor, 10, \ product(uintx, CMSIncrementalSafetyFactor, 10, \
"Percentage (0-100) used to add conservatism when computing the " \ "Percentage (0-100) used to add conservatism when computing the " \
"duty cycle") \ "duty cycle") \
\ \
product(uintx, CMSIncrementalOffset, 0, \ product(uintx, CMSIncrementalOffset, 0, \
"Percentage (0-100) by which the CMS incremental mode duty cycle" \ "Percentage (0-100) by which the CMS incremental mode duty cycle "\
" is shifted to the right within the period between young GCs") \ "is shifted to the right within the period between young GCs") \
\ \
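Incremental CMS is layered on top of CMS itself; the duty-cycle flags above then control how its concurrent work is time-sliced, e.g.:

    java -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode \
         -XX:+CMSIncrementalPacing -XX:CMSIncrementalDutyCycleMin=10 Main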
product(uintx, CMSExpAvgFactor, 50, \ product(uintx, CMSExpAvgFactor, 50, \
"Percentage (0-100) used to weight the current sample when" \ "Percentage (0-100) used to weigh the current sample when " \
"computing exponential averages for CMS statistics.") \ "computing exponential averages for CMS statistics") \
\ \
product(uintx, CMS_FLSWeight, 75, \ product(uintx, CMS_FLSWeight, 75, \
"Percentage (0-100) used to weight the current sample when" \ "Percentage (0-100) used to weigh the current sample when " \
"computing exponentially decating averages for CMS FLS statistics.") \ "computing exponentially decaying averages for CMS FLS " \
"statistics") \
\ \
product(uintx, CMS_FLSPadding, 1, \ product(uintx, CMS_FLSPadding, 1, \
"The multiple of deviation from mean to use for buffering" \ "The multiple of deviation from mean to use for buffering " \
"against volatility in free list demand.") \ "against volatility in free list demand") \
\ \
product(uintx, FLSCoalescePolicy, 2, \ product(uintx, FLSCoalescePolicy, 2, \
"CMS: Aggression level for coalescing, increasing from 0 to 4") \ "CMS: aggressiveness level for coalescing, increasing " \
"from 0 to 4") \
\ \
product(bool, FLSAlwaysCoalesceLarge, false, \ product(bool, FLSAlwaysCoalesceLarge, false, \
"CMS: Larger free blocks are always available for coalescing") \ "CMS: larger free blocks are always available for coalescing") \
\ \
product(double, FLSLargestBlockCoalesceProximity, 0.99, \ product(double, FLSLargestBlockCoalesceProximity, 0.99, \
"CMS: the smaller the percentage the greater the coalition force")\ "CMS: the smaller the percentage the greater the coalescing " \
"force") \
\ \
product(double, CMSSmallCoalSurplusPercent, 1.05, \ product(double, CMSSmallCoalSurplusPercent, 1.05, \
"CMS: the factor by which to inflate estimated demand of small" \ "CMS: the factor by which to inflate estimated demand of small " \
" block sizes to prevent coalescing with an adjoining block") \ "block sizes to prevent coalescing with an adjoining block") \
\ \
product(double, CMSLargeCoalSurplusPercent, 0.95, \ product(double, CMSLargeCoalSurplusPercent, 0.95, \
"CMS: the factor by which to inflate estimated demand of large" \ "CMS: the factor by which to inflate estimated demand of large " \
" block sizes to prevent coalescing with an adjoining block") \ "block sizes to prevent coalescing with an adjoining block") \
\ \
product(double, CMSSmallSplitSurplusPercent, 1.10, \ product(double, CMSSmallSplitSurplusPercent, 1.10, \
"CMS: the factor by which to inflate estimated demand of small" \ "CMS: the factor by which to inflate estimated demand of small " \
" block sizes to prevent splitting to supply demand for smaller" \ "block sizes to prevent splitting to supply demand for smaller " \
" blocks") \ "blocks") \
\ \
product(double, CMSLargeSplitSurplusPercent, 1.00, \ product(double, CMSLargeSplitSurplusPercent, 1.00, \
"CMS: the factor by which to inflate estimated demand of large" \ "CMS: the factor by which to inflate estimated demand of large " \
" block sizes to prevent splitting to supply demand for smaller" \ "block sizes to prevent splitting to supply demand for smaller " \
" blocks") \ "blocks") \
\ \
product(bool, CMSExtrapolateSweep, false, \ product(bool, CMSExtrapolateSweep, false, \
"CMS: cushion for block demand during sweep") \ "CMS: cushion for block demand during sweep") \
@@ -1640,11 +1660,11 @@ class CommandLineFlags {
\ \
product(uintx, CMS_SweepPadding, 1, \ product(uintx, CMS_SweepPadding, 1, \
"The multiple of deviation from mean to use for buffering " \ "The multiple of deviation from mean to use for buffering " \
"against volatility in inter-sweep duration.") \ "against volatility in inter-sweep duration") \
\ \
product(uintx, CMS_SweepTimerThresholdMillis, 10, \ product(uintx, CMS_SweepTimerThresholdMillis, 10, \
"Skip block flux-rate sampling for an epoch unless inter-sweep " \ "Skip block flux-rate sampling for an epoch unless inter-sweep " \
"duration exceeds this threhold in milliseconds") \ "duration exceeds this threshold in milliseconds") \
\ \
develop(bool, CMSTraceIncrementalMode, false, \ develop(bool, CMSTraceIncrementalMode, false, \
"Trace CMS incremental mode") \ "Trace CMS incremental mode") \
@@ -1659,14 +1679,15 @@ class CommandLineFlags {
"Whether class unloading enabled when using CMS GC") \ "Whether class unloading enabled when using CMS GC") \
\ \
product(uintx, CMSClassUnloadingMaxInterval, 0, \ product(uintx, CMSClassUnloadingMaxInterval, 0, \
"When CMS class unloading is enabled, the maximum CMS cycle count"\ "When CMS class unloading is enabled, the maximum CMS cycle " \
" for which classes may not be unloaded") \ "count for which classes may not be unloaded") \
\ \
product(bool, CMSCompactWhenClearAllSoftRefs, true, \ product(bool, CMSCompactWhenClearAllSoftRefs, true, \
"Compact when asked to collect CMS gen with clear_all_soft_refs") \ "Compact when asked to collect CMS gen with " \
"clear_all_soft_refs()") \
\ \
product(bool, UseCMSCompactAtFullCollection, true, \ product(bool, UseCMSCompactAtFullCollection, true, \
"Use mark sweep compact at full collections") \ "Use Mark-Sweep-Compact algorithm at full collections") \
\ \
product(uintx, CMSFullGCsBeforeCompaction, 0, \ product(uintx, CMSFullGCsBeforeCompaction, 0, \
"Number of CMS full collection done before compaction if > 0") \ "Number of CMS full collection done before compaction if > 0") \
@@ -1688,38 +1709,37 @@ class CommandLineFlags {
"Warn in case of excessive CMS looping") \ "Warn in case of excessive CMS looping") \
\ \
develop(bool, CMSOverflowEarlyRestoration, false, \ develop(bool, CMSOverflowEarlyRestoration, false, \
"Whether preserved marks should be restored early") \ "Restore preserved marks early") \
\ \
product(uintx, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \ product(uintx, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M), \
"Size of marking stack") \ "Size of marking stack") \
\ \
product(uintx, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \ product(uintx, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M), \
"Max size of marking stack") \ "Maximum size of marking stack") \
\ \
notproduct(bool, CMSMarkStackOverflowALot, false, \ notproduct(bool, CMSMarkStackOverflowALot, false, \
"Whether we should simulate frequent marking stack / work queue" \ "Simulate frequent marking stack / work queue overflow") \
" overflow") \
\ \
notproduct(uintx, CMSMarkStackOverflowInterval, 1000, \ notproduct(uintx, CMSMarkStackOverflowInterval, 1000, \
"An `interval' counter that determines how frequently" \ "An \"interval\" counter that determines how frequently " \
" we simulate overflow; a smaller number increases frequency") \ "to simulate overflow; a smaller number increases frequency") \
\ \
product(uintx, CMSMaxAbortablePrecleanLoops, 0, \ product(uintx, CMSMaxAbortablePrecleanLoops, 0, \
"(Temporary, subject to experimentation)" \ "(Temporary, subject to experimentation) " \
"Maximum number of abortable preclean iterations, if > 0") \ "Maximum number of abortable preclean iterations, if > 0") \
\ \
product(intx, CMSMaxAbortablePrecleanTime, 5000, \ product(intx, CMSMaxAbortablePrecleanTime, 5000, \
"(Temporary, subject to experimentation)" \ "(Temporary, subject to experimentation) " \
"Maximum time in abortable preclean in ms") \ "Maximum time in abortable preclean (in milliseconds)") \
\ \
product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100, \ product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100, \
"(Temporary, subject to experimentation)" \ "(Temporary, subject to experimentation) " \
"Nominal minimum work per abortable preclean iteration") \ "Nominal minimum work per abortable preclean iteration") \
\ \
manageable(intx, CMSAbortablePrecleanWaitMillis, 100, \ manageable(intx, CMSAbortablePrecleanWaitMillis, 100, \
"(Temporary, subject to experimentation)" \ "(Temporary, subject to experimentation) " \
" Time that we sleep between iterations when not given" \ "Time that we sleep between iterations when not given " \
" enough work per iteration") \ "enough work per iteration") \
\ \
product(uintx, CMSRescanMultiple, 32, \ product(uintx, CMSRescanMultiple, 32, \
"Size (in cards) of CMS parallel rescan task") \ "Size (in cards) of CMS parallel rescan task") \
@@ -1737,23 +1757,24 @@ class CommandLineFlags {
"Whether parallel remark enabled (only if ParNewGC)") \ "Whether parallel remark enabled (only if ParNewGC)") \
\ \
product(bool, CMSParallelSurvivorRemarkEnabled, true, \ product(bool, CMSParallelSurvivorRemarkEnabled, true, \
"Whether parallel remark of survivor space" \ "Whether parallel remark of survivor space " \
" enabled (effective only if CMSParallelRemarkEnabled)") \ "enabled (effective only if CMSParallelRemarkEnabled)") \
\ \
product(bool, CMSPLABRecordAlways, true, \ product(bool, CMSPLABRecordAlways, true, \
"Whether to always record survivor space PLAB bdries" \ "Always record survivor space PLAB boundaries (effective only " \
" (effective only if CMSParallelSurvivorRemarkEnabled)") \ "if CMSParallelSurvivorRemarkEnabled)") \
\ \
product(bool, CMSEdenChunksRecordAlways, true, \ product(bool, CMSEdenChunksRecordAlways, true, \
"Whether to always record eden chunks used for " \ "Always record eden chunks used for the parallel initial mark " \
"the parallel initial mark or remark of eden" ) \ "or remark of eden") \
\ \
product(bool, CMSPrintEdenSurvivorChunks, false, \ product(bool, CMSPrintEdenSurvivorChunks, false, \
"Print the eden and the survivor chunks used for the parallel " \ "Print the eden and the survivor chunks used for the parallel " \
"initial mark or remark of the eden/survivor spaces") \ "initial mark or remark of the eden/survivor spaces") \
\ \
product(bool, CMSConcurrentMTEnabled, true, \ product(bool, CMSConcurrentMTEnabled, true, \
"Whether multi-threaded concurrent work enabled (if ParNewGC)") \ "Whether multi-threaded concurrent work enabled " \
"(effective only if ParNewGC)") \
\ \
product(bool, CMSPrecleaningEnabled, true, \ product(bool, CMSPrecleaningEnabled, true, \
"Whether concurrent precleaning enabled") \ "Whether concurrent precleaning enabled") \
@@ -1762,12 +1783,12 @@ class CommandLineFlags {
"Maximum number of precleaning iteration passes") \ "Maximum number of precleaning iteration passes") \
\ \
product(uintx, CMSPrecleanNumerator, 2, \ product(uintx, CMSPrecleanNumerator, 2, \
"CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \ "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
" ratio") \ "ratio") \
\ \
product(uintx, CMSPrecleanDenominator, 3, \ product(uintx, CMSPrecleanDenominator, 3, \
"CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \ "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
" ratio") \ "ratio") \
\ \
product(bool, CMSPrecleanRefLists1, true, \ product(bool, CMSPrecleanRefLists1, true, \
"Preclean ref lists during (initial) preclean phase") \ "Preclean ref lists during (initial) preclean phase") \
@@ -1782,7 +1803,7 @@ class CommandLineFlags {
"Preclean survivors during abortable preclean phase") \ "Preclean survivors during abortable preclean phase") \
\ \
product(uintx, CMSPrecleanThreshold, 1000, \ product(uintx, CMSPrecleanThreshold, 1000, \
"Don't re-iterate if #dirty cards less than this") \ "Do not iterate again if number of dirty cards is less than this")\
\ \
product(bool, CMSCleanOnEnter, true, \ product(bool, CMSCleanOnEnter, true, \
"Clean-on-enter optimization for reducing number of dirty cards") \ "Clean-on-enter optimization for reducing number of dirty cards") \
@@ -1791,14 +1812,16 @@ class CommandLineFlags {
"Choose variant (1,2) of verification following remark") \ "Choose variant (1,2) of verification following remark") \
\ \
product(uintx, CMSScheduleRemarkEdenSizeThreshold, 2*M, \ product(uintx, CMSScheduleRemarkEdenSizeThreshold, 2*M, \
"If Eden used is below this value, don't try to schedule remark") \ "If Eden size is below this, do not try to schedule remark") \
\ \
product(uintx, CMSScheduleRemarkEdenPenetration, 50, \ product(uintx, CMSScheduleRemarkEdenPenetration, 50, \
"The Eden occupancy % at which to try and schedule remark pause") \ "The Eden occupancy percentage (0-100) at which " \
"to try and schedule remark pause") \
\ \
product(uintx, CMSScheduleRemarkSamplingRatio, 5, \ product(uintx, CMSScheduleRemarkSamplingRatio, 5, \
"Start sampling Eden top at least before yg occupancy reaches" \ "Start sampling eden top at least before young gen " \
" 1/<ratio> of the size at which we plan to schedule remark") \ "occupancy reaches 1/<ratio> of the size at which " \
"we plan to schedule remark") \
\ \
product(uintx, CMSSamplingGrain, 16*K, \ product(uintx, CMSSamplingGrain, 16*K, \
"The minimum distance between eden samples for CMS (see above)") \ "The minimum distance between eden samples for CMS (see above)") \
@@ -1820,27 +1843,27 @@ class CommandLineFlags {
"should start a collection cycle") \ "should start a collection cycle") \
\ \
product(bool, CMSYield, true, \ product(bool, CMSYield, true, \
"Yield between steps of concurrent mark & sweep") \ "Yield between steps of CMS") \
\ \
product(uintx, CMSBitMapYieldQuantum, 10*M, \ product(uintx, CMSBitMapYieldQuantum, 10*M, \
"Bitmap operations should process at most this many bits" \ "Bitmap operations should process at most this many bits " \
"between yields") \ "between yields") \
\ \
product(bool, CMSDumpAtPromotionFailure, false, \ product(bool, CMSDumpAtPromotionFailure, false, \
"Dump useful information about the state of the CMS old " \ "Dump useful information about the state of the CMS old " \
" generation upon a promotion failure.") \ "generation upon a promotion failure") \
\ \
product(bool, CMSPrintChunksInDump, false, \ product(bool, CMSPrintChunksInDump, false, \
"In a dump enabled by CMSDumpAtPromotionFailure, include " \ "In a dump enabled by CMSDumpAtPromotionFailure, include " \
" more detailed information about the free chunks.") \ "more detailed information about the free chunks") \
\ \
product(bool, CMSPrintObjectsInDump, false, \ product(bool, CMSPrintObjectsInDump, false, \
"In a dump enabled by CMSDumpAtPromotionFailure, include " \ "In a dump enabled by CMSDumpAtPromotionFailure, include " \
" more detailed information about the allocated objects.") \ "more detailed information about the allocated objects") \
\ \
diagnostic(bool, FLSVerifyAllHeapReferences, false, \ diagnostic(bool, FLSVerifyAllHeapReferences, false, \
"Verify that all refs across the FLS boundary " \ "Verify that all references across the FLS boundary " \
" are to valid objects") \ "are to valid objects") \
\ \
diagnostic(bool, FLSVerifyLists, false, \ diagnostic(bool, FLSVerifyLists, false, \
"Do lots of (expensive) FreeListSpace verification") \ "Do lots of (expensive) FreeListSpace verification") \
...@@ -1852,17 +1875,18 @@ class CommandLineFlags { ...@@ -1852,17 +1875,18 @@ class CommandLineFlags {
"Do lots of (expensive) FLS dictionary verification") \ "Do lots of (expensive) FLS dictionary verification") \
\ \
develop(bool, VerifyBlockOffsetArray, false, \ develop(bool, VerifyBlockOffsetArray, false, \
"Do (expensive!) block offset array verification") \ "Do (expensive) block offset array verification") \
\ \
diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \ diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \
"Maintain _unallocated_block in BlockOffsetArray" \ "Maintain _unallocated_block in BlockOffsetArray " \
" (currently applicable only to CMS collector)") \ "(currently applicable only to CMS collector)") \
\ \
develop(bool, TraceCMSState, false, \ develop(bool, TraceCMSState, false, \
"Trace the state of the CMS collection") \ "Trace the state of the CMS collection") \
\ \
product(intx, RefDiscoveryPolicy, 0, \ product(intx, RefDiscoveryPolicy, 0, \
"Whether reference-based(0) or referent-based(1)") \ "Select type of reference discovery policy: " \
"reference-based(0) or referent-based(1)") \
\ \
product(bool, ParallelRefProcEnabled, false, \ product(bool, ParallelRefProcEnabled, false, \
"Enable parallel reference processing whenever possible") \ "Enable parallel reference processing whenever possible") \
@@ -1890,7 +1914,7 @@ class CommandLineFlags {
"denotes 'do constant GC cycles'.") \ "denotes 'do constant GC cycles'.") \
\ \
product(bool, UseCMSInitiatingOccupancyOnly, false, \ product(bool, UseCMSInitiatingOccupancyOnly, false, \
"Only use occupancy as a crierion for starting a CMS collection") \ "Only use occupancy as a criterion for starting a CMS collection")\
\ \
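Without this flag CMS may also start cycles based on its own runtime statistics; pairing it with an explicit occupancy threshold makes cycle initiation deterministic, e.g.:

    java -XX:+UseConcMarkSweepGC \
         -XX:CMSInitiatingOccupancyFraction=75 \
         -XX:+UseCMSInitiatingOccupancyOnly Main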
product(uintx, CMSIsTooFullPercentage, 98, \ product(uintx, CMSIsTooFullPercentage, 98, \
"An absolute ceiling above which CMS will always consider the " \ "An absolute ceiling above which CMS will always consider the " \
@@ -1902,7 +1926,7 @@ class CommandLineFlags {
\ \
notproduct(bool, CMSVerifyReturnedBytes, false, \ notproduct(bool, CMSVerifyReturnedBytes, false, \
"Check that all the garbage collected was returned to the " \ "Check that all the garbage collected was returned to the " \
"free lists.") \ "free lists") \
\ \
notproduct(bool, ScavengeALot, false, \ notproduct(bool, ScavengeALot, false, \
"Force scavenge at every Nth exit from the runtime system " \ "Force scavenge at every Nth exit from the runtime system " \
...@@ -1917,16 +1941,16 @@ class CommandLineFlags { ...@@ -1917,16 +1941,16 @@ class CommandLineFlags {
\ \
product(bool, PrintPromotionFailure, false, \ product(bool, PrintPromotionFailure, false, \
"Print additional diagnostic information following " \ "Print additional diagnostic information following " \
" promotion failure") \ "promotion failure") \
\ \
notproduct(bool, PromotionFailureALot, false, \ notproduct(bool, PromotionFailureALot, false, \
"Use promotion failure handling on every youngest generation " \ "Use promotion failure handling on every youngest generation " \
"collection") \ "collection") \
\ \
develop(uintx, PromotionFailureALotCount, 1000, \ develop(uintx, PromotionFailureALotCount, 1000, \
"Number of promotion failures occurring at ParGCAllocBuffer" \ "Number of promotion failures occurring at ParGCAllocBuffer " \
"refill attempts (ParNew) or promotion attempts " \ "refill attempts (ParNew) or promotion attempts " \
"(other young collectors) ") \ "(other young collectors)") \
\ \
develop(uintx, PromotionFailureALotInterval, 5, \ develop(uintx, PromotionFailureALotInterval, 5, \
"Total collections between promotion failures alot") \ "Total collections between promotion failures alot") \
...@@ -1945,7 +1969,7 @@ class CommandLineFlags { ...@@ -1945,7 +1969,7 @@ class CommandLineFlags {
"Ratio of hard spins to calls to yield") \ "Ratio of hard spins to calls to yield") \
\ \
develop(uintx, ObjArrayMarkingStride, 512, \ develop(uintx, ObjArrayMarkingStride, 512, \
"Number of ObjArray elements to push onto the marking stack" \ "Number of object array elements to push onto the marking stack " \
"before pushing a continuation entry") \ "before pushing a continuation entry") \
\ \
develop(bool, MetadataAllocationFailALot, false, \ develop(bool, MetadataAllocationFailALot, false, \
...@@ -1953,7 +1977,7 @@ class CommandLineFlags { ...@@ -1953,7 +1977,7 @@ class CommandLineFlags {
"MetadataAllocationFailALotInterval") \ "MetadataAllocationFailALotInterval") \
\ \
develop(uintx, MetadataAllocationFailALotInterval, 1000, \ develop(uintx, MetadataAllocationFailALotInterval, 1000, \
"metadata allocation failure alot interval") \ "Metadata allocation failure a lot interval") \
\ \
develop(bool, MetaDataDeallocateALot, false, \ develop(bool, MetaDataDeallocateALot, false, \
"Deallocation bunches of metadata at intervals controlled by " \ "Deallocation bunches of metadata at intervals controlled by " \
...@@ -1972,7 +1996,7 @@ class CommandLineFlags { ...@@ -1972,7 +1996,7 @@ class CommandLineFlags {
"Trace virtual space metadata allocations") \ "Trace virtual space metadata allocations") \
\ \
notproduct(bool, ExecuteInternalVMTests, false, \ notproduct(bool, ExecuteInternalVMTests, false, \
"Enable execution of internal VM tests.") \ "Enable execution of internal VM tests") \
\ \
notproduct(bool, VerboseInternalVMTests, false, \ notproduct(bool, VerboseInternalVMTests, false, \
"Turn on logging for internal VM tests.") \ "Turn on logging for internal VM tests.") \
...@@ -1980,7 +2004,7 @@ class CommandLineFlags { ...@@ -1980,7 +2004,7 @@ class CommandLineFlags {
product_pd(bool, UseTLAB, "Use thread-local object allocation") \ product_pd(bool, UseTLAB, "Use thread-local object allocation") \
\ \
product_pd(bool, ResizeTLAB, \ product_pd(bool, ResizeTLAB, \
"Dynamically resize tlab size for threads") \ "Dynamically resize TLAB size for threads") \
\ \
product(bool, ZeroTLAB, false, \ product(bool, ZeroTLAB, false, \
"Zero out the newly created TLAB") \ "Zero out the newly created TLAB") \
...@@ -1992,7 +2016,8 @@ class CommandLineFlags { ...@@ -1992,7 +2016,8 @@ class CommandLineFlags {
"Print various TLAB related information") \ "Print various TLAB related information") \
\ \
product(bool, TLABStats, true, \ product(bool, TLABStats, true, \
"Print various TLAB related information") \ "Provide more detailed and expensive TLAB statistics " \
"(with PrintTLAB)") \
\ \
EMBEDDED_ONLY(product(bool, LowMemoryProtection, true, \ EMBEDDED_ONLY(product(bool, LowMemoryProtection, true, \
"Enable LowMemoryProtection")) \ "Enable LowMemoryProtection")) \
...@@ -2026,14 +2051,14 @@ class CommandLineFlags { ...@@ -2026,14 +2051,14 @@ class CommandLineFlags {
"Fraction (1/n) of real memory used for initial heap size") \ "Fraction (1/n) of real memory used for initial heap size") \
\ \
develop(uintx, MaxVirtMemFraction, 2, \ develop(uintx, MaxVirtMemFraction, 2, \
"Maximum fraction (1/n) of virtual memory used for ergonomically" \ "Maximum fraction (1/n) of virtual memory used for ergonomically "\
"determining maximum heap size") \ "determining maximum heap size") \
\ \
product(bool, UseAutoGCSelectPolicy, false, \ product(bool, UseAutoGCSelectPolicy, false, \
"Use automatic collection selection policy") \ "Use automatic collection selection policy") \
\ \
product(uintx, AutoGCSelectPauseMillis, 5000, \ product(uintx, AutoGCSelectPauseMillis, 5000, \
"Automatic GC selection pause threshhold in ms") \ "Automatic GC selection pause threshold in milliseconds") \
\ \
product(bool, UseAdaptiveSizePolicy, true, \ product(bool, UseAdaptiveSizePolicy, true, \
"Use adaptive generation sizing policies") \ "Use adaptive generation sizing policies") \
...@@ -2048,7 +2073,7 @@ class CommandLineFlags { ...@@ -2048,7 +2073,7 @@ class CommandLineFlags {
"Use adaptive young-old sizing policies at major collections") \ "Use adaptive young-old sizing policies at major collections") \
\ \
product(bool, UseAdaptiveSizePolicyWithSystemGC, false, \ product(bool, UseAdaptiveSizePolicyWithSystemGC, false, \
"Use statistics from System.GC for adaptive size policy") \ "Include statistics from System.gc() for adaptive size policy") \
\ \
product(bool, UseAdaptiveGCBoundary, false, \ product(bool, UseAdaptiveGCBoundary, false, \
"Allow young-old boundary to move") \ "Allow young-old boundary to move") \
...@@ -2060,16 +2085,16 @@ class CommandLineFlags { ...@@ -2060,16 +2085,16 @@ class CommandLineFlags {
"Resize the virtual spaces of the young or old generations") \ "Resize the virtual spaces of the young or old generations") \
\ \
product(uintx, AdaptiveSizeThroughPutPolicy, 0, \ product(uintx, AdaptiveSizeThroughPutPolicy, 0, \
"Policy for changeing generation size for throughput goals") \ "Policy for changing generation size for throughput goals") \
\ \
product(uintx, AdaptiveSizePausePolicy, 0, \ product(uintx, AdaptiveSizePausePolicy, 0, \
"Policy for changing generation size for pause goals") \ "Policy for changing generation size for pause goals") \
\ \
develop(bool, PSAdjustTenuredGenForMinorPause, false, \ develop(bool, PSAdjustTenuredGenForMinorPause, false, \
"Adjust tenured generation to achive a minor pause goal") \ "Adjust tenured generation to achieve a minor pause goal") \
\ \
develop(bool, PSAdjustYoungGenForMajorPause, false, \ develop(bool, PSAdjustYoungGenForMajorPause, false, \
"Adjust young generation to achive a major pause goal") \ "Adjust young generation to achieve a major pause goal") \
\ \
product(uintx, AdaptiveSizePolicyInitializingSteps, 20, \ product(uintx, AdaptiveSizePolicyInitializingSteps, 20, \
"Number of steps where heuristics is used before data is used") \ "Number of steps where heuristics is used before data is used") \
...@@ -2124,14 +2149,15 @@ class CommandLineFlags { ...@@ -2124,14 +2149,15 @@ class CommandLineFlags {
"Decay factor to TenuredGenerationSizeIncrement") \ "Decay factor to TenuredGenerationSizeIncrement") \
\ \
product(uintx, MaxGCPauseMillis, max_uintx, \ product(uintx, MaxGCPauseMillis, max_uintx, \
"Adaptive size policy maximum GC pause time goal in msec, " \ "Adaptive size policy maximum GC pause time goal in millisecond, "\
"or (G1 Only) the max. GC time per MMU time slice") \ "or (G1 Only) the maximum GC time per MMU time slice") \
\ \
product(uintx, GCPauseIntervalMillis, 0, \ product(uintx, GCPauseIntervalMillis, 0, \
"Time slice for MMU specification") \ "Time slice for MMU specification") \
\ \
product(uintx, MaxGCMinorPauseMillis, max_uintx, \ product(uintx, MaxGCMinorPauseMillis, max_uintx, \
"Adaptive size policy maximum GC minor pause time goal in msec") \ "Adaptive size policy maximum GC minor pause time goal " \
"in millisecond") \
\ \
product(uintx, GCTimeRatio, 99, \ product(uintx, GCTimeRatio, 99, \
"Adaptive size policy application time to GC time ratio") \ "Adaptive size policy application time to GC time ratio") \
...@@ -2159,8 +2185,8 @@ class CommandLineFlags { ...@@ -2159,8 +2185,8 @@ class CommandLineFlags {
"before an OutOfMemory error is thrown") \ "before an OutOfMemory error is thrown") \
\ \
product(uintx, GCTimeLimit, 98, \ product(uintx, GCTimeLimit, 98, \
"Limit of proportion of time spent in GC before an OutOfMemory" \ "Limit of the proportion of time spent in GC before " \
"error is thrown (used with GCHeapFreeLimit)") \ "an OutOfMemoryError is thrown (used with GCHeapFreeLimit)") \
\ \
product(uintx, GCHeapFreeLimit, 2, \ product(uintx, GCHeapFreeLimit, 2, \
"Minimum percentage of free space after a full GC before an " \ "Minimum percentage of free space after a full GC before an " \
...@@ -2182,7 +2208,7 @@ class CommandLineFlags { ...@@ -2182,7 +2208,7 @@ class CommandLineFlags {
"How many fields ahead to prefetch in oop scan (<= 0 means off)") \ "How many fields ahead to prefetch in oop scan (<= 0 means off)") \
\ \
diagnostic(bool, VerifySilently, false, \ diagnostic(bool, VerifySilently, false, \
"Don't print print the verification progress") \ "Do not print the verification progress") \
\ \
diagnostic(bool, VerifyDuringStartup, false, \ diagnostic(bool, VerifyDuringStartup, false, \
"Verify memory system before executing any Java code " \ "Verify memory system before executing any Java code " \
...@@ -2205,7 +2231,7 @@ class CommandLineFlags { ...@@ -2205,7 +2231,7 @@ class CommandLineFlags {
\ \
diagnostic(bool, DeferInitialCardMark, false, \ diagnostic(bool, DeferInitialCardMark, false, \
"When +ReduceInitialCardMarks, explicitly defer any that " \ "When +ReduceInitialCardMarks, explicitly defer any that " \
"may arise from new_pre_store_barrier") \ "may arise from new_pre_store_barrier") \
\ \
diagnostic(bool, VerifyRememberedSets, false, \ diagnostic(bool, VerifyRememberedSets, false, \
"Verify GC remembered sets") \ "Verify GC remembered sets") \
...@@ -2214,10 +2240,10 @@ class CommandLineFlags { ...@@ -2214,10 +2240,10 @@ class CommandLineFlags {
"Verify GC object start array if verify before/after") \ "Verify GC object start array if verify before/after") \
\ \
product(bool, DisableExplicitGC, false, \ product(bool, DisableExplicitGC, false, \
"Tells whether calling System.gc() does a full GC") \ "Ignore calls to System.gc()") \
\ \
notproduct(bool, CheckMemoryInitialization, false, \ notproduct(bool, CheckMemoryInitialization, false, \
"Checks memory initialization") \ "Check memory initialization") \
\ \
product(bool, CollectGen0First, false, \ product(bool, CollectGen0First, false, \
"Collect youngest generation before each full GC") \ "Collect youngest generation before each full GC") \
...@@ -2238,44 +2264,45 @@ class CommandLineFlags { ...@@ -2238,44 +2264,45 @@ class CommandLineFlags {
"Stride through processors when distributing processes") \ "Stride through processors when distributing processes") \
\ \
product(uintx, CMSCoordinatorYieldSleepCount, 10, \ product(uintx, CMSCoordinatorYieldSleepCount, 10, \
"number of times the coordinator GC thread will sleep while " \ "Number of times the coordinator GC thread will sleep while " \
"yielding before giving up and resuming GC") \ "yielding before giving up and resuming GC") \
\ \
product(uintx, CMSYieldSleepCount, 0, \ product(uintx, CMSYieldSleepCount, 0, \
"number of times a GC thread (minus the coordinator) " \ "Number of times a GC thread (minus the coordinator) " \
"will sleep while yielding before giving up and resuming GC") \ "will sleep while yielding before giving up and resuming GC") \
\ \
/* gc tracing */ \ /* gc tracing */ \
manageable(bool, PrintGC, false, \ manageable(bool, PrintGC, false, \
"Print message at garbage collect") \ "Print message at garbage collection") \
\ \
manageable(bool, PrintGCDetails, false, \ manageable(bool, PrintGCDetails, false, \
"Print more details at garbage collect") \ "Print more details at garbage collection") \
\ \
manageable(bool, PrintGCDateStamps, false, \ manageable(bool, PrintGCDateStamps, false, \
"Print date stamps at garbage collect") \ "Print date stamps at garbage collection") \
\ \
manageable(bool, PrintGCTimeStamps, false, \ manageable(bool, PrintGCTimeStamps, false, \
"Print timestamps at garbage collect") \ "Print timestamps at garbage collection") \
\ \
product(bool, PrintGCTaskTimeStamps, false, \ product(bool, PrintGCTaskTimeStamps, false, \
"Print timestamps for individual gc worker thread tasks") \ "Print timestamps for individual gc worker thread tasks") \
\ \
develop(intx, ConcGCYieldTimeout, 0, \ develop(intx, ConcGCYieldTimeout, 0, \
"If non-zero, assert that GC threads yield within this # of ms.") \ "If non-zero, assert that GC threads yield within this " \
"number of milliseconds") \
\ \
notproduct(bool, TraceMarkSweep, false, \ notproduct(bool, TraceMarkSweep, false, \
"Trace mark sweep") \ "Trace mark sweep") \
\ \
product(bool, PrintReferenceGC, false, \ product(bool, PrintReferenceGC, false, \
"Print times spent handling reference objects during GC " \ "Print times spent handling reference objects during GC " \
" (enabled only when PrintGCDetails)") \ "(enabled only when PrintGCDetails)") \
\ \
develop(bool, TraceReferenceGC, false, \ develop(bool, TraceReferenceGC, false, \
"Trace handling of soft/weak/final/phantom references") \ "Trace handling of soft/weak/final/phantom references") \
\ \
develop(bool, TraceFinalizerRegistration, false, \ develop(bool, TraceFinalizerRegistration, false, \
"Trace registration of final references") \ "Trace registration of final references") \
\ \
notproduct(bool, TraceScavenge, false, \ notproduct(bool, TraceScavenge, false, \
"Trace scavenge") \ "Trace scavenge") \
...@@ -2312,7 +2339,7 @@ class CommandLineFlags { ...@@ -2312,7 +2339,7 @@ class CommandLineFlags {
"Print heap layout before and after each GC") \ "Print heap layout before and after each GC") \
\ \
product_rw(bool, PrintHeapAtGCExtended, false, \ product_rw(bool, PrintHeapAtGCExtended, false, \
"Prints extended information about the layout of the heap " \ "Print extended information about the layout of the heap " \
"when -XX:+PrintHeapAtGC is set") \ "when -XX:+PrintHeapAtGC is set") \
\ \
product(bool, PrintHeapAtSIGBREAK, true, \ product(bool, PrintHeapAtSIGBREAK, true, \
...@@ -2349,45 +2376,45 @@ class CommandLineFlags { ...@@ -2349,45 +2376,45 @@ class CommandLineFlags {
"Trace actions of the GC task threads") \ "Trace actions of the GC task threads") \
\ \
product(bool, PrintParallelOldGCPhaseTimes, false, \ product(bool, PrintParallelOldGCPhaseTimes, false, \
"Print the time taken by each parallel old gc phase." \ "Print the time taken by each phase in ParallelOldGC " \
"PrintGCDetails must also be enabled.") \ "(PrintGCDetails must also be enabled)") \
\ \
develop(bool, TraceParallelOldGCMarkingPhase, false, \ develop(bool, TraceParallelOldGCMarkingPhase, false, \
"Trace parallel old gc marking phase") \ "Trace marking phase in ParallelOldGC") \
\ \
develop(bool, TraceParallelOldGCSummaryPhase, false, \ develop(bool, TraceParallelOldGCSummaryPhase, false, \
"Trace parallel old gc summary phase") \ "Trace summary phase in ParallelOldGC") \
\ \
develop(bool, TraceParallelOldGCCompactionPhase, false, \ develop(bool, TraceParallelOldGCCompactionPhase, false, \
"Trace parallel old gc compaction phase") \ "Trace compaction phase in ParallelOldGC") \
\ \
develop(bool, TraceParallelOldGCDensePrefix, false, \ develop(bool, TraceParallelOldGCDensePrefix, false, \
"Trace parallel old gc dense prefix computation") \ "Trace dense prefix computation for ParallelOldGC") \
\ \
develop(bool, IgnoreLibthreadGPFault, false, \ develop(bool, IgnoreLibthreadGPFault, false, \
"Suppress workaround for libthread GP fault") \ "Suppress workaround for libthread GP fault") \
\ \
product(bool, PrintJNIGCStalls, false, \ product(bool, PrintJNIGCStalls, false, \
"Print diagnostic message when GC is stalled" \ "Print diagnostic message when GC is stalled " \
"by JNI critical section") \ "by JNI critical section") \
\ \
experimental(double, ObjectCountCutOffPercent, 0.5, \ experimental(double, ObjectCountCutOffPercent, 0.5, \
"The percentage of the used heap that the instances of a class " \ "The percentage of the used heap that the instances of a class " \
"must occupy for the class to generate a trace event.") \ "must occupy for the class to generate a trace event") \
\ \
/* GC log rotation setting */ \ /* GC log rotation setting */ \
\ \
product(bool, UseGCLogFileRotation, false, \ product(bool, UseGCLogFileRotation, false, \
"Prevent large gclog file for long running app. " \ "Rotate gclog files (for long running applications). It requires "\
"Requires -Xloggc:<filename>") \ "-Xloggc:<filename>") \
\ \
product(uintx, NumberOfGCLogFiles, 0, \ product(uintx, NumberOfGCLogFiles, 0, \
"Number of gclog files in rotation, " \ "Number of gclog files in rotation " \
"Default: 0, no rotation") \ "(default: 0, no rotation)") \
\ \
product(uintx, GCLogFileSize, 0, \ product(uintx, GCLogFileSize, 0, \
"GC log file size, Default: 0 bytes, no rotation " \ "GC log file size (default: 0 bytes, no rotation). " \
"Only valid with UseGCLogFileRotation") \ "It requires UseGCLogFileRotation") \
\ \
/* JVMTI heap profiling */ \ /* JVMTI heap profiling */ \
\ \
...@@ -2464,40 +2491,40 @@ class CommandLineFlags { ...@@ -2464,40 +2491,40 @@ class CommandLineFlags {
"Generate range checks for array accesses") \ "Generate range checks for array accesses") \
\ \
develop_pd(bool, ImplicitNullChecks, \ develop_pd(bool, ImplicitNullChecks, \
"generate code for implicit null checks") \ "Generate code for implicit null checks") \
\ \
product(bool, PrintSafepointStatistics, false, \ product(bool, PrintSafepointStatistics, false, \
"print statistics about safepoint synchronization") \ "Print statistics about safepoint synchronization") \
\ \
product(intx, PrintSafepointStatisticsCount, 300, \ product(intx, PrintSafepointStatisticsCount, 300, \
"total number of safepoint statistics collected " \ "Total number of safepoint statistics collected " \
"before printing them out") \ "before printing them out") \
\ \
product(intx, PrintSafepointStatisticsTimeout, -1, \ product(intx, PrintSafepointStatisticsTimeout, -1, \
"print safepoint statistics only when safepoint takes" \ "Print safepoint statistics only when safepoint takes " \
" more than PrintSafepointSatisticsTimeout in millis") \ "more than PrintSafepointSatisticsTimeout in millis") \
\ \
product(bool, TraceSafepointCleanupTime, false, \ product(bool, TraceSafepointCleanupTime, false, \
"print the break down of clean up tasks performed during" \ "Print the break down of clean up tasks performed during " \
" safepoint") \ "safepoint") \
\ \
product(bool, Inline, true, \ product(bool, Inline, true, \
"enable inlining") \ "Enable inlining") \
\ \
product(bool, ClipInlining, true, \ product(bool, ClipInlining, true, \
"clip inlining if aggregate method exceeds DesiredMethodLimit") \ "Clip inlining if aggregate method exceeds DesiredMethodLimit") \
\ \
develop(bool, UseCHA, true, \ develop(bool, UseCHA, true, \
"enable CHA") \ "Enable CHA") \
\ \
product(bool, UseTypeProfile, true, \ product(bool, UseTypeProfile, true, \
"Check interpreter profile for historically monomorphic calls") \ "Check interpreter profile for historically monomorphic calls") \
\ \
notproduct(bool, TimeCompiler, false, \ notproduct(bool, TimeCompiler, false, \
"time the compiler") \ "Time the compiler") \
\ \
diagnostic(bool, PrintInlining, false, \ diagnostic(bool, PrintInlining, false, \
"prints inlining optimizations") \ "Print inlining optimizations") \
\ \
product(bool, UsePopCountInstruction, false, \ product(bool, UsePopCountInstruction, false, \
"Use population count instruction") \ "Use population count instruction") \
...@@ -2509,57 +2536,59 @@ class CommandLineFlags { ...@@ -2509,57 +2536,59 @@ class CommandLineFlags {
"Print when methods are replaced do to recompilation") \ "Print when methods are replaced do to recompilation") \
\ \
develop(bool, PrintMethodFlushing, false, \ develop(bool, PrintMethodFlushing, false, \
"print the nmethods being flushed") \ "Print the nmethods being flushed") \
\ \
develop(bool, UseRelocIndex, false, \ develop(bool, UseRelocIndex, false, \
"use an index to speed random access to relocations") \ "Use an index to speed random access to relocations") \
\ \
develop(bool, StressCodeBuffers, false, \ develop(bool, StressCodeBuffers, false, \
"Exercise code buffer expansion and other rare state changes") \ "Exercise code buffer expansion and other rare state changes") \
\ \
diagnostic(bool, DebugNonSafepoints, trueInDebug, \ diagnostic(bool, DebugNonSafepoints, trueInDebug, \
"Generate extra debugging info for non-safepoints in nmethods") \ "Generate extra debugging information for non-safepoints in " \
"nmethods") \
\ \
product(bool, PrintVMOptions, false, \ product(bool, PrintVMOptions, false, \
"Print flags that appeared on the command line") \ "Print flags that appeared on the command line") \
\ \
product(bool, IgnoreUnrecognizedVMOptions, false, \ product(bool, IgnoreUnrecognizedVMOptions, false, \
"Ignore unrecognized VM options") \ "Ignore unrecognized VM options") \
\ \
product(bool, PrintCommandLineFlags, false, \ product(bool, PrintCommandLineFlags, false, \
"Print flags specified on command line or set by ergonomics") \ "Print flags specified on command line or set by ergonomics") \
\ \
product(bool, PrintFlagsInitial, false, \ product(bool, PrintFlagsInitial, false, \
"Print all VM flags before argument processing and exit VM") \ "Print all VM flags before argument processing and exit VM") \
\ \
product(bool, PrintFlagsFinal, false, \ product(bool, PrintFlagsFinal, false, \
"Print all VM flags after argument and ergonomic processing") \ "Print all VM flags after argument and ergonomic processing") \
\ \
notproduct(bool, PrintFlagsWithComments, false, \ notproduct(bool, PrintFlagsWithComments, false, \
"Print all VM flags with default values and descriptions and exit")\ "Print all VM flags with default values and descriptions and " \
"exit") \
\ \
diagnostic(bool, SerializeVMOutput, true, \ diagnostic(bool, SerializeVMOutput, true, \
"Use a mutex to serialize output to tty and LogFile") \ "Use a mutex to serialize output to tty and LogFile") \
\ \
diagnostic(bool, DisplayVMOutput, true, \ diagnostic(bool, DisplayVMOutput, true, \
"Display all VM output on the tty, independently of LogVMOutput") \ "Display all VM output on the tty, independently of LogVMOutput") \
\ \
diagnostic(bool, LogVMOutput, false, \ diagnostic(bool, LogVMOutput, false, \
"Save VM output to LogFile") \ "Save VM output to LogFile") \
\ \
diagnostic(ccstr, LogFile, NULL, \ diagnostic(ccstr, LogFile, NULL, \
"If LogVMOutput or LogCompilation is on, save VM output to " \ "If LogVMOutput or LogCompilation is on, save VM output to " \
"this file [default: ./hotspot_pid%p.log] (%p replaced with pid)") \ "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\
\ \
product(ccstr, ErrorFile, NULL, \ product(ccstr, ErrorFile, NULL, \
"If an error occurs, save the error data to this file " \ "If an error occurs, save the error data to this file " \
"[default: ./hs_err_pid%p.log] (%p replaced with pid)") \ "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \
\ \
product(bool, DisplayVMOutputToStderr, false, \ product(bool, DisplayVMOutputToStderr, false, \
"If DisplayVMOutput is true, display all VM output to stderr") \ "If DisplayVMOutput is true, display all VM output to stderr") \
\ \
product(bool, DisplayVMOutputToStdout, false, \ product(bool, DisplayVMOutputToStdout, false, \
"If DisplayVMOutput is true, display all VM output to stdout") \ "If DisplayVMOutput is true, display all VM output to stdout") \
\ \
product(bool, UseHeavyMonitors, false, \ product(bool, UseHeavyMonitors, false, \
"use heavyweight instead of lightweight Java monitors") \ "use heavyweight instead of lightweight Java monitors") \
...@@ -2583,7 +2612,7 @@ class CommandLineFlags { ...@@ -2583,7 +2612,7 @@ class CommandLineFlags {
\ \
notproduct(ccstr, AbortVMOnExceptionMessage, NULL, \ notproduct(ccstr, AbortVMOnExceptionMessage, NULL, \
"Call fatal if the exception pointed by AbortVMOnException " \ "Call fatal if the exception pointed by AbortVMOnException " \
"has this message.") \ "has this message") \
\ \
develop(bool, DebugVtables, false, \ develop(bool, DebugVtables, false, \
"add debugging code to vtable dispatch") \ "add debugging code to vtable dispatch") \
...@@ -2650,29 +2679,29 @@ class CommandLineFlags { ...@@ -2650,29 +2679,29 @@ class CommandLineFlags {
\ \
/* statistics */ \ /* statistics */ \
develop(bool, CountCompiledCalls, false, \ develop(bool, CountCompiledCalls, false, \
"counts method invocations") \ "Count method invocations") \
\ \
notproduct(bool, CountRuntimeCalls, false, \ notproduct(bool, CountRuntimeCalls, false, \
"counts VM runtime calls") \ "Count VM runtime calls") \
\ \
develop(bool, CountJNICalls, false, \ develop(bool, CountJNICalls, false, \
"counts jni method invocations") \ "Count jni method invocations") \
\ \
notproduct(bool, CountJVMCalls, false, \ notproduct(bool, CountJVMCalls, false, \
"counts jvm method invocations") \ "Count jvm method invocations") \
\ \
notproduct(bool, CountRemovableExceptions, false, \ notproduct(bool, CountRemovableExceptions, false, \
"count exceptions that could be replaced by branches due to " \ "Count exceptions that could be replaced by branches due to " \
"inlining") \ "inlining") \
\ \
notproduct(bool, ICMissHistogram, false, \ notproduct(bool, ICMissHistogram, false, \
"produce histogram of IC misses") \ "Produce histogram of IC misses") \
\ \
notproduct(bool, PrintClassStatistics, false, \ notproduct(bool, PrintClassStatistics, false, \
"prints class statistics at end of run") \ "Print class statistics at end of run") \
\ \
notproduct(bool, PrintMethodStatistics, false, \ notproduct(bool, PrintMethodStatistics, false, \
"prints method statistics at end of run") \ "Print method statistics at end of run") \
\ \
/* interpreter */ \ /* interpreter */ \
develop(bool, ClearInterpreterLocals, false, \ develop(bool, ClearInterpreterLocals, false, \
...@@ -2686,7 +2715,7 @@ class CommandLineFlags { ...@@ -2686,7 +2715,7 @@ class CommandLineFlags {
"Rewrite frequently used bytecode pairs into a single bytecode") \ "Rewrite frequently used bytecode pairs into a single bytecode") \
\ \
diagnostic(bool, PrintInterpreter, false, \ diagnostic(bool, PrintInterpreter, false, \
"Prints the generated interpreter code") \ "Print the generated interpreter code") \
\ \
product(bool, UseInterpreter, true, \ product(bool, UseInterpreter, true, \
"Use interpreter for non-compiled methods") \ "Use interpreter for non-compiled methods") \
...@@ -2704,8 +2733,8 @@ class CommandLineFlags { ...@@ -2704,8 +2733,8 @@ class CommandLineFlags {
"Use fast method entry code for accessor methods") \ "Use fast method entry code for accessor methods") \
\ \
product_pd(bool, UseOnStackReplacement, \ product_pd(bool, UseOnStackReplacement, \
"Use on stack replacement, calls runtime if invoc. counter " \ "Use on stack replacement, calls runtime if invoc. counter " \
"overflows in loop") \ "overflows in loop") \
\ \
notproduct(bool, TraceOnStackReplacement, false, \ notproduct(bool, TraceOnStackReplacement, false, \
"Trace on stack replacement") \ "Trace on stack replacement") \
...@@ -2753,10 +2782,10 @@ class CommandLineFlags { ...@@ -2753,10 +2782,10 @@ class CommandLineFlags {
"Trace frequency based inlining") \ "Trace frequency based inlining") \
\ \
develop_pd(bool, InlineIntrinsics, \ develop_pd(bool, InlineIntrinsics, \
"Inline intrinsics that can be statically resolved") \ "Inline intrinsics that can be statically resolved") \
\ \
product_pd(bool, ProfileInterpreter, \ product_pd(bool, ProfileInterpreter, \
"Profile at the bytecode level during interpretation") \ "Profile at the bytecode level during interpretation") \
\ \
develop_pd(bool, ProfileTraps, \ develop_pd(bool, ProfileTraps, \
"Profile deoptimization traps at the bytecode level") \ "Profile deoptimization traps at the bytecode level") \
...@@ -2766,7 +2795,7 @@ class CommandLineFlags { ...@@ -2766,7 +2795,7 @@ class CommandLineFlags {
"CompileThreshold) before using the method's profile") \ "CompileThreshold) before using the method's profile") \
\ \
develop(bool, PrintMethodData, false, \ develop(bool, PrintMethodData, false, \
"Print the results of +ProfileInterpreter at end of run") \ "Print the results of +ProfileInterpreter at end of run") \
\ \
develop(bool, VerifyDataPointer, trueInDebug, \ develop(bool, VerifyDataPointer, trueInDebug, \
"Verify the method data pointer during interpreter profiling") \ "Verify the method data pointer during interpreter profiling") \
...@@ -2781,7 +2810,7 @@ class CommandLineFlags { ...@@ -2781,7 +2810,7 @@ class CommandLineFlags {
\ \
/* compilation */ \ /* compilation */ \
product(bool, UseCompiler, true, \ product(bool, UseCompiler, true, \
"use compilation") \ "Use Just-In-Time compilation") \
\ \
develop(bool, TraceCompilationPolicy, false, \ develop(bool, TraceCompilationPolicy, false, \
"Trace compilation policy") \ "Trace compilation policy") \
...@@ -2790,20 +2819,21 @@ class CommandLineFlags { ...@@ -2790,20 +2819,21 @@ class CommandLineFlags {
"Time the compilation policy") \ "Time the compilation policy") \
\ \
product(bool, UseCounterDecay, true, \ product(bool, UseCounterDecay, true, \
"adjust recompilation counters") \ "Adjust recompilation counters") \
\ \
develop(intx, CounterHalfLifeTime, 30, \ develop(intx, CounterHalfLifeTime, 30, \
"half-life time of invocation counters (in secs)") \ "Half-life time of invocation counters (in seconds)") \
\ \
develop(intx, CounterDecayMinIntervalLength, 500, \ develop(intx, CounterDecayMinIntervalLength, 500, \
"Min. ms. between invocation of CounterDecay") \ "The minimum interval (in milliseconds) between invocation of " \
"CounterDecay") \
\ \
product(bool, AlwaysCompileLoopMethods, false, \ product(bool, AlwaysCompileLoopMethods, false, \
"when using recompilation, never interpret methods " \ "When using recompilation, never interpret methods " \
"containing loops") \ "containing loops") \
\ \
product(bool, DontCompileHugeMethods, true, \ product(bool, DontCompileHugeMethods, true, \
"don't compile methods > HugeMethodLimit") \ "Do not compile methods > HugeMethodLimit") \
\ \
/* Bytecode escape analysis estimation. */ \ /* Bytecode escape analysis estimation. */ \
product(bool, EstimateArgEscape, true, \ product(bool, EstimateArgEscape, true, \
...@@ -2813,10 +2843,10 @@ class CommandLineFlags { ...@@ -2813,10 +2843,10 @@ class CommandLineFlags {
"How much tracing to do of bytecode escape analysis estimates") \ "How much tracing to do of bytecode escape analysis estimates") \
\ \
product(intx, MaxBCEAEstimateLevel, 5, \ product(intx, MaxBCEAEstimateLevel, 5, \
"Maximum number of nested calls that are analyzed by BC EA.") \ "Maximum number of nested calls that are analyzed by BC EA") \
\ \
product(intx, MaxBCEAEstimateSize, 150, \ product(intx, MaxBCEAEstimateSize, 150, \
"Maximum bytecode size of a method to be analyzed by BC EA.") \ "Maximum bytecode size of a method to be analyzed by BC EA") \
\ \
product(intx, AllocatePrefetchStyle, 1, \ product(intx, AllocatePrefetchStyle, 1, \
"0 = no prefetch, " \ "0 = no prefetch, " \
...@@ -2831,7 +2861,8 @@ class CommandLineFlags { ...@@ -2831,7 +2861,8 @@ class CommandLineFlags {
"Number of lines to prefetch ahead of array allocation pointer") \ "Number of lines to prefetch ahead of array allocation pointer") \
\ \
product(intx, AllocateInstancePrefetchLines, 1, \ product(intx, AllocateInstancePrefetchLines, 1, \
"Number of lines to prefetch ahead of instance allocation pointer") \ "Number of lines to prefetch ahead of instance allocation " \
"pointer") \
\ \
product(intx, AllocatePrefetchStepSize, 16, \ product(intx, AllocatePrefetchStepSize, 16, \
"Step size in bytes of sequential prefetch instructions") \ "Step size in bytes of sequential prefetch instructions") \
...@@ -2851,8 +2882,8 @@ class CommandLineFlags { ...@@ -2851,8 +2882,8 @@ class CommandLineFlags {
"(0 means off)") \ "(0 means off)") \
\ \
product(intx, MaxJavaStackTraceDepth, 1024, \ product(intx, MaxJavaStackTraceDepth, 1024, \
"Max. no. of lines in the stack trace for Java exceptions " \ "The maximum number of lines in the stack trace for Java " \
"(0 means all)") \ "exceptions (0 means all)") \
\ \
NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000, \ NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000, \
"Guarantee a safepoint (at least) every so many milliseconds " \ "Guarantee a safepoint (at least) every so many milliseconds " \
...@@ -2876,10 +2907,10 @@ class CommandLineFlags { ...@@ -2876,10 +2907,10 @@ class CommandLineFlags {
"result in more aggressive sweeping") \ "result in more aggressive sweeping") \
\ \
notproduct(bool, LogSweeper, false, \ notproduct(bool, LogSweeper, false, \
"Keep a ring buffer of sweeper activity") \ "Keep a ring buffer of sweeper activity") \
\ \
notproduct(intx, SweeperLogEntries, 1024, \ notproduct(intx, SweeperLogEntries, 1024, \
"Number of records in the ring buffer of sweeper activity") \ "Number of records in the ring buffer of sweeper activity") \
\ \
notproduct(intx, MemProfilingInterval, 500, \ notproduct(intx, MemProfilingInterval, 500, \
"Time between each invocation of the MemProfiler") \ "Time between each invocation of the MemProfiler") \
...@@ -2922,34 +2953,35 @@ class CommandLineFlags { ...@@ -2922,34 +2953,35 @@ class CommandLineFlags {
"less than this") \ "less than this") \
\ \
product(intx, MaxInlineSize, 35, \ product(intx, MaxInlineSize, 35, \
"maximum bytecode size of a method to be inlined") \ "The maximum bytecode size of a method to be inlined") \
\ \
product_pd(intx, FreqInlineSize, \ product_pd(intx, FreqInlineSize, \
"maximum bytecode size of a frequent method to be inlined") \ "The maximum bytecode size of a frequent method to be inlined") \
\ \
product(intx, MaxTrivialSize, 6, \ product(intx, MaxTrivialSize, 6, \
"maximum bytecode size of a trivial method to be inlined") \ "The maximum bytecode size of a trivial method to be inlined") \
\ \
product(intx, MinInliningThreshold, 250, \ product(intx, MinInliningThreshold, 250, \
"min. invocation count a method needs to have to be inlined") \ "The minimum invocation count a method needs to have to be " \
"inlined") \
\ \
develop(intx, MethodHistogramCutoff, 100, \ develop(intx, MethodHistogramCutoff, 100, \
"cutoff value for method invoc. histogram (+CountCalls)") \ "The cutoff value for method invocation histogram (+CountCalls)") \
\ \
develop(intx, ProfilerNumberOfInterpretedMethods, 25, \ develop(intx, ProfilerNumberOfInterpretedMethods, 25, \
"# of interpreted methods to show in profile") \ "Number of interpreted methods to show in profile") \
\ \
develop(intx, ProfilerNumberOfCompiledMethods, 25, \ develop(intx, ProfilerNumberOfCompiledMethods, 25, \
"# of compiled methods to show in profile") \ "Number of compiled methods to show in profile") \
\ \
develop(intx, ProfilerNumberOfStubMethods, 25, \ develop(intx, ProfilerNumberOfStubMethods, 25, \
"# of stub methods to show in profile") \ "Number of stub methods to show in profile") \
\ \
develop(intx, ProfilerNumberOfRuntimeStubNodes, 25, \ develop(intx, ProfilerNumberOfRuntimeStubNodes, 25, \
"# of runtime stub nodes to show in profile") \ "Number of runtime stub nodes to show in profile") \
\ \
product(intx, ProfileIntervalsTicks, 100, \ product(intx, ProfileIntervalsTicks, 100, \
"# of ticks between printing of interval profile " \ "Number of ticks between printing of interval profile " \
"(+ProfileIntervals)") \ "(+ProfileIntervals)") \
\ \
notproduct(intx, ScavengeALotInterval, 1, \ notproduct(intx, ScavengeALotInterval, 1, \
...@@ -2970,7 +3002,7 @@ class CommandLineFlags { ...@@ -2970,7 +3002,7 @@ class CommandLineFlags {
\ \
develop(intx, MinSleepInterval, 1, \ develop(intx, MinSleepInterval, 1, \
"Minimum sleep() interval (milliseconds) when " \ "Minimum sleep() interval (milliseconds) when " \
"ConvertSleepToYield is off (used for SOLARIS)") \ "ConvertSleepToYield is off (used for Solaris)") \
\ \
develop(intx, ProfilerPCTickThreshold, 15, \ develop(intx, ProfilerPCTickThreshold, 15, \
"Number of ticks in a PC buckets to be a hotspot") \ "Number of ticks in a PC buckets to be a hotspot") \
...@@ -2985,22 +3017,22 @@ class CommandLineFlags { ...@@ -2985,22 +3017,22 @@ class CommandLineFlags {
"Mark nmethods non-entrant at registration") \ "Mark nmethods non-entrant at registration") \
\ \
diagnostic(intx, MallocVerifyInterval, 0, \ diagnostic(intx, MallocVerifyInterval, 0, \
"if non-zero, verify C heap after every N calls to " \ "If non-zero, verify C heap after every N calls to " \
"malloc/realloc/free") \ "malloc/realloc/free") \
\ \
diagnostic(intx, MallocVerifyStart, 0, \ diagnostic(intx, MallocVerifyStart, 0, \
"if non-zero, start verifying C heap after Nth call to " \ "If non-zero, start verifying C heap after Nth call to " \
"malloc/realloc/free") \ "malloc/realloc/free") \
\ \
diagnostic(uintx, MallocMaxTestWords, 0, \ diagnostic(uintx, MallocMaxTestWords, 0, \
"if non-zero, max # of Words that malloc/realloc can allocate " \ "If non-zero, maximum number of words that malloc/realloc can " \
"(for testing only)") \ "allocate (for testing only)") \
\ \
product(intx, TypeProfileWidth, 2, \ product(intx, TypeProfileWidth, 2, \
"number of receiver types to record in call/cast profile") \ "Number of receiver types to record in call/cast profile") \
\ \
develop(intx, BciProfileWidth, 2, \ develop(intx, BciProfileWidth, 2, \
"number of return bci's to record in ret profile") \ "Number of return bci's to record in ret profile") \
\ \
product(intx, PerMethodRecompilationCutoff, 400, \ product(intx, PerMethodRecompilationCutoff, 400, \
"After recompiling N times, stay in the interpreter (-1=>'Inf')") \ "After recompiling N times, stay in the interpreter (-1=>'Inf')") \
...@@ -3067,7 +3099,7 @@ class CommandLineFlags { ...@@ -3067,7 +3099,7 @@ class CommandLineFlags {
"Percentage of Eden that can be wasted") \ "Percentage of Eden that can be wasted") \
\ \
product(uintx, TLABRefillWasteFraction, 64, \ product(uintx, TLABRefillWasteFraction, 64, \
"Max TLAB waste at a refill (internal fragmentation)") \ "Maximum TLAB waste at a refill (internal fragmentation)") \
\ \
product(uintx, TLABWasteIncrement, 4, \ product(uintx, TLABWasteIncrement, 4, \
"Increment allowed waste at slow allocation") \ "Increment allowed waste at slow allocation") \
...@@ -3076,7 +3108,7 @@ class CommandLineFlags { ...@@ -3076,7 +3108,7 @@ class CommandLineFlags {
"Ratio of eden/survivor space size") \ "Ratio of eden/survivor space size") \
\ \
product(uintx, NewRatio, 2, \ product(uintx, NewRatio, 2, \
"Ratio of new/old generation sizes") \ "Ratio of old/new generation sizes") \
\ \
product_pd(uintx, NewSizeThreadIncrease, \ product_pd(uintx, NewSizeThreadIncrease, \
"Additional size added to desired new generation size per " \ "Additional size added to desired new generation size per " \
...@@ -3093,28 +3125,30 @@ class CommandLineFlags { ...@@ -3093,28 +3125,30 @@ class CommandLineFlags {
"class pointers are used") \ "class pointers are used") \
\ \
product(uintx, MinHeapFreeRatio, 40, \ product(uintx, MinHeapFreeRatio, 40, \
"Min percentage of heap free after GC to avoid expansion") \ "The minimum percentage of heap free after GC to avoid expansion")\
\ \
product(uintx, MaxHeapFreeRatio, 70, \ product(uintx, MaxHeapFreeRatio, 70, \
"Max percentage of heap free after GC to avoid shrinking") \ "The maximum percentage of heap free after GC to avoid shrinking")\
\ \
product(intx, SoftRefLRUPolicyMSPerMB, 1000, \ product(intx, SoftRefLRUPolicyMSPerMB, 1000, \
"Number of milliseconds per MB of free space in the heap") \ "Number of milliseconds per MB of free space in the heap") \
\ \
product(uintx, MinHeapDeltaBytes, ScaleForWordSize(128*K), \ product(uintx, MinHeapDeltaBytes, ScaleForWordSize(128*K), \
"Min change in heap space due to GC (in bytes)") \ "The minimum change in heap space due to GC (in bytes)") \
\ \
product(uintx, MinMetaspaceExpansion, ScaleForWordSize(256*K), \ product(uintx, MinMetaspaceExpansion, ScaleForWordSize(256*K), \
"Min expansion of Metaspace (in bytes)") \ "The minimum expansion of Metaspace (in bytes)") \
\ \
product(uintx, MinMetaspaceFreeRatio, 40, \ product(uintx, MinMetaspaceFreeRatio, 40, \
"Min percentage of Metaspace free after GC to avoid expansion") \ "The minimum percentage of Metaspace free after GC to avoid " \
"expansion") \
\ \
product(uintx, MaxMetaspaceFreeRatio, 70, \ product(uintx, MaxMetaspaceFreeRatio, 70, \
"Max percentage of Metaspace free after GC to avoid shrinking") \ "The maximum percentage of Metaspace free after GC to avoid " \
"shrinking") \
\ \
product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \ product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \
"Max expansion of Metaspace without full GC (in bytes)") \ "The maximum expansion of Metaspace without full GC (in bytes)") \
\ \
product(uintx, QueuedAllocationWarningCount, 0, \ product(uintx, QueuedAllocationWarningCount, 0, \
"Number of times an allocation that queues behind a GC " \ "Number of times an allocation that queues behind a GC " \
...@@ -3136,13 +3170,14 @@ class CommandLineFlags { ...@@ -3136,13 +3170,14 @@ class CommandLineFlags {
"Desired percentage of survivor space used after scavenge") \ "Desired percentage of survivor space used after scavenge") \
\ \
product(uintx, MarkSweepDeadRatio, 5, \ product(uintx, MarkSweepDeadRatio, 5, \
"Percentage (0-100) of the old gen allowed as dead wood." \ "Percentage (0-100) of the old gen allowed as dead wood. " \
"Serial mark sweep treats this as both the min and max value." \ "Serial mark sweep treats this as both the minimum and maximum " \
"CMS uses this value only if it falls back to mark sweep." \ "value. " \
"Par compact uses a variable scale based on the density of the" \ "CMS uses this value only if it falls back to mark sweep. " \
"generation and treats this as the max value when the heap is" \ "Par compact uses a variable scale based on the density of the " \
"either completely full or completely empty. Par compact also" \ "generation and treats this as the maximum value when the heap " \
"has a smaller default value; see arguments.cpp.") \ "is either completely full or completely empty. Par compact " \
"also has a smaller default value; see arguments.cpp.") \
\ \
product(uintx, MarkSweepAlwaysCompactCount, 4, \ product(uintx, MarkSweepAlwaysCompactCount, 4, \
"How often should we fully compact the heap (ignoring the dead " \ "How often should we fully compact the heap (ignoring the dead " \
...@@ -3161,27 +3196,27 @@ class CommandLineFlags { ...@@ -3161,27 +3196,27 @@ class CommandLineFlags {
"Census for CMS' FreeListSpace") \ "Census for CMS' FreeListSpace") \
\ \
develop(uintx, GCExpandToAllocateDelayMillis, 0, \ develop(uintx, GCExpandToAllocateDelayMillis, 0, \
"Delay in ms between expansion and allocation") \ "Delay between expansion and allocation (in milliseconds)") \
\ \
develop(uintx, GCWorkerDelayMillis, 0, \ develop(uintx, GCWorkerDelayMillis, 0, \
"Delay in ms in scheduling GC workers") \ "Delay in scheduling GC workers (in milliseconds)") \
\ \
product(intx, DeferThrSuspendLoopCount, 4000, \ product(intx, DeferThrSuspendLoopCount, 4000, \
"(Unstable) Number of times to iterate in safepoint loop " \ "(Unstable) Number of times to iterate in safepoint loop " \
" before blocking VM threads ") \ "before blocking VM threads ") \
\ \
product(intx, DeferPollingPageLoopCount, -1, \ product(intx, DeferPollingPageLoopCount, -1, \
"(Unsafe,Unstable) Number of iterations in safepoint loop " \ "(Unsafe,Unstable) Number of iterations in safepoint loop " \
"before changing safepoint polling page to RO ") \ "before changing safepoint polling page to RO ") \
\ \
product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \ product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)") \
\ \
product(bool, PSChunkLargeArrays, true, \ product(bool, PSChunkLargeArrays, true, \
"true: process large arrays in chunks") \ "Process large arrays in chunks") \
\ \
product(uintx, GCDrainStackTargetSize, 64, \ product(uintx, GCDrainStackTargetSize, 64, \
"how many entries we'll try to leave on the stack during " \ "Number of entries we will try to leave on the stack " \
"parallel GC") \ "during parallel gc") \
\ \
/* stack parameters */ \ /* stack parameters */ \
product_pd(intx, StackYellowPages, \ product_pd(intx, StackYellowPages, \
...@@ -3191,8 +3226,8 @@ class CommandLineFlags { ...@@ -3191,8 +3226,8 @@ class CommandLineFlags {
"Number of red zone (unrecoverable overflows) pages") \ "Number of red zone (unrecoverable overflows) pages") \
\ \
product_pd(intx, StackShadowPages, \ product_pd(intx, StackShadowPages, \
"Number of shadow zone (for overflow checking) pages" \ "Number of shadow zone (for overflow checking) pages " \
" this should exceed the depth of the VM and native call stack") \ "this should exceed the depth of the VM and native call stack") \
\ \
product_pd(intx, ThreadStackSize, \ product_pd(intx, ThreadStackSize, \
"Thread Stack Size (in Kbytes)") \ "Thread Stack Size (in Kbytes)") \
...@@ -3232,16 +3267,16 @@ class CommandLineFlags { ...@@ -3232,16 +3267,16 @@ class CommandLineFlags {
"Reserved code cache size (in bytes) - maximum code cache size") \ "Reserved code cache size (in bytes) - maximum code cache size") \
\ \
product(uintx, CodeCacheMinimumFreeSpace, 500*K, \ product(uintx, CodeCacheMinimumFreeSpace, 500*K, \
"When less than X space left, we stop compiling.") \ "When less than X space left, we stop compiling") \
\ \
product_pd(uintx, CodeCacheExpansionSize, \ product_pd(uintx, CodeCacheExpansionSize, \
"Code cache expansion size (in bytes)") \ "Code cache expansion size (in bytes)") \
\ \
develop_pd(uintx, CodeCacheMinBlockLength, \ develop_pd(uintx, CodeCacheMinBlockLength, \
"Minimum number of segments in a code cache block.") \ "Minimum number of segments in a code cache block") \
\ \
notproduct(bool, ExitOnFullCodeCache, false, \ notproduct(bool, ExitOnFullCodeCache, false, \
"Exit the VM if we fill the code cache.") \ "Exit the VM if we fill the code cache") \
\ \
product(bool, UseCodeCacheFlushing, true, \ product(bool, UseCodeCacheFlushing, true, \
"Attempt to clean the code cache before shutting off compiler") \ "Attempt to clean the code cache before shutting off compiler") \
...@@ -3252,31 +3287,31 @@ class CommandLineFlags { ...@@ -3252,31 +3287,31 @@ class CommandLineFlags {
"switch") \ "switch") \
\ \
develop(intx, StopInterpreterAt, 0, \ develop(intx, StopInterpreterAt, 0, \
"Stops interpreter execution at specified bytecode number") \ "Stop interpreter execution at specified bytecode number") \
\ \
develop(intx, TraceBytecodesAt, 0, \ develop(intx, TraceBytecodesAt, 0, \
"Traces bytecodes starting with specified bytecode number") \ "Trace bytecodes starting with specified bytecode number") \
\ \
/* compiler interface */ \ /* compiler interface */ \
develop(intx, CIStart, 0, \ develop(intx, CIStart, 0, \
"the id of the first compilation to permit") \ "The id of the first compilation to permit") \
\ \
develop(intx, CIStop, -1, \ develop(intx, CIStop, -1, \
"the id of the last compilation to permit") \ "The id of the last compilation to permit") \
\ \
develop(intx, CIStartOSR, 0, \ develop(intx, CIStartOSR, 0, \
"the id of the first osr compilation to permit " \ "The id of the first osr compilation to permit " \
"(CICountOSR must be on)") \ "(CICountOSR must be on)") \
\ \
develop(intx, CIStopOSR, -1, \ develop(intx, CIStopOSR, -1, \
"the id of the last osr compilation to permit " \ "The id of the last osr compilation to permit " \
"(CICountOSR must be on)") \ "(CICountOSR must be on)") \
\ \
develop(intx, CIBreakAtOSR, -1, \ develop(intx, CIBreakAtOSR, -1, \
"id of osr compilation to break at") \ "The id of osr compilation to break at") \
\ \
develop(intx, CIBreakAt, -1, \ develop(intx, CIBreakAt, -1, \
"id of compilation to break at") \ "The id of compilation to break at") \
\ \
product(ccstrlist, CompileOnly, "", \ product(ccstrlist, CompileOnly, "", \
"List of methods (pkg/class.name) to restrict compilation to") \ "List of methods (pkg/class.name) to restrict compilation to") \
...@@ -3295,11 +3330,11 @@ class CommandLineFlags { ...@@ -3295,11 +3330,11 @@ class CommandLineFlags {
"[default: ./replay_pid%p.log] (%p replaced with pid)") \ "[default: ./replay_pid%p.log] (%p replaced with pid)") \
\ \
develop(intx, ReplaySuppressInitializers, 2, \ develop(intx, ReplaySuppressInitializers, 2, \
"Controls handling of class initialization during replay" \ "Control handling of class initialization during replay: " \
"0 - don't do anything special" \ "0 - don't do anything special; " \
"1 - treat all class initializers as empty" \ "1 - treat all class initializers as empty; " \
"2 - treat class initializers for application classes as empty" \ "2 - treat class initializers for application classes as empty; " \
"3 - allow all class initializers to run during bootstrap but" \ "3 - allow all class initializers to run during bootstrap but " \
" pretend they are empty after starting replay") \ " pretend they are empty after starting replay") \
\ \
develop(bool, ReplayIgnoreInitErrors, false, \ develop(bool, ReplayIgnoreInitErrors, false, \
...@@ -3328,14 +3363,15 @@ class CommandLineFlags { ...@@ -3328,14 +3363,15 @@ class CommandLineFlags {
"0 : Normal. "\ "0 : Normal. "\
" VM chooses priorities that are appropriate for normal "\ " VM chooses priorities that are appropriate for normal "\
" applications. On Solaris NORM_PRIORITY and above are mapped "\ " applications. On Solaris NORM_PRIORITY and above are mapped "\
" to normal native priority. Java priorities below NORM_PRIORITY"\ " to normal native priority. Java priorities below " \
" map to lower native priority values. On Windows applications"\ " NORM_PRIORITY map to lower native priority values. On "\
" are allowed to use higher native priorities. However, with "\ " Windows applications are allowed to use higher native "\
" ThreadPriorityPolicy=0, VM will not use the highest possible"\ " priorities. However, with ThreadPriorityPolicy=0, VM will "\
" native priority, THREAD_PRIORITY_TIME_CRITICAL, as it may "\ " not use the highest possible native priority, "\
" interfere with system threads. On Linux thread priorities "\ " THREAD_PRIORITY_TIME_CRITICAL, as it may interfere with "\
" are ignored because the OS does not support static priority "\ " system threads. On Linux thread priorities are ignored "\
" in SCHED_OTHER scheduling class which is the only choice for"\ " because the OS does not support static priority in "\
" SCHED_OTHER scheduling class which is the only choice for "\
" non-root, non-realtime applications. "\ " non-root, non-realtime applications. "\
"1 : Aggressive. "\ "1 : Aggressive. "\
" Java thread priorities map over to the entire range of "\ " Java thread priorities map over to the entire range of "\
...@@ -3366,16 +3402,35 @@ class CommandLineFlags { ...@@ -3366,16 +3402,35 @@ class CommandLineFlags {
product(bool, VMThreadHintNoPreempt, false, \ product(bool, VMThreadHintNoPreempt, false, \
"(Solaris only) Give VM thread an extra quanta") \ "(Solaris only) Give VM thread an extra quanta") \
\ \
product(intx, JavaPriority1_To_OSPriority, -1, "Map Java priorities to OS priorities") \ product(intx, JavaPriority1_To_OSPriority, -1, \
product(intx, JavaPriority2_To_OSPriority, -1, "Map Java priorities to OS priorities") \ "Map Java priorities to OS priorities") \
product(intx, JavaPriority3_To_OSPriority, -1, "Map Java priorities to OS priorities") \ \
product(intx, JavaPriority4_To_OSPriority, -1, "Map Java priorities to OS priorities") \ product(intx, JavaPriority2_To_OSPriority, -1, \
product(intx, JavaPriority5_To_OSPriority, -1, "Map Java priorities to OS priorities") \ "Map Java priorities to OS priorities") \
product(intx, JavaPriority6_To_OSPriority, -1, "Map Java priorities to OS priorities") \ \
product(intx, JavaPriority7_To_OSPriority, -1, "Map Java priorities to OS priorities") \ product(intx, JavaPriority3_To_OSPriority, -1, \
product(intx, JavaPriority8_To_OSPriority, -1, "Map Java priorities to OS priorities") \ "Map Java priorities to OS priorities") \
product(intx, JavaPriority9_To_OSPriority, -1, "Map Java priorities to OS priorities") \ \
product(intx, JavaPriority10_To_OSPriority,-1, "Map Java priorities to OS priorities") \ product(intx, JavaPriority4_To_OSPriority, -1, \
"Map Java priorities to OS priorities") \
\
product(intx, JavaPriority5_To_OSPriority, -1, \
"Map Java priorities to OS priorities") \
\
product(intx, JavaPriority6_To_OSPriority, -1, \
"Map Java priorities to OS priorities") \
\
product(intx, JavaPriority7_To_OSPriority, -1, \
"Map Java priorities to OS priorities") \
\
product(intx, JavaPriority8_To_OSPriority, -1, \
"Map Java priorities to OS priorities") \
\
product(intx, JavaPriority9_To_OSPriority, -1, \
"Map Java priorities to OS priorities") \
\
product(intx, JavaPriority10_To_OSPriority,-1, \
"Map Java priorities to OS priorities") \
\ \
experimental(bool, UseCriticalJavaThreadPriority, false, \ experimental(bool, UseCriticalJavaThreadPriority, false, \
"Java thread priority 10 maps to critical scheduling priority") \ "Java thread priority 10 maps to critical scheduling priority") \
...@@ -3406,37 +3461,38 @@ class CommandLineFlags { ...@@ -3406,37 +3461,38 @@ class CommandLineFlags {
"Used with +TraceLongCompiles") \ "Used with +TraceLongCompiles") \
\ \
product(intx, StarvationMonitorInterval, 200, \ product(intx, StarvationMonitorInterval, 200, \
"Pause between each check in ms") \ "Pause between each check (in milliseconds)") \
\ \
/* recompilation */ \ /* recompilation */ \
product_pd(intx, CompileThreshold, \ product_pd(intx, CompileThreshold, \
"number of interpreted method invocations before (re-)compiling") \ "number of interpreted method invocations before (re-)compiling") \
\ \
product_pd(intx, BackEdgeThreshold, \ product_pd(intx, BackEdgeThreshold, \
"Interpreter Back edge threshold at which an OSR compilation is invoked")\ "Interpreter Back edge threshold at which an OSR compilation is " \
"invoked") \
\ \
product(intx, Tier0InvokeNotifyFreqLog, 7, \ product(intx, Tier0InvokeNotifyFreqLog, 7, \
"Interpreter (tier 0) invocation notification frequency.") \ "Interpreter (tier 0) invocation notification frequency") \
\ \
product(intx, Tier2InvokeNotifyFreqLog, 11, \ product(intx, Tier2InvokeNotifyFreqLog, 11, \
"C1 without MDO (tier 2) invocation notification frequency.") \ "C1 without MDO (tier 2) invocation notification frequency") \
\ \
product(intx, Tier3InvokeNotifyFreqLog, 10, \ product(intx, Tier3InvokeNotifyFreqLog, 10, \
"C1 with MDO profiling (tier 3) invocation notification " \ "C1 with MDO profiling (tier 3) invocation notification " \
"frequency.") \ "frequency") \
\ \
product(intx, Tier23InlineeNotifyFreqLog, 20, \ product(intx, Tier23InlineeNotifyFreqLog, 20, \
"Inlinee invocation (tiers 2 and 3) notification frequency") \ "Inlinee invocation (tiers 2 and 3) notification frequency") \
\ \
product(intx, Tier0BackedgeNotifyFreqLog, 10, \ product(intx, Tier0BackedgeNotifyFreqLog, 10, \
"Interpreter (tier 0) invocation notification frequency.") \ "Interpreter (tier 0) invocation notification frequency") \
\ \
product(intx, Tier2BackedgeNotifyFreqLog, 14, \ product(intx, Tier2BackedgeNotifyFreqLog, 14, \
"C1 without MDO (tier 2) invocation notification frequency.") \ "C1 without MDO (tier 2) invocation notification frequency") \
\ \
product(intx, Tier3BackedgeNotifyFreqLog, 13, \ product(intx, Tier3BackedgeNotifyFreqLog, 13, \
"C1 with MDO profiling (tier 3) invocation notification " \ "C1 with MDO profiling (tier 3) invocation notification " \
"frequency.") \ "frequency") \
\ \
product(intx, Tier2CompileThreshold, 0, \ product(intx, Tier2CompileThreshold, 0, \
"threshold at which tier 2 compilation is invoked") \ "threshold at which tier 2 compilation is invoked") \
...@@ -3453,7 +3509,7 @@ class CommandLineFlags { ...@@ -3453,7 +3509,7 @@ class CommandLineFlags {
\ \
product(intx, Tier3CompileThreshold, 2000, \ product(intx, Tier3CompileThreshold, 2000, \
"Threshold at which tier 3 compilation is invoked (invocation " \ "Threshold at which tier 3 compilation is invoked (invocation " \
"minimum must be satisfied.") \ "minimum must be satisfied") \
\ \
product(intx, Tier3BackEdgeThreshold, 60000, \ product(intx, Tier3BackEdgeThreshold, 60000, \
"Back edge threshold at which tier 3 OSR compilation is invoked") \ "Back edge threshold at which tier 3 OSR compilation is invoked") \
...@@ -3467,7 +3523,7 @@ class CommandLineFlags { ...@@ -3467,7 +3523,7 @@ class CommandLineFlags {
\ \
product(intx, Tier4CompileThreshold, 15000, \ product(intx, Tier4CompileThreshold, 15000, \
"Threshold at which tier 4 compilation is invoked (invocation " \ "Threshold at which tier 4 compilation is invoked (invocation " \
"minimum must be satisfied.") \ "minimum must be satisfied") \
\ \
product(intx, Tier4BackEdgeThreshold, 40000, \ product(intx, Tier4BackEdgeThreshold, 40000, \
"Back edge threshold at which tier 4 OSR compilation is invoked") \ "Back edge threshold at which tier 4 OSR compilation is invoked") \
...@@ -3496,12 +3552,12 @@ class CommandLineFlags { ...@@ -3496,12 +3552,12 @@ class CommandLineFlags {
"Stop at given compilation level") \ "Stop at given compilation level") \
\ \
product(intx, Tier0ProfilingStartPercentage, 200, \ product(intx, Tier0ProfilingStartPercentage, 200, \
"Start profiling in interpreter if the counters exceed tier 3" \ "Start profiling in interpreter if the counters exceed tier 3 " \
"thresholds by the specified percentage") \ "thresholds by the specified percentage") \
\ \
product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \ product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \
"Increase the compile threshold for C1 compilation if the code" \ "Increase the compile threshold for C1 compilation if the code " \
"cache is filled by the specified percentage.") \ "cache is filled by the specified percentage") \
\ \
product(intx, TieredRateUpdateMinTime, 1, \ product(intx, TieredRateUpdateMinTime, 1, \
"Minimum rate sampling interval (in milliseconds)") \ "Minimum rate sampling interval (in milliseconds)") \
...@@ -3516,24 +3572,26 @@ class CommandLineFlags { ...@@ -3516,24 +3572,26 @@ class CommandLineFlags {
"Print tiered events notifications") \ "Print tiered events notifications") \
\ \
product_pd(intx, OnStackReplacePercentage, \ product_pd(intx, OnStackReplacePercentage, \
"NON_TIERED number of method invocations/branches (expressed as %"\ "NON_TIERED number of method invocations/branches (expressed as " \
"of CompileThreshold) before (re-)compiling OSR code") \ "% of CompileThreshold) before (re-)compiling OSR code") \
\ \
product(intx, InterpreterProfilePercentage, 33, \ product(intx, InterpreterProfilePercentage, 33, \
"NON_TIERED number of method invocations/branches (expressed as %"\ "NON_TIERED number of method invocations/branches (expressed as " \
"of CompileThreshold) before profiling in the interpreter") \ "% of CompileThreshold) before profiling in the interpreter") \
\ \
develop(intx, MaxRecompilationSearchLength, 10, \ develop(intx, MaxRecompilationSearchLength, 10, \
"max. # frames to inspect searching for recompilee") \ "The maximum number of frames to inspect when searching for " \
"recompilee") \
\ \
develop(intx, MaxInterpretedSearchLength, 3, \ develop(intx, MaxInterpretedSearchLength, 3, \
"max. # interp. frames to skip when searching for recompilee") \ "The maximum number of interpreted frames to skip when searching "\
"for recompilee") \
\ \
develop(intx, DesiredMethodLimit, 8000, \ develop(intx, DesiredMethodLimit, 8000, \
"desired max. method size (in bytecodes) after inlining") \ "The desired maximum method size (in bytecodes) after inlining") \
\ \
develop(intx, HugeMethodLimit, 8000, \ develop(intx, HugeMethodLimit, 8000, \
"don't compile methods larger than this if " \ "Don't compile methods larger than this if " \
"+DontCompileHugeMethods") \ "+DontCompileHugeMethods") \
\ \
/* New JDK 1.4 reflection implementation */ \ /* New JDK 1.4 reflection implementation */ \
...@@ -3555,7 +3613,7 @@ class CommandLineFlags { ...@@ -3555,7 +3613,7 @@ class CommandLineFlags {
"in InvocationTargetException. See 6531596") \ "in InvocationTargetException. See 6531596") \
\ \
develop(bool, VerifyLambdaBytecodes, false, \ develop(bool, VerifyLambdaBytecodes, false, \
"Force verification of jdk 8 lambda metafactory bytecodes.") \ "Force verification of jdk 8 lambda metafactory bytecodes") \
\ \
develop(intx, FastSuperclassLimit, 8, \ develop(intx, FastSuperclassLimit, 8, \
"Depth of hardwired instanceof accelerator array") \ "Depth of hardwired instanceof accelerator array") \
...@@ -3579,18 +3637,19 @@ class CommandLineFlags { ...@@ -3579,18 +3637,19 @@ class CommandLineFlags {
/* flags for performance data collection */ \ /* flags for performance data collection */ \
\ \
product(bool, UsePerfData, falseInEmbedded, \ product(bool, UsePerfData, falseInEmbedded, \
"Flag to disable jvmstat instrumentation for performance testing" \ "Flag to disable jvmstat instrumentation for performance testing "\
"and problem isolation purposes.") \ "and problem isolation purposes") \
\ \
product(bool, PerfDataSaveToFile, false, \ product(bool, PerfDataSaveToFile, false, \
"Save PerfData memory to hsperfdata_<pid> file on exit") \ "Save PerfData memory to hsperfdata_<pid> file on exit") \
\ \
product(ccstr, PerfDataSaveFile, NULL, \ product(ccstr, PerfDataSaveFile, NULL, \
"Save PerfData memory to the specified absolute pathname," \ "Save PerfData memory to the specified absolute pathname. " \
"%p in the file name if present will be replaced by pid") \ "The string %p in the file name (if present) " \
"will be replaced by pid") \
\ \
product(intx, PerfDataSamplingInterval, 50 /*ms*/, \ product(intx, PerfDataSamplingInterval, 50, \
"Data sampling interval in milliseconds") \ "Data sampling interval (in milliseconds)") \
\ \
develop(bool, PerfTraceDataCreation, false, \ develop(bool, PerfTraceDataCreation, false, \
"Trace creation of Performance Data Entries") \ "Trace creation of Performance Data Entries") \
...@@ -3615,7 +3674,7 @@ class CommandLineFlags { ...@@ -3615,7 +3674,7 @@ class CommandLineFlags {
"Bypass Win32 file system criteria checks (Windows Only)") \ "Bypass Win32 file system criteria checks (Windows Only)") \
\ \
product(intx, UnguardOnExecutionViolation, 0, \ product(intx, UnguardOnExecutionViolation, 0, \
"Unguard page and retry on no-execute fault (Win32 only)" \ "Unguard page and retry on no-execute fault (Win32 only) " \
"0=off, 1=conservative, 2=aggressive") \ "0=off, 1=conservative, 2=aggressive") \
\ \
/* Serviceability Support */ \ /* Serviceability Support */ \
...@@ -3624,7 +3683,7 @@ class CommandLineFlags { ...@@ -3624,7 +3683,7 @@ class CommandLineFlags {
"Create JMX Management Server") \ "Create JMX Management Server") \
\ \
product(bool, DisableAttachMechanism, false, \ product(bool, DisableAttachMechanism, false, \
"Disable mechanism that allows tools to attach to this VM") \ "Disable mechanism that allows tools to attach to this VM") \
\ \
product(bool, StartAttachListener, false, \ product(bool, StartAttachListener, false, \
"Always start Attach Listener at VM startup") \ "Always start Attach Listener at VM startup") \
...@@ -3647,9 +3706,9 @@ class CommandLineFlags { ...@@ -3647,9 +3706,9 @@ class CommandLineFlags {
"Require shared spaces for metadata") \ "Require shared spaces for metadata") \
\ \
product(bool, DumpSharedSpaces, false, \ product(bool, DumpSharedSpaces, false, \
"Special mode: JVM reads a class list, loads classes, builds " \ "Special mode: JVM reads a class list, loads classes, builds " \
"shared spaces, and dumps the shared spaces to a file to be " \ "shared spaces, and dumps the shared spaces to a file to be " \
"used in future JVM runs.") \ "used in future JVM runs") \
\ \
product(bool, PrintSharedSpaces, false, \ product(bool, PrintSharedSpaces, false, \
"Print usage of shared spaces") \ "Print usage of shared spaces") \
...@@ -3722,7 +3781,7 @@ class CommandLineFlags { ...@@ -3722,7 +3781,7 @@ class CommandLineFlags {
"Relax the access control checks in the verifier") \ "Relax the access control checks in the verifier") \
\ \
diagnostic(bool, PrintDTraceDOF, false, \ diagnostic(bool, PrintDTraceDOF, false, \
"Print the DTrace DOF passed to the system for JSDT probes") \ "Print the DTrace DOF passed to the system for JSDT probes") \
\ \
product(uintx, StringTableSize, defaultStringTableSize, \ product(uintx, StringTableSize, defaultStringTableSize, \
"Number of buckets in the interned String table") \ "Number of buckets in the interned String table") \
...@@ -3738,8 +3797,8 @@ class CommandLineFlags { ...@@ -3738,8 +3797,8 @@ class CommandLineFlags {
\ \
product(bool, UseVMInterruptibleIO, false, \ product(bool, UseVMInterruptibleIO, false, \
"(Unstable, Solaris-specific) Thread interrupt before or with " \ "(Unstable, Solaris-specific) Thread interrupt before or with " \
"EINTR for I/O operations results in OS_INTRPT. The default value"\ "EINTR for I/O operations results in OS_INTRPT. The default " \
" of this flag is true for JDK 6 and earlier") \ "value of this flag is true for JDK 6 and earlier") \
\ \
diagnostic(bool, WhiteBoxAPI, false, \ diagnostic(bool, WhiteBoxAPI, false, \
"Enable internal testing APIs") \ "Enable internal testing APIs") \
...@@ -3760,6 +3819,7 @@ class CommandLineFlags { ...@@ -3760,6 +3819,7 @@ class CommandLineFlags {
\ \
product(bool, EnableTracing, false, \ product(bool, EnableTracing, false, \
"Enable event-based tracing") \ "Enable event-based tracing") \
\
product(bool, UseLockedTracing, false, \ product(bool, UseLockedTracing, false, \
"Use locked-tracing when doing event-based tracing") "Use locked-tracing when doing event-based tracing")
......
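The descriptions above are the strings printed next to each flag by -XX:+PrintFlagsFinal. As a quick, illustrative sketch (not part of this changeset; the class name is hypothetical and the flag choice arbitrary), the test library used later in this commit can spawn a VM with one of these flags and read its final value back:

import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.ProcessTools;

public class PrintFlagsFinalExample {
    public static void main(String[] args) throws Exception {
        // Spawn a child VM that overrides one of the flags described above
        // and dumps the final flag table.
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
            "-XX:PerfDataSamplingInterval=100",   // sampling interval in milliseconds
            "-XX:+PrintFlagsFinal",
            "-version");
        OutputAnalyzer output = new OutputAnalyzer(pb.start());
        // Pull the final value back out of the PrintFlagsFinal table.
        String value = output.firstMatch(".* PerfDataSamplingInterval .* :?= (\\S+).*", 1);
        System.out.println("PerfDataSamplingInterval = " + value);
        output.shouldHaveExitValue(0);
    }
}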
@@ -368,8 +368,15 @@ VirtualSpace::VirtualSpace() {
bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if(!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

@@ -390,7 +397,7 @@ bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
@@ -966,17 +973,52 @@ void TestReservedSpace_test() {
class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1));
    }
  }

 public:
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

@@ -986,7 +1028,10 @@ class TestVirtualSpace : AllStatic {
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

@@ -1042,9 +1087,40 @@ class TestVirtualSpace : AllStatic {
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify that committing works as expected when we
    // force VirtualSpace to disable large pages at the different stages
    // (Disable, Reserve, Commit).
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};
...
@@ -178,6 +178,7 @@ class VirtualSpace VALUE_OBJ_CLASS_SPEC {
 public:
  // Initialization
  VirtualSpace();
  bool initialize_with_granularity(ReservedSpace rs, size_t committed_byte_size, size_t max_commit_granularity);
  bool initialize(ReservedSpace rs, size_t committed_byte_size);

  // Destruction
...
@@ -716,11 +716,17 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
  nonstatic_field(PlaceholderEntry, _loader_data, ClassLoaderData*) \
 \
  /**************************/ \
  /* ProtectionDomainEntry */ \
  /**************************/ \
 \
  nonstatic_field(ProtectionDomainEntry, _next, ProtectionDomainEntry*) \
  nonstatic_field(ProtectionDomainEntry, _pd_cache, ProtectionDomainCacheEntry*) \
 \
  /*******************************/ \
  /* ProtectionDomainCacheEntry */ \
  /*******************************/ \
 \
  nonstatic_field(ProtectionDomainCacheEntry, _literal, oop) \
 \
  /*************************/ \
  /* LoaderConstraintEntry */ \

@@ -1563,6 +1569,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
  declare_toplevel_type(SystemDictionary) \
  declare_toplevel_type(vmSymbols) \
  declare_toplevel_type(ProtectionDomainEntry) \
  declare_toplevel_type(ProtectionDomainCacheEntry) \
 \
  declare_toplevel_type(GenericGrowableArray) \
  declare_toplevel_type(GrowableArray<int>) \
...
@@ -148,6 +148,12 @@ public:
  static void track_code_cache_memory_usage() {
    track_memory_pool_usage(_code_heap_pool);
  }

  static void track_metaspace_memory_usage() {
    track_memory_pool_usage(_metaspace_pool);
  }

  static void track_compressed_class_memory_usage() {
    track_memory_pool_usage(_compressed_class_pool);
  }

  static void track_memory_pool_usage(MemoryPool* pool);

  static void gc_begin(bool fullGC, bool recordGCBeginTime,
...
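The two new hooks publish Metaspace and compressed class space usage through the existing MemoryPool plumbing. A minimal sketch of observing the result from Java via the standard java.lang.management API (the name matching is an assumption for illustration; exact pool names are a VM detail):

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryPoolMXBean;

public class MetaspacePoolsProbe {
    public static void main(String[] args) {
        // Print the usage of the pools that the VM-side hooks above keep
        // current; names like "Metaspace" and "Compressed Class Space" are
        // assumed here, not guaranteed by this changeset.
        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
            String name = pool.getName();
            if (name.contains("Metaspace") || name.contains("Compressed")) {
                System.out.println(name + ": " + pool.getUsage());
            }
        }
    }
}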
@@ -326,12 +326,15 @@ typedef jlong s8;
const int max_method_code_size = 64*K - 1;  // JVM spec, 2nd ed. section 4.8.1 (p.134)

// Default ProtectionDomainCacheSize values
const int defaultProtectionDomainCacheSize = NOT_LP64(137) LP64_ONLY(2017);

//----------------------------------------------------------------------------------------------------
// Default and minimum StringTableSize values
const int defaultStringTableSize = NOT_LP64(1009) LP64_ONLY(60013);
const int minimumStringTableSize = 1009;

const int defaultSymbolTableSize = 20011;
const int minimumSymbolTableSize = 1009;
...
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/* @test TestLargePagesFlags
 * @summary Tests how large pages are chosen depending on the given large pages flag combinations.
* @library /testlibrary
* @run main TestLargePagesFlags
*/
import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.Platform;
import com.oracle.java.testlibrary.ProcessTools;
import java.util.ArrayList;
public class TestLargePagesFlags {
public static void main(String [] args) throws Exception {
if (!Platform.isLinux()) {
System.out.println("Skipping. TestLargePagesFlags has only been implemented for Linux.");
return;
}
testUseTransparentHugePages();
testUseHugeTLBFS();
testUseSHM();
testCombinations();
}
public static void testUseTransparentHugePages() throws Exception {
if (!canUse(UseTransparentHugePages(true))) {
System.out.println("Skipping testUseTransparentHugePages");
return;
}
// -XX:-UseLargePages overrides all other flags.
new FlagTester()
.use(UseLargePages(false),
UseTransparentHugePages(true))
.expect(
UseLargePages(false),
UseTransparentHugePages(false),
UseHugeTLBFS(false),
UseSHM(false));
// Explicitly turn on UseTransparentHugePages.
new FlagTester()
.use(UseTransparentHugePages(true))
.expect(
UseLargePages(true),
UseTransparentHugePages(true),
UseHugeTLBFS(false),
UseSHM(false));
new FlagTester()
.use(UseLargePages(true),
UseTransparentHugePages(true))
.expect(
UseLargePages(true),
UseTransparentHugePages(true),
UseHugeTLBFS(false),
UseSHM(false));
// Setting a specific large pages flag will turn
// off heuristics to choose large pages type.
new FlagTester()
.use(UseLargePages(true),
UseTransparentHugePages(false))
.expect(
UseLargePages(false),
UseTransparentHugePages(false),
UseHugeTLBFS(false),
UseSHM(false));
// Don't turn on UseTransparentHugePages
// unless the user explicitly asks for them.
new FlagTester()
.use(UseLargePages(true))
.expect(
UseTransparentHugePages(false));
}
public static void testUseHugeTLBFS() throws Exception {
if (!canUse(UseHugeTLBFS(true))) {
System.out.println("Skipping testUseHugeTLBFS");
return;
}
// -XX:-UseLargePages overrides all other flags.
new FlagTester()
.use(UseLargePages(false),
UseHugeTLBFS(true))
.expect(
UseLargePages(false),
UseTransparentHugePages(false),
UseHugeTLBFS(false),
UseSHM(false));
// Explicitly turn on UseHugeTLBFS.
new FlagTester()
.use(UseHugeTLBFS(true))
.expect(
UseLargePages(true),
UseTransparentHugePages(false),
UseHugeTLBFS(true),
UseSHM(false));
new FlagTester()
.use(UseLargePages(true),
UseHugeTLBFS(true))
.expect(
UseLargePages(true),
UseTransparentHugePages(false),
UseHugeTLBFS(true),
UseSHM(false));
// Setting a specific large pages flag will turn
// off heuristics to choose large pages type.
new FlagTester()
.use(UseLargePages(true),
UseHugeTLBFS(false))
.expect(
UseLargePages(false),
UseTransparentHugePages(false),
UseHugeTLBFS(false),
UseSHM(false));
// Using UseLargePages will default to UseHugeTLBFS large pages.
new FlagTester()
.use(UseLargePages(true))
.expect(
UseLargePages(true),
UseTransparentHugePages(false),
UseHugeTLBFS(true),
UseSHM(false));
}
public static void testUseSHM() throws Exception {
if (!canUse(UseSHM(true))) {
System.out.println("Skipping testUseSHM");
return;
}
// -XX:-UseLargePages overrides all other flags.
new FlagTester()
.use(UseLargePages(false),
UseSHM(true))
.expect(
UseLargePages(false),
UseTransparentHugePages(false),
UseHugeTLBFS(false),
UseSHM(false));
// Explicitly turn on UseSHM.
new FlagTester()
.use(UseSHM(true))
.expect(
UseLargePages(true),
UseTransparentHugePages(false),
UseHugeTLBFS(false),
UseSHM(true));
new FlagTester()
.use(UseLargePages(true),
UseSHM(true))
.expect(
UseLargePages(true),
UseTransparentHugePages(false),
UseHugeTLBFS(false),
UseSHM(true));
// Setting a specific large pages flag will turn
// off heuristics to choose large pages type.
new FlagTester()
.use(UseLargePages(true),
UseSHM(false))
.expect(
UseLargePages(false),
UseTransparentHugePages(false),
UseHugeTLBFS(false),
UseSHM(false));
// Setting UseLargePages can allow the system to choose
// UseHugeTLBFS instead of UseSHM, but never UseTransparentHugePages.
new FlagTester()
.use(UseLargePages(true))
.expect(
UseLargePages(true),
UseTransparentHugePages(false));
}
public static void testCombinations() throws Exception {
if (!canUse(UseSHM(true)) || !canUse(UseHugeTLBFS(true))) {
System.out.println("Skipping testUseHugeTLBFSAndUseSHMCombination");
return;
}
// UseHugeTLBFS takes precedence over SHM.
new FlagTester()
.use(UseLargePages(true),
UseHugeTLBFS(true),
UseSHM(true))
.expect(
UseLargePages(true),
UseTransparentHugePages(false),
UseHugeTLBFS(true),
UseSHM(false));
new FlagTester()
.use(UseLargePages(true),
UseHugeTLBFS(false),
UseSHM(true))
.expect(
UseLargePages(true),
UseTransparentHugePages(false),
UseHugeTLBFS(false),
UseSHM(true));
new FlagTester()
.use(UseLargePages(true),
UseHugeTLBFS(true),
UseSHM(false))
.expect(
UseLargePages(true),
UseTransparentHugePages(false),
UseHugeTLBFS(true),
UseSHM(false));
new FlagTester()
.use(UseLargePages(true),
UseHugeTLBFS(false),
UseSHM(false))
.expect(
UseLargePages(false),
UseTransparentHugePages(false),
UseHugeTLBFS(false),
UseSHM(false));
if (!canUse(UseTransparentHugePages(true))) {
return;
}
// UseTransparentHugePages takes precedence.
new FlagTester()
.use(UseLargePages(true),
UseTransparentHugePages(true),
UseHugeTLBFS(true),
UseSHM(true))
.expect(
UseLargePages(true),
UseTransparentHugePages(true),
UseHugeTLBFS(false),
UseSHM(false));
new FlagTester()
.use(UseTransparentHugePages(true),
UseHugeTLBFS(true),
UseSHM(true))
.expect(
UseLargePages(true),
UseTransparentHugePages(true),
UseHugeTLBFS(false),
UseSHM(false));
}
private static class FlagTester {
private Flag [] useFlags;
public FlagTester use(Flag... useFlags) {
this.useFlags = useFlags;
return this;
}
public void expect(Flag... expectedFlags) throws Exception {
if (useFlags == null) {
throw new IllegalStateException("Must run use() before expect()");
}
OutputAnalyzer output = executeNewJVM(useFlags);
for (Flag flag : expectedFlags) {
System.out.println("Looking for: " + flag.flagString());
String strValue = output.firstMatch(".* " + flag.name() + " .* :?= (\\S+).*", 1);
if (strValue == null) {
throw new RuntimeException("Flag " + flag.name() + " couldn't be found");
}
if (!flag.value().equals(strValue)) {
throw new RuntimeException("Wrong value for: " + flag.name()
+ " expected: " + flag.value()
+ " got: " + strValue);
}
}
output.shouldHaveExitValue(0);
}
}
private static OutputAnalyzer executeNewJVM(Flag... flags) throws Exception {
ArrayList<String> args = new ArrayList<>();
for (Flag flag : flags) {
args.add(flag.flagString());
}
args.add("-XX:+PrintFlagsFinal");
args.add("-version");
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args.toArray(new String[args.size()]));
OutputAnalyzer output = new OutputAnalyzer(pb.start());
return output;
}
private static boolean canUse(Flag flag) {
try {
new FlagTester().use(flag).expect(flag);
} catch (Exception e) {
return false;
}
return true;
}
private static Flag UseLargePages(boolean value) {
return new BooleanFlag("UseLargePages", value);
}
private static Flag UseTransparentHugePages(boolean value) {
return new BooleanFlag("UseTransparentHugePages", value);
}
private static Flag UseHugeTLBFS(boolean value) {
return new BooleanFlag("UseHugeTLBFS", value);
}
private static Flag UseSHM(boolean value) {
return new BooleanFlag("UseSHM", value);
}
private static class BooleanFlag implements Flag {
private String name;
private boolean value;
BooleanFlag(String name, boolean value) {
this.name = name;
this.value = value;
}
public String flagString() {
return "-XX:" + (value ? "+" : "-") + name;
}
public String name() {
return name;
}
public String value() {
return Boolean.toString(value);
}
}
private static interface Flag {
public String flagString();
public String name();
public String value();
}
}
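For reference, the firstMatch() pattern built by FlagTester.expect() assumes the tabular -XX:+PrintFlagsFinal layout, where ":=" marks a value changed from the default. A self-contained sketch of that same regular expression against an illustrative output line (the column layout shown is approximate and varies between builds):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class FlagRegexDemo {
    public static void main(String[] args) {
        // An illustrative -XX:+PrintFlagsFinal line; ":=" marks a non-default value.
        String line = "     bool UseLargePages                  := true            {pd product}";
        // Same pattern shape as FlagTester builds for each expected flag.
        Matcher m = Pattern.compile(".* UseLargePages .* :?= (\\S+).*").matcher(line);
        if (m.matches()) {
            System.out.println("UseLargePages = " + m.group(1));  // prints: true
        }
    }
}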