Commit 42926b73 authored by X xlu

Merge

......@@ -22,10 +22,9 @@
*
*/
//
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)
//
#ifndef TIERED
define_pd_global(bool, BackgroundCompilation, true );
define_pd_global(bool, CICompileOSR, true );
......@@ -48,27 +47,24 @@ define_pd_global(intx, OnStackReplacePercentage, 1400 );
define_pd_global(bool, UseTLAB, true );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, FreqInlineSize, 325 );
define_pd_global(intx, NewRatio, 8 ); // Design center runs on 1.3.1
define_pd_global(bool, ResizeTLAB, true );
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx,CodeCacheMinBlockLength, 1);
define_pd_global(uintx, PermSize, 12*M );
define_pd_global(uintx, MaxPermSize, 64*M );
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(uintx,PermSize, 12*M );
define_pd_global(uintx,MaxPermSize, 64*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(intx, NewSizeThreadIncrease, 16*K );
define_pd_global(uintx, DefaultMaxRAM, 1*G);
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(intx, InitialCodeCacheSize, 160*K);
#endif // TIERED
#endif // !TIERED
define_pd_global(bool, UseTypeProfile, false);
define_pd_global(bool, RoundFPResults, false);
define_pd_global(bool, LIRFillDelaySlots, true);
define_pd_global(bool, LIRFillDelaySlots, true );
define_pd_global(bool, OptimizeSinglePrecision, false);
define_pd_global(bool, CSEArrayLength, true);
define_pd_global(bool, CSEArrayLength, true );
define_pd_global(bool, TwoOperandLIRForm, false);
define_pd_global(intx, SafepointPollOffset, 0);
define_pd_global(intx, SafepointPollOffset, 0 );
......@@ -59,7 +59,6 @@ define_pd_global(intx, FLOATPRESSURE, 52); // C2 on V9 gets to u
define_pd_global(intx, FreqInlineSize, 175);
define_pd_global(intx, INTPRESSURE, 48); // large register set
define_pd_global(intx, InteriorEntryAlignment, 16); // = CodeEntryAlignment
define_pd_global(intx, NewRatio, 2);
define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
// The default setting 16/16 seems to work best.
// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
......@@ -88,20 +87,20 @@ define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
define_pd_global(uintx, DefaultMaxRAM, 32*G);
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
#else
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(intx, InitialCodeCacheSize, 1536*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(intx, ReservedCodeCacheSize, 32*M);
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
define_pd_global(uintx, DefaultMaxRAM, 1*G);
define_pd_global(uint64_t,MaxRAM, 4ULL*G);
#endif
define_pd_global(uintx,CodeCacheMinBlockLength, 4);
// Heap related flags
define_pd_global(uintx, PermSize, ScaleForWordSize(16*M));
define_pd_global(uintx, MaxPermSize, ScaleForWordSize(64*M));
define_pd_global(uintx,PermSize, ScaleForWordSize(16*M));
define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
......@@ -22,10 +22,8 @@
*
*/
//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
//
// For sparc we do not do call backs when a thread is in the interpreter, because the
// interpreter dispatch needs at least two instructions - first to load the dispatch address
......@@ -42,9 +40,6 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast
define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(uintx, TLABSize, 0);
define_pd_global(uintx, NewSize, ScaleForWordSize((2048 * K) + (2 * (64 * K))));
define_pd_global(intx, SurvivorRatio, 8);
define_pd_global(intx, InlineFrequencyCount, 50); // we can use more inlining on the SPARC
define_pd_global(intx, InlineSmallCode, 1500);
#ifdef _LP64
......
......@@ -22,10 +22,8 @@
*
*/
//
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)
//
#ifndef TIERED
define_pd_global(bool, BackgroundCompilation, true );
......@@ -48,27 +46,24 @@ define_pd_global(intx, Tier4BackEdgeThreshold, 100000);
define_pd_global(intx, OnStackReplacePercentage, 933 );
define_pd_global(intx, FreqInlineSize, 325 );
define_pd_global(intx, NewRatio, 12 );
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
define_pd_global(intx, InitialCodeCacheSize, 160*K);
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx,CodeCacheMinBlockLength, 1);
define_pd_global(uintx, PermSize, 12*M );
define_pd_global(uintx, MaxPermSize, 64*M );
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(uintx, DefaultMaxRAM, 1*G);
define_pd_global(uintx,PermSize, 12*M );
define_pd_global(uintx,MaxPermSize, 64*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
#endif // TIERED
#endif // !TIERED
define_pd_global(bool, UseTypeProfile, false);
define_pd_global(bool, RoundFPResults, true );
define_pd_global(bool, LIRFillDelaySlots, false);
define_pd_global(bool, OptimizeSinglePrecision, true);
define_pd_global(bool, OptimizeSinglePrecision, true );
define_pd_global(bool, CSEArrayLength, false);
define_pd_global(bool, TwoOperandLIRForm, true);
define_pd_global(bool, TwoOperandLIRForm, true );
define_pd_global(intx, SafepointPollOffset, 256);
define_pd_global(intx, SafepointPollOffset, 256 );
......@@ -22,7 +22,6 @@
*
*/
//
// Sets the default values for platform dependent flags used by the server compiler.
// (see c2_globals.hpp). Alpha-sorted.
......@@ -46,8 +45,8 @@ define_pd_global(intx, CompileThreshold, 1000);
define_pd_global(intx, CompileThreshold, 10000);
#endif // TIERED
define_pd_global(intx, Tier2CompileThreshold, 10000);
define_pd_global(intx, Tier3CompileThreshold, 20000 );
define_pd_global(intx, Tier4CompileThreshold, 40000 );
define_pd_global(intx, Tier3CompileThreshold, 20000);
define_pd_global(intx, Tier4CompileThreshold, 40000);
define_pd_global(intx, BackEdgeThreshold, 100000);
define_pd_global(intx, Tier2BackEdgeThreshold, 100000);
......@@ -61,7 +60,6 @@ define_pd_global(intx, FreqInlineSize, 325);
#ifdef AMD64
define_pd_global(intx, INTPRESSURE, 13);
define_pd_global(intx, InteriorEntryAlignment, 16);
define_pd_global(intx, NewRatio, 2);
define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, LoopUnrollLimit, 60);
// InitialCodeCacheSize derived from specjbb2000 run.
......@@ -69,11 +67,10 @@ define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multip
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
define_pd_global(uintx, DefaultMaxRAM, 32*G);
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
#else
define_pd_global(intx, INTPRESSURE, 6);
define_pd_global(intx, InteriorEntryAlignment, 4);
define_pd_global(intx, NewRatio, 8); // Design center runs on 1.3.1
define_pd_global(intx, NewSizeThreadIncrease, 4*K);
define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
// InitialCodeCacheSize derived from specjbb2000 run.
......@@ -81,7 +78,7 @@ define_pd_global(intx, InitialCodeCacheSize, 2304*K); // Integral multip
define_pd_global(intx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
define_pd_global(uintx, DefaultMaxRAM, 1*G);
define_pd_global(uint64_t,MaxRAM, 4ULL*G);
#endif // AMD64
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, RegisterCostAreaRatio, 16000);
......@@ -97,8 +94,8 @@ define_pd_global(intx, ReservedCodeCacheSize, 48*M);
define_pd_global(uintx,CodeCacheMinBlockLength, 4);
// Heap related flags
define_pd_global(uintx, PermSize, ScaleForWordSize(16*M));
define_pd_global(uintx, MaxPermSize, ScaleForWordSize(64*M));
define_pd_global(uintx,PermSize, ScaleForWordSize(16*M));
define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
......@@ -22,14 +22,13 @@
*
*/
//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
//
define_pd_global(bool, ConvertSleepToYield, true);
define_pd_global(bool, ShareVtableStubs, true);
define_pd_global(bool, CountInterpCalls, true);
define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast
......@@ -46,25 +45,20 @@ define_pd_global(intx, CodeEntryAlignment, 32);
#else
define_pd_global(intx, CodeEntryAlignment, 16);
#endif // COMPILER2
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1000);
define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this
define_pd_global(uintx, TLABSize, 0);
define_pd_global(intx, StackYellowPages, 2);
define_pd_global(intx, StackRedPages, 1);
#ifdef AMD64
define_pd_global(uintx, NewSize, ScaleForWordSize(2048 * K));
// Very large C++ stack frames using solaris-amd64 optimized builds
// due to lack of optimization caused by C++ compiler bugs
define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
#else
define_pd_global(uintx, NewSize, 1024 * K);
define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
#endif // AMD64
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1000);
define_pd_global(intx, PreInflateSpin, 10);
define_pd_global(intx, StackYellowPages, 2);
define_pd_global(intx, StackRedPages, 1);
define_pd_global(intx, PreInflateSpin, 10);
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
......@@ -23,10 +23,8 @@
*
*/
//
// Set the default values for platform dependent flags used by the
// runtime system. See globals.hpp for details of what they do.
//
define_pd_global(bool, ConvertSleepToYield, true);
define_pd_global(bool, ShareVtableStubs, true);
......@@ -37,12 +35,6 @@ define_pd_global(bool, ImplicitNullChecks, true);
define_pd_global(bool, UncommonNullCast, true);
define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(uintx, TLABSize, 0);
#ifdef _LP64
define_pd_global(uintx, NewSize, ScaleForWordSize(2048 * K));
#else
define_pd_global(uintx, NewSize, ScaleForWordSize(1024 * K));
#endif // _LP64
define_pd_global(intx, InlineFrequencyCount, 100);
define_pd_global(intx, InlineSmallCode, 1000);
define_pd_global(intx, PreInflateSpin, 10);
......
......@@ -22,10 +22,9 @@
*
*/
//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
//
define_pd_global(bool, DontYieldALot, false);
#ifdef AMD64
define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
......@@ -39,11 +38,10 @@ define_pd_global(intx, VMThreadStackSize, 512);
#endif // AMD64
define_pd_global(intx, CompilerThreadStackSize, 0);
define_pd_global(intx, SurvivorRatio, 8);
define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
define_pd_global(uintx,JVMInvokeMethodSlack, 8192);
// Only used on 64 bit platforms
define_pd_global(uintx, HeapBaseMinAddress, 2*G);
define_pd_global(uintx,HeapBaseMinAddress, 2*G);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
......@@ -22,31 +22,25 @@
*
*/
//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
//
define_pd_global(bool, DontYieldALot, true); // Determined in the design center
#ifdef AMD64
define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 1024);
define_pd_global(intx, SurvivorRatio, 6);
define_pd_global(uintx, JVMInvokeMethodSlack, 8*K);
define_pd_global(uintx,JVMInvokeMethodSlack, 8*K);
#else
// UseStackBanging is not pd
// define_pd_global(bool, UseStackBanging, true);
// ThreadStackSize 320 allows TaggedStackInterpreter and a couple of test cases
// to run while keeping the number of threads that can be created high.
define_pd_global(intx, ThreadStackSize, 320);
define_pd_global(intx, VMThreadStackSize, 512);
define_pd_global(intx, SurvivorRatio, 8);
define_pd_global(uintx, JVMInvokeMethodSlack, 10*K);
define_pd_global(uintx,JVMInvokeMethodSlack, 10*K);
#endif // AMD64
define_pd_global(intx, CompilerThreadStackSize, 0);
// Only used on 64 bit platforms
define_pd_global(uintx, HeapBaseMinAddress, 256*M);
define_pd_global(uintx,HeapBaseMinAddress, 256*M);
// Only used on 64 bit Windows platforms
define_pd_global(bool, UseVectoredExceptions, false);
......@@ -22,10 +22,9 @@
*
*/
//
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
//
define_pd_global(bool, DontYieldALot, false);
// Default stack size on Windows is determined by the executable (java.exe
......@@ -35,8 +34,6 @@ define_pd_global(bool, DontYieldALot, false);
define_pd_global(intx, ThreadStackSize, 0); // 0 => use system default
define_pd_global(intx, VMThreadStackSize, 0); // 0 => use system default
define_pd_global(intx, SurvivorRatio, 8);
#ifdef ASSERT
define_pd_global(intx, CompilerThreadStackSize, 1024);
#else
......
......@@ -51,7 +51,7 @@ PSGCAdaptivePolicyCounters::PSGCAdaptivePolicyCounters(const char* name_arg,
cname = PerfDataManager::counter_name(name_space(), "oldCapacity");
_old_capacity = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Bytes, (jlong) Arguments::initial_heap_size(), CHECK);
PerfData::U_Bytes, (jlong) InitialHeapSize, CHECK);
cname = PerfDataManager::counter_name(name_space(), "boundaryMoved");
_boundary_moved = PerfDataManager::create_variable(SUN_GC, cname,
......
......@@ -55,7 +55,7 @@ void CollectorPolicy::initialize_flags() {
void CollectorPolicy::initialize_size_info() {
// User inputs from -mx and ms are aligned
set_initial_heap_byte_size(Arguments::initial_heap_size());
set_initial_heap_byte_size(InitialHeapSize);
if (initial_heap_byte_size() == 0) {
set_initial_heap_byte_size(NewSize + OldSize);
}
......
......@@ -37,7 +37,6 @@ SystemProperty* Arguments::_system_properties = NULL;
const char* Arguments::_gc_log_filename = NULL;
bool Arguments::_has_profile = false;
bool Arguments::_has_alloc_profile = false;
uintx Arguments::_initial_heap_size = 0;
uintx Arguments::_min_heap_size = 0;
Arguments::Mode Arguments::_mode = _mixed;
bool Arguments::_java_compiler = false;
......@@ -182,6 +181,9 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "ProcessingToTenuringRatio", JDK_Version::jdk(5), JDK_Version::jdk(7) },
{ "MinTrainLength", JDK_Version::jdk(5), JDK_Version::jdk(7) },
{ "AppendRatio", JDK_Version::jdk_update(6,10), JDK_Version::jdk(7) },
{ "DefaultMaxRAM", JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
{ "DefaultInitialRAMFraction",
JDK_Version::jdk_update(6,18), JDK_Version::jdk(7) },
{ NULL, JDK_Version(0), JDK_Version(0) }
};
......@@ -555,6 +557,10 @@ static bool set_numeric_flag(char* name, char* value, FlagValueOrigin origin) {
if (!is_neg && CommandLineFlags::uintxAtPut(name, &uintx_v, origin)) {
return true;
}
uint64_t uint64_t_v = (uint64_t) v;
if (!is_neg && CommandLineFlags::uint64_tAtPut(name, &uint64_t_v, origin)) {
return true;
}
return false;
}
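
The hunk above extends `set_numeric_flag` so that a numeric `-XX:` value falls through from `intx` to `uintx` and finally to the new `uint64_t` case, which is what lets a 64-bit flag such as `MaxRAM` be set from the command line. Below is a minimal, self-contained sketch of that fallthrough order; the `try_*` helpers and the one-entry flag handling are hypothetical stand-ins for `CommandLineFlags::intxAtPut` / `uintxAtPut` / `uint64_tAtPut`, and only the ordering mirrors the diff (the real code also accepts k/m/g suffixes).

```cpp
// Sketch of the numeric-flag fallthrough shown in the hunk above (illustrative only).
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

typedef intptr_t  intx;   // HotSpot-style word-sized signed integer
typedef uintptr_t uintx;  // HotSpot-style word-sized unsigned integer

// Hypothetical stand-ins: pretend only one uint64_t flag, "MaxRAM", exists.
static bool try_intx(const char* name, intx)   { (void)name; return false; }
static bool try_uintx(const char* name, uintx) { (void)name; return false; }
static bool try_uint64(const char* name, uint64_t v) {
  if (strcmp(name, "MaxRAM") != 0) return false;
  printf("MaxRAM set to %llu\n", (unsigned long long)v);
  return true;
}

static bool set_numeric_flag_sketch(const char* name, const char* value) {
  long long v = strtoll(value, nullptr, 10);
  bool is_neg = (v < 0);

  if (try_intx(name, (intx)v))                  return true; // 1. signed word-sized flags
  if (!is_neg && try_uintx(name, (uintx)v))     return true; // 2. unsigned word-sized flags
  if (!is_neg && try_uint64(name, (uint64_t)v)) return true; // 3. new: 64-bit flags such as MaxRAM
  return false;                                              // unknown flag or wrong type
}

int main() {
  return set_numeric_flag_sketch("MaxRAM", "4294967296") ? 0 : 1;
}
```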
......@@ -947,7 +953,7 @@ static void no_shared_spaces() {
// UseParNewGC and not explicitly set ParallelGCThreads we
// set it, unless this is a single cpu machine.
void Arguments::set_parnew_gc_flags() {
assert(!UseSerialGC && !UseParallelGC && !UseG1GC,
assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
"control point invariant");
assert(UseParNewGC, "Error");
......@@ -960,13 +966,13 @@ void Arguments::set_parnew_gc_flags() {
if (ParallelGCThreads == 0) {
FLAG_SET_DEFAULT(ParallelGCThreads,
Abstract_VM_Version::parallel_worker_threads());
if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
if (ParallelGCThreads == 1) {
FLAG_SET_DEFAULT(UseParNewGC, false);
FLAG_SET_DEFAULT(ParallelGCThreads, 0);
}
}
if (!UseParNewGC) {
FLAG_SET_DEFAULT(ParallelGCThreads, 0);
} else {
if (UseParNewGC) {
// CDS doesn't work with ParNew yet
no_shared_spaces();
// By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively,
......@@ -980,7 +986,7 @@ void Arguments::set_parnew_gc_flags() {
FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
}
// AlwaysTenure flag should make ParNew to promote all at first collection.
// AlwaysTenure flag should make ParNew promote all at first collection.
// See CR 6362902.
if (AlwaysTenure) {
FLAG_SET_CMDLINE(intx, MaxTenuringThreshold, 0);
......@@ -1003,7 +1009,7 @@ void Arguments::set_parnew_gc_flags() {
// further optimization and tuning efforts, and would almost
// certainly gain from analysis of platform and environment.
void Arguments::set_cms_and_parnew_gc_flags() {
assert(!UseSerialGC && !UseParallelGC, "Error");
assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
assert(UseConcMarkSweepGC, "CMS is expected to be on here");
// If we are using CMS, we prefer to UseParNewGC,
......@@ -1068,7 +1074,7 @@ void Arguments::set_cms_and_parnew_gc_flags() {
} else {
FLAG_SET_ERGO(uintx, MaxNewSize, preferred_max_new_size);
}
if(PrintGCDetails && Verbose) {
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
tty->print_cr("Ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
}
......@@ -1097,15 +1103,15 @@ void Arguments::set_cms_and_parnew_gc_flags() {
} else {
min_new = NewSize;
}
size_t prev_initial_size = initial_heap_size();
if (prev_initial_size != 0 && prev_initial_size < min_new+OldSize) {
set_initial_heap_size(min_new+OldSize);
size_t prev_initial_size = InitialHeapSize;
if (prev_initial_size != 0 && prev_initial_size < min_new + OldSize) {
FLAG_SET_ERGO(uintx, InitialHeapSize, min_new + OldSize);
// Currently minimum size and the initial heap sizes are the same.
set_min_heap_size(initial_heap_size());
set_min_heap_size(InitialHeapSize);
if (PrintGCDetails && Verbose) {
warning("Initial heap size increased to " SIZE_FORMAT " M from "
SIZE_FORMAT " M; use -XX:NewSize=... for finer control.",
initial_heap_size()/M, prev_initial_size/M);
InitialHeapSize/M, prev_initial_size/M);
}
}
......@@ -1114,12 +1120,12 @@ void Arguments::set_cms_and_parnew_gc_flags() {
align_size_down(MaxHeapSize,
CardTableRS::ct_max_alignment_constraint());
if(PrintGCDetails && Verbose) {
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
tty->print_cr("CMS set min_heap_size: " SIZE_FORMAT
" initial_heap_size: " SIZE_FORMAT
" max_heap: " SIZE_FORMAT,
min_heap_size(), initial_heap_size(), max_heap);
min_heap_size(), InitialHeapSize, max_heap);
}
if (max_heap > min_new) {
// Unless explicitly requested otherwise, make young gen
......@@ -1127,7 +1133,7 @@ void Arguments::set_cms_and_parnew_gc_flags() {
if (FLAG_IS_DEFAULT(NewSize)) {
FLAG_SET_ERGO(uintx, NewSize, MAX2(NewSize, min_new));
FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize));
if(PrintGCDetails && Verbose) {
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
tty->print_cr("Ergo set NewSize: " SIZE_FORMAT, NewSize);
}
......@@ -1138,7 +1144,7 @@ void Arguments::set_cms_and_parnew_gc_flags() {
if (FLAG_IS_DEFAULT(OldSize)) {
if (max_heap > NewSize) {
FLAG_SET_ERGO(uintx, OldSize, MIN2(3*NewSize, max_heap - NewSize));
if(PrintGCDetails && Verbose) {
if (PrintGCDetails && Verbose) {
// Too early to use gclog_or_tty
tty->print_cr("Ergo set OldSize: " SIZE_FORMAT, OldSize);
}
......@@ -1186,7 +1192,7 @@ void Arguments::set_cms_and_parnew_gc_flags() {
inline uintx max_heap_for_compressed_oops() {
LP64_ONLY(return oopDesc::OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
NOT_LP64(return DefaultMaxRAM);
NOT_LP64(ShouldNotReachHere(); return 0);
}
bool Arguments::should_auto_select_low_pause_collector() {
......@@ -1205,7 +1211,7 @@ bool Arguments::should_auto_select_low_pause_collector() {
void Arguments::set_ergonomics_flags() {
// Parallel GC is not compatible with sharing. If one specifies
// that they want sharing explicitly, do not set ergonmics flags.
// that they want sharing explicitly, do not set ergonomics flags.
if (DumpSharedSpaces || ForceSharedSpaces) {
return;
}
......@@ -1271,8 +1277,6 @@ void Arguments::set_parallel_gc_flags() {
FLAG_SET_ERGO(uintx, ParallelGCThreads,
Abstract_VM_Version::parallel_worker_threads());
// PS is a server collector, setup the heap sizes accordingly.
set_server_heap_size();
// If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the
// SurvivorRatio has been set, reset their default values to SurvivorRatio +
// 2. By doing this we make SurvivorRatio also work for Parallel Scavenger.
......@@ -1302,8 +1306,6 @@ void Arguments::set_parallel_gc_flags() {
void Arguments::set_g1_gc_flags() {
assert(UseG1GC, "Error");
// G1 is a server collector, setup the heap sizes accordingly.
set_server_heap_size();
#ifdef COMPILER1
FastTLABRefill = false;
#endif
......@@ -1321,50 +1323,77 @@ void Arguments::set_g1_gc_flags() {
}
}
void Arguments::set_server_heap_size() {
void Arguments::set_heap_size() {
if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
// Deprecated flag
FLAG_SET_CMDLINE(uintx, MaxRAMFraction, DefaultMaxRAMFraction);
}
const julong phys_mem =
FLAG_IS_DEFAULT(MaxRAM) ? MIN2(os::physical_memory(), (julong)MaxRAM)
: (julong)MaxRAM;
// If the maximum heap size has not been set with -Xmx,
// then set it as fraction of the size of physical memory,
// respecting the maximum and minimum sizes of the heap.
if (FLAG_IS_DEFAULT(MaxHeapSize)) {
const uint64_t reasonable_fraction =
os::physical_memory() / DefaultMaxRAMFraction;
const uint64_t maximum_size = (uint64_t)
(FLAG_IS_DEFAULT(DefaultMaxRAM) && UseCompressedOops ?
MIN2(max_heap_for_compressed_oops(), DefaultMaxRAM) :
DefaultMaxRAM);
size_t reasonable_max =
(size_t) os::allocatable_physical_memory(reasonable_fraction);
if (reasonable_max > maximum_size) {
reasonable_max = maximum_size;
julong reasonable_max = phys_mem / MaxRAMFraction;
if (phys_mem <= MaxHeapSize * MinRAMFraction) {
// Small physical memory, so use a minimum fraction of it for the heap
reasonable_max = phys_mem / MinRAMFraction;
} else {
// Not-small physical memory, so require a heap at least
// as large as MaxHeapSize
reasonable_max = MAX2(reasonable_max, (julong)MaxHeapSize);
}
if (!FLAG_IS_DEFAULT(ErgoHeapSizeLimit) && ErgoHeapSizeLimit != 0) {
// Limit the heap size to ErgoHeapSizeLimit
reasonable_max = MIN2(reasonable_max, (julong)ErgoHeapSizeLimit);
}
if (UseCompressedOops) {
// Limit the heap size to the maximum possible when using compressed oops
reasonable_max = MIN2(reasonable_max, (julong)max_heap_for_compressed_oops());
}
reasonable_max = os::allocatable_physical_memory(reasonable_max);
if (!FLAG_IS_DEFAULT(InitialHeapSize)) {
// An initial heap size was specified on the command line,
// so be sure that the maximum size is consistent. Done
// after call to allocatable_physical_memory because that
// method might reduce the allocation size.
reasonable_max = MAX2(reasonable_max, (julong)InitialHeapSize);
}
if (PrintGCDetails && Verbose) {
// Cannot use gclog_or_tty yet.
tty->print_cr(" Max heap size for server class platform "
SIZE_FORMAT, reasonable_max);
tty->print_cr(" Maximum heap size " SIZE_FORMAT, reasonable_max);
}
FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx)reasonable_max);
}
// If the initial_heap_size has not been set with -Xms,
// then set it as fraction of size of physical memory
// If the initial_heap_size has not been set with InitialHeapSize
// or -Xms, then set it as fraction of the size of physical memory,
// respecting the maximum and minimum sizes of the heap.
if (initial_heap_size() == 0) {
const uint64_t reasonable_initial_fraction =
os::physical_memory() / DefaultInitialRAMFraction;
const size_t reasonable_initial =
(size_t) os::allocatable_physical_memory(reasonable_initial_fraction);
const size_t minimum_size = NewSize + OldSize;
set_initial_heap_size(MAX2(MIN2(reasonable_initial, reasonable_max),
minimum_size));
// Currently the minimum size and the initial heap sizes are the same.
set_min_heap_size(initial_heap_size());
if (FLAG_IS_DEFAULT(InitialHeapSize)) {
julong reasonable_initial = phys_mem / InitialRAMFraction;
reasonable_initial = MAX2(reasonable_initial, (julong)(OldSize + NewSize));
reasonable_initial = MIN2(reasonable_initial, (julong)MaxHeapSize);
reasonable_initial = os::allocatable_physical_memory(reasonable_initial);
if (PrintGCDetails && Verbose) {
// Cannot use gclog_or_tty yet.
tty->print_cr(" Initial heap size for server class platform "
SIZE_FORMAT, initial_heap_size());
}
} else {
// A minimum size was specified on the command line. Be sure
// that the maximum size is consistent.
if (initial_heap_size() > reasonable_max) {
reasonable_max = initial_heap_size();
}
tty->print_cr(" Initial heap size " SIZE_FORMAT, (uintx)reasonable_initial);
}
FLAG_SET_ERGO(uintx, MaxHeapSize, (uintx) reasonable_max);
FLAG_SET_ERGO(uintx, InitialHeapSize, (uintx)reasonable_initial);
// Subsequent ergonomics code may expect min_heap_size to be set
// if InitialHeapSize is. Use whatever the current values are
// for OldSize and NewSize, whether or not they were set on the
// command line.
set_min_heap_size(OldSize + NewSize);
}
}
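
The rewritten `set_heap_size()` above replaces the server-only `set_server_heap_size()` with collector-independent ergonomics: the maximum heap defaults to `phys_mem / MaxRAMFraction`, switches to `phys_mem / MinRAMFraction` on small machines, is floored at the `MaxHeapSize` default otherwise, and is then clamped by `ErgoHeapSizeLimit` and the compressed-oops limit; the initial heap defaults to `phys_mem / InitialRAMFraction`, bounded below by `OldSize + NewSize` and above by the chosen maximum. The sketch below condenses that arithmetic into one standalone function. It omits the `ErgoHeapSizeLimit`, `os::allocatable_physical_memory`, and explicit-`InitialHeapSize` adjustments, and the values in `main` are illustrative, not the real flag defaults.

```cpp
// Condensed sketch of the new heap-size ergonomics (simplified, illustrative values).
#include <algorithm>
#include <cstdint>
#include <cstdio>

struct ErgoInputs {
  uint64_t phys_mem;             // MIN2(os::physical_memory(), MaxRAM) in HotSpot
  uint64_t max_heap_flag;        // MaxHeapSize default
  uint64_t max_ram_fraction;     // MaxRAMFraction
  uint64_t min_ram_fraction;     // MinRAMFraction
  uint64_t initial_ram_fraction; // InitialRAMFraction
  uint64_t old_plus_new;         // OldSize + NewSize
  uint64_t compressed_oops_max;  // max_heap_for_compressed_oops(), 0 if not used
};

static void ergonomic_heap_sizes(const ErgoInputs& in,
                                 uint64_t* reasonable_max,
                                 uint64_t* reasonable_initial) {
  uint64_t max_heap = in.phys_mem / in.max_ram_fraction;
  if (in.phys_mem <= in.max_heap_flag * in.min_ram_fraction) {
    // Small physical memory: use a larger fraction of it for the heap.
    max_heap = in.phys_mem / in.min_ram_fraction;
  } else {
    // Not-small physical memory: require at least the MaxHeapSize default.
    max_heap = std::max(max_heap, in.max_heap_flag);
  }
  if (in.compressed_oops_max != 0) {
    // Limit to what compressed oops can address.
    max_heap = std::min(max_heap, in.compressed_oops_max);
  }
  uint64_t initial = in.phys_mem / in.initial_ram_fraction;
  initial = std::max(initial, in.old_plus_new); // at least the generation minimums
  initial = std::min(initial, max_heap);        // never above the maximum

  *reasonable_max = max_heap;
  *reasonable_initial = initial;
}

int main() {
  ErgoInputs in = { 8ULL << 30, 96ULL << 20, 4, 2, 64, 20ULL << 20, 0 };
  uint64_t max_heap = 0, initial = 0;
  ergonomic_heap_sizes(in, &max_heap, &initial);
  printf("max heap %llu MB, initial heap %llu MB\n",
         (unsigned long long)(max_heap >> 20),
         (unsigned long long)(initial >> 20));
  return 0;
}
```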
......@@ -1448,7 +1477,7 @@ bool Arguments::verify_percentage(uintx value, const char* name) {
return false;
}
static void set_serial_gc_flags() {
static void force_serial_gc() {
FLAG_SET_DEFAULT(UseSerialGC, true);
FLAG_SET_DEFAULT(UseParNewGC, false);
FLAG_SET_DEFAULT(UseConcMarkSweepGC, false);
......@@ -1584,15 +1613,15 @@ bool Arguments::check_vm_args_consistency() {
// force sharing off.
if (DumpSharedSpaces || ForceSharedSpaces) {
jio_fprintf(defaultStream::error_stream(),
"Reverting to Serial GC because of %s \n",
"Reverting to Serial GC because of %s\n",
ForceSharedSpaces ? " -Xshare:on" : "-Xshare:dump");
set_serial_gc_flags();
force_serial_gc();
FLAG_SET_DEFAULT(SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages), false);
} else {
if (UseSharedSpaces) {
if (UseSharedSpaces && Verbose) {
jio_fprintf(defaultStream::error_stream(),
"Turning off use of shared archive because of "
"choice of garbage collector or large pages \n");
"choice of garbage collector or large pages\n");
}
no_shared_spaces();
}
......@@ -1925,8 +1954,8 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
describe_range_error(errcode);
return JNI_EINVAL;
}
FLAG_SET_CMDLINE(uintx, MaxNewSize, (size_t) long_initial_eden_size);
FLAG_SET_CMDLINE(uintx, NewSize, (size_t) long_initial_eden_size);
FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_eden_size);
FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_eden_size);
// -Xms
} else if (match_option(option, "-Xms", &tail)) {
julong long_initial_heap_size = 0;
......@@ -1937,9 +1966,9 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
describe_range_error(errcode);
return JNI_EINVAL;
}
set_initial_heap_size((size_t) long_initial_heap_size);
FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size);
// Currently the minimum size and the initial heap sizes are the same.
set_min_heap_size(initial_heap_size());
set_min_heap_size(InitialHeapSize);
// -Xmx
} else if (match_option(option, "-Xmx", &tail)) {
julong long_max_heap_size = 0;
......@@ -1950,7 +1979,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
describe_range_error(errcode);
return JNI_EINVAL;
}
FLAG_SET_CMDLINE(uintx, MaxHeapSize, (size_t) long_max_heap_size);
FLAG_SET_CMDLINE(uintx, MaxHeapSize, (uintx)long_max_heap_size);
// Xmaxf
} else if (match_option(option, "-Xmaxf", &tail)) {
int maxf = (int)(atof(tail) * 100);
......@@ -2196,9 +2225,9 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
if (FLAG_IS_DEFAULT(MaxHeapSize)) {
FLAG_SET_CMDLINE(uintx, MaxHeapSize, initHeapSize);
set_initial_heap_size(MaxHeapSize);
FLAG_SET_CMDLINE(uintx, InitialHeapSize, initHeapSize);
// Currently the minimum size and the initial heap sizes are the same.
set_min_heap_size(initial_heap_size());
set_min_heap_size(initHeapSize);
}
if (FLAG_IS_DEFAULT(NewSize)) {
// Make the young generation 3/8ths of the total heap.
......@@ -2676,7 +2705,7 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
}
#ifdef SERIALGC
set_serial_gc_flags();
force_serial_gc();
#endif // SERIALGC
#ifdef KERNEL
no_shared_spaces();
......@@ -2690,19 +2719,23 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
return JNI_EINVAL;
}
if (UseConcMarkSweepGC) {
// Set flags for CMS and ParNew. Check UseConcMarkSweep first
// to ensure that when both UseConcMarkSweepGC and UseParNewGC
// are true, we don't call set_parnew_gc_flags() as well.
set_cms_and_parnew_gc_flags();
} else {
// Set heap size based on available physical memory
set_heap_size();
// Set per-collector flags
if (UseParallelGC || UseParallelOldGC) {
// Set some flags for ParallelGC if needed.
set_parallel_gc_flags();
} else if (UseConcMarkSweepGC) {
// Set some flags for CMS
set_cms_and_parnew_gc_flags();
} else if (UseParNewGC) {
// Set some flags for ParNew
set_parnew_gc_flags();
} else if (UseG1GC) {
// Set some flags for garbage-first, if needed.
set_g1_gc_flags();
}
}
#ifdef SERIALGC
assert(verify_serial_gc_flags(), "SerialGC unset");
......
......@@ -254,7 +254,6 @@ class Arguments : AllStatic {
static bool _has_profile;
static bool _has_alloc_profile;
static const char* _gc_log_filename;
static uintx _initial_heap_size;
static uintx _min_heap_size;
// -Xrun arguments
......@@ -300,8 +299,8 @@ class Arguments : AllStatic {
static void set_g1_gc_flags();
// GC ergonomics
static void set_ergonomics_flags();
// Setup heap size for a server platform
static void set_server_heap_size();
// Setup heap size
static void set_heap_size();
// Based on automatic selection criteria, should the
// low pause collector be used.
static bool should_auto_select_low_pause_collector();
......@@ -434,9 +433,7 @@ class Arguments : AllStatic {
static bool has_profile() { return _has_profile; }
static bool has_alloc_profile() { return _has_alloc_profile; }
// -Xms , -Xmx
static uintx initial_heap_size() { return _initial_heap_size; }
static void set_initial_heap_size(uintx v) { _initial_heap_size = v; }
// -Xms, -Xmx
static uintx min_heap_size() { return _min_heap_size; }
static void set_min_heap_size(uintx v) { _min_heap_size = v; }
......
......@@ -324,6 +324,32 @@ void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, F
faddr->origin = origin;
}
bool CommandLineFlags::uint64_tAt(char* name, size_t len, uint64_t* value) {
Flag* result = Flag::find_flag(name, len);
if (result == NULL) return false;
if (!result->is_uint64_t()) return false;
*value = result->get_uint64_t();
return true;
}
bool CommandLineFlags::uint64_tAtPut(char* name, size_t len, uint64_t* value, FlagValueOrigin origin) {
Flag* result = Flag::find_flag(name, len);
if (result == NULL) return false;
if (!result->is_uint64_t()) return false;
uint64_t old_value = result->get_uint64_t();
result->set_uint64_t(*value);
*value = old_value;
result->origin = origin;
return true;
}
void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, FlagValueOrigin origin) {
Flag* faddr = address_of_flag(flag);
guarantee(faddr != NULL && faddr->is_uint64_t(), "wrong flag type");
faddr->set_uint64_t(value);
faddr->origin = origin;
}
bool CommandLineFlags::doubleAt(char* name, size_t len, double* value) {
Flag* result = Flag::find_flag(name, len);
if (result == NULL) return false;
......
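
The new `CommandLineFlags::uint64_tAt` / `uint64_tAtPut` pair added above mirrors the existing `uintx` accessors: `AtPut` installs the new value, returns the previous value through the same pointer, and records the origin. The snippet below is a tiny standalone illustration of that swap-through-the-pointer behaviour using a hypothetical one-entry flag table, not HotSpot's real `Flag` machinery.

```cpp
// Standalone illustration of the *AtPut swap semantics (hypothetical flag table).
#include <cstdint>
#include <cstdio>
#include <cstring>

struct MiniFlag {
  const char* name;
  uint64_t    value;
  int         origin;   // stands in for FlagValueOrigin
};

static MiniFlag g_flags[] = { { "MaxRAM", 1ULL << 30, 0 } };

static bool uint64_tAtPut_sketch(const char* name, uint64_t* value, int origin) {
  for (MiniFlag& f : g_flags) {
    if (strcmp(f.name, name) != 0) continue;
    uint64_t old_value = f.value;  // remember the previous setting
    f.value  = *value;             // install the new one
    *value   = old_value;          // hand the old value back to the caller
    f.origin = origin;
    return true;
  }
  return false;                    // unknown flag or wrong type
}

int main() {
  uint64_t v = 4ULL << 30;         // request MaxRAM = 4G
  if (uint64_tAtPut_sketch("MaxRAM", &v, /*origin: command line*/ 2)) {
    printf("new MaxRAM=%llu, previous=%llu\n",
           (unsigned long long)g_flags[0].value, (unsigned long long)v);
  }
  return 0;
}
```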
This diff is collapsed.
......@@ -202,6 +202,7 @@ class CommandLineFlagsEx : CommandLineFlags {
static void boolAtPut(CommandLineFlagWithType flag, bool value, FlagValueOrigin origin);
static void intxAtPut(CommandLineFlagWithType flag, intx value, FlagValueOrigin origin);
static void uintxAtPut(CommandLineFlagWithType flag, uintx value, FlagValueOrigin origin);
static void uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, FlagValueOrigin origin);
static void doubleAtPut(CommandLineFlagWithType flag, double value, FlagValueOrigin origin);
static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, FlagValueOrigin origin);
......
......@@ -790,7 +790,7 @@ JVM_ENTRY(jobject, jmm_GetMemoryUsage(JNIEnv* env, jboolean heap))
assert(!has_undefined_init_size, "Undefined init size");
assert(!has_undefined_max_size, "Undefined max size");
MemoryUsage usage((heap ? Arguments::initial_heap_size() : total_init),
MemoryUsage usage((heap ? InitialHeapSize : total_init),
total_used,
total_committed,
(heap ? Universe::heap()->max_capacity() : total_max));
......