From b021a51fdfd263eb827e58f868178b8fa02c8eb7 Mon Sep 17 00:00:00 2001
From: iveresov
Date: Wed, 4 May 2011 15:08:44 -0700
Subject: [PATCH] 7041501: NUMA: Expand the old gen more aggressively

Summary: Expand the old gen in bigger increments
Reviewed-by: jmasa
---
 .../vm/gc_implementation/parallelScavenge/psOldGen.cpp | 6 ++++++
 src/share/vm/runtime/arguments.cpp                      | 5 +++++
 2 files changed, 11 insertions(+)

diff --git a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
index acbe76f94..55bbdd6d7 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
@@ -224,6 +224,12 @@ void PSOldGen::expand(size_t bytes) {
   const size_t alignment = virtual_space()->alignment();
   size_t aligned_bytes = align_size_up(bytes, alignment);
   size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);
+
+  if (UseNUMA) {
+    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
+    // providing a page per lgroup. Alignment is larger or equal to the page size.
+    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
+  }
   if (aligned_bytes == 0){
     // The alignment caused the number of bytes to wrap. An expand_by(0) will
     // return true with the implication that and expansion was done when it
diff --git a/src/share/vm/runtime/arguments.cpp b/src/share/vm/runtime/arguments.cpp
index 498e4e9d2..89e326283 100644
--- a/src/share/vm/runtime/arguments.cpp
+++ b/src/share/vm/runtime/arguments.cpp
@@ -1423,6 +1423,11 @@ void Arguments::set_parallel_gc_flags() {
       }
     }
   }
+  if (UseNUMA) {
+    if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
+      FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
+    }
+  }
 }
 
 void Arguments::set_g1_gc_flags() {
--
GitLab
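
Note (editorial): the sketch below is a standalone illustration of the expansion-size arithmetic this patch introduces in PSOldGen::expand(), not HotSpot code. The concrete values for the virtual-space alignment, MinHeapDeltaBytes, and the lgroup count are assumptions chosen for the example; align_size_up is re-implemented locally to keep the snippet self-contained.

// Standalone sketch of the NUMA expansion-size computation (assumed inputs).
#include <cstddef>
#include <cstdio>
#include <algorithm>

// Round 'bytes' up to a multiple of 'alignment' (mirrors HotSpot's align_size_up).
static size_t align_size_up(size_t bytes, size_t alignment) {
  return (bytes + alignment - 1) / alignment * alignment;
}

int main() {
  const size_t M = 1024 * 1024;

  // Assumed inputs: a 2 MB virtual-space alignment (>= page size),
  // the new NUMA default of MinHeapDeltaBytes = 64 MB, and 8 lgroups.
  const size_t alignment        = 2 * M;
  const size_t min_heap_delta   = 64 * M;
  const size_t numa_group_count = 8;

  size_t aligned_expand_bytes = align_size_up(min_heap_delta, alignment);

  // With NUMA, round-robin page allocation spreads the old gen across lgroups,
  // so expand by at least one aligned chunk per lgroup (alignment * group count).
  aligned_expand_bytes = std::max(aligned_expand_bytes,
                                  alignment * numa_group_count);

  printf("expand by at least %zu MB\n", aligned_expand_bytes / M);
  return 0;
}

With these assumed numbers the per-lgroup minimum is 16 MB, so the 64 MB MinHeapDeltaBytes default set in arguments.cpp dominates; on machines with many lgroups or larger alignments, the per-lgroup term takes over and the old gen grows in correspondingly larger steps.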