Commit 25e8ed36 authored by: amurillo

Merge

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
 HS_MAJOR_VER=25
 HS_MINOR_VER=40
-HS_BUILD_NUMBER=13
+HS_BUILD_NUMBER=14
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
......
@@ -2066,14 +2066,14 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
   LIR_Opr base_op = base.result();
   LIR_Opr index_op = idx.result();
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       LIR_Opr long_index_op = index_op;
-      if (x->index()->type()->is_constant()) {
+      if (index_op->is_constant()) {
         long_index_op = new_register(T_LONG);
         __ move(index_op, long_index_op);
       }
@@ -2088,14 +2088,14 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
   assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
-      if (!x->index()->type()->is_constant()) {
+    if (index_op->type() == T_INT) {
+      if (!index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ convert(Bytecodes::_i2l, idx.result(), index_op);
       }
     } else {
-      assert(x->index()->type()->tag() == longTag, "must be");
-      if (x->index()->type()->is_constant()) {
+      assert(index_op->type() == T_LONG, "must be");
+      if (index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ move(idx.result(), index_op);
       }
@@ -2176,12 +2176,12 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
   LIR_Opr index_op = idx.result();
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       index_op = new_register(T_INT);
       __ convert(Bytecodes::_l2i, idx.result(), index_op);
     }
@@ -2191,7 +2191,7 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
+    if (index_op->type() == T_INT) {
       index_op = new_register(T_LONG);
       __ convert(Bytecodes::_i2l, idx.result(), index_op);
     }
......
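The four hunks above tighten C1's LIR generation for raw Unsafe accesses: the 32-bit and 64-bit normalization of the base and index now inspects the LIR operand that was actually produced (base_op->type(), index_op->is_constant()) instead of the bytecode-level value type (x->base()->type()->tag()), so the check always agrees with the operand fed to the subsequent convert/move. These are the intrinsics behind the address-only sun.misc.Unsafe accessors; a minimal sketch of the Java-level pattern they compile (class name is illustrative, not part of this change):

    import java.lang.reflect.Field;
    import sun.misc.Unsafe;

    // Minimal sketch: the raw, address-only Unsafe accessors whose LIR is
    // built by do_UnsafeGetRaw/do_UnsafePutRaw. The long "address" plays the
    // role of the base operand discussed in the diff above.
    public class RawUnsafeSketch {
        public static void main(String[] args) throws Exception {
            Field f = Unsafe.class.getDeclaredField("theUnsafe");
            f.setAccessible(true);
            Unsafe unsafe = (Unsafe) f.get(null);

            long address = unsafe.allocateMemory(8);     // raw, off-heap base
            unsafe.putLong(address, 42L);                // do_UnsafePutRaw path
            System.out.println(unsafe.getLong(address)); // do_UnsafeGetRaw path
            unsafe.freeMemory(address);
        }
    }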
@@ -2733,10 +2733,12 @@ void CFLS_LAB::retire(int tid) {
   }
 }

-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
-  assert(fl->count() == 0, "Precondition.");
-  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
-         "Precondition");
+// Used by par_get_chunk_of_blocks() for the chunks from the
+// indexed_free_lists. Looks for a chunk with size that is a multiple
+// of "word_sz" and if found, splits it into "word_sz" chunks and add
+// to the free list "fl". "n" is the maximum number of chunks to
+// be added to "fl".
+bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
   // We'll try all multiples of word_sz in the indexed set, starting with
   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
@@ -2817,11 +2819,15 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
                         Mutex::_no_safepoint_check_flag);
         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
         _indexedFreeList[word_sz].set_split_births(births);
-        return;
+        return true;
       }
     }
+    return found;
   }
-  // Otherwise, we'll split a block from the dictionary.
+}
+
+FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
   FreeChunk* fc = NULL;
   FreeChunk* rem_fc = NULL;
   size_t rem;
@@ -2832,16 +2838,12 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
     fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
                                  FreeBlockDictionary<FreeChunk>::atLeast);
     if (fc != NULL) {
-      _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
-      dictionary()->dict_census_update(fc->size(),
-                                       true /*split*/,
-                                       false /*birth*/);
       break;
     } else {
       n--;
     }
   }
-  if (fc == NULL) return;
+  if (fc == NULL) return NULL;
   // Otherwise, split up that block.
   assert((ssize_t)n >= 1, "Control point invariant");
   assert(fc->is_free(), "Error: should be a free block");
@@ -2863,10 +2865,14 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
   // dictionary and return, leaving "fl" empty.
   if (n == 0) {
     returnChunkToDictionary(fc);
-    assert(fl->count() == 0, "We never allocated any blocks");
-    return;
+    return NULL;
   }

+  _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
+  dictionary()->dict_census_update(fc->size(),
+                                   true /*split*/,
+                                   false /*birth*/);
+
   // First return the remainder, if any.
   // Note that we hold the lock until we decide if we're going to give
   // back the remainder to the dictionary, since a concurrent allocation
@@ -2899,7 +2905,24 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
     smallSplitBirth(rem);
   }
-  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
+  assert(n * word_sz == fc->size(),
+         err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
+                 SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
+                 fc->size(), n, word_sz));
+  return fc;
+}
+
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
+  FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
+
+  if (fc == NULL) {
+    return;
+  }
+
+  size_t n = fc->size() / word_sz;
+  assert((ssize_t)n > 0, "Consistency");
+
   // Now do the splitting up.
   // Must do this in reverse order, so that anybody attempting to
   // access the main chunk sees it as a single free block until we
@@ -2947,6 +2970,20 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
   assert(fl->tail()->next() == NULL, "List invariant.");
 }

+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
+  assert(fl->count() == 0, "Precondition.");
+  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
+         "Precondition");
+
+  if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
+    // Got it
+    return;
+  }
+
+  // Otherwise, we'll split a block from the dictionary.
+  par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
+}
+
 // Set up the space's par_seq_tasks structure for work claiming
 // for parallel rescan. See CMSParRemarkTask where this is currently used.
 // XXX Need to suitably abstract and generalize this and the next
......
@@ -172,6 +172,20 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // list of size "word_sz", and must now be decremented.
   void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);

+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // indexed_free_lists.
+  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
+  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
+  // evenly splittable into "n" "word_sz" chunks. Returns that
+  // evenly splittable chunk. May split a larger chunk to get the
+  // evenly splittable chunk.
+  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
+
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // dictionary.
+  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
   // first. This allocation strategy assumes a companion sweeping
......
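Net effect of the two files above: the monolithic par_get_chunk_of_blocks() becomes a thin wrapper that first tries par_get_chunk_of_blocks_IFL() (carve n chunks of word_sz out of the indexed free lists by splitting a chunk whose size is a multiple of word_sz) and only then falls back to par_get_chunk_of_blocks_dictionary(), with get_n_way_chunk_to_split() locating, and if necessary trimming, a dictionary chunk that splits evenly n ways. A toy model of the indexed-free-list path, assuming a simplified {baseAddress, sizeInWords} chunk representation (illustrative Java, not HotSpot code):

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.List;

    // Toy model of the strategy the par_get_chunk_of_blocks_IFL() comment
    // describes: free chunks live in per-size lists indexed by word size; to
    // hand out n chunks of size wordSz, scan the lists whose size is a
    // multiple of wordSz and split chunks found there.
    public class IndexedFreeListModel {
        static final int INDEX_SET_SIZE = 257;  // illustrative bound

        final List<Deque<long[]>> lists = new ArrayList<>();

        IndexedFreeListModel() {
            for (int i = 0; i < INDEX_SET_SIZE; i++) {
                lists.add(new ArrayDeque<>());
            }
        }

        // Roughly mirrors par_get_chunk_of_blocks_IFL's control flow: returns
        // true once "fl" holds chunks, so the caller can skip the dictionary.
        boolean getChunksIFL(int wordSz, int n, Deque<long[]> fl) {
            for (int k = 1; k * wordSz < INDEX_SET_SIZE && fl.size() < n; k++) {
                Deque<long[]> src = lists.get(k * wordSz);
                while (!src.isEmpty() && fl.size() < n) {
                    long[] chunk = src.poll();       // {baseAddress, sizeInWords}
                    for (int i = 0; i < k; i++) {    // split into k wordSz-sized chunks
                        fl.add(new long[] { chunk[0] + (long) i * wordSz, wordSz });
                    }
                }
            }
            return !fl.isEmpty();                    // like the real "found" flag
        }
    }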
@@ -2343,6 +2343,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
     case GCCause::_g1_humongous_allocation: return true;
+    case GCCause::_update_allocation_context_stats_inc: return true;
     default:                                return false;
   }
 }
......
@@ -95,8 +95,9 @@ void VM_G1IncCollectionPause::doit() {
   assert(!_should_initiate_conc_mark ||
   ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
    (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-    _gc_cause == GCCause::_g1_humongous_allocation),
-    "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
+    _gc_cause == GCCause::_g1_humongous_allocation ||
+    _gc_cause == GCCause::_update_allocation_context_stats_inc),
+    "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
......
@@ -54,7 +54,8 @@ const char* GCCause::to_string(GCCause::Cause cause) {
     case _wb_young_gc:
       return "WhiteBox Initiated Young GC";

-    case _update_allocation_context_stats:
+    case _update_allocation_context_stats_inc:
+    case _update_allocation_context_stats_full:
       return "Update Allocation Context Stats";

     case _no_gc:
......
@@ -47,7 +47,8 @@ class GCCause : public AllStatic {
     _heap_inspection,
     _heap_dump,
     _wb_young_gc,
-    _update_allocation_context_stats,
+    _update_allocation_context_stats_inc,
+    _update_allocation_context_stats_full,

     /* implementation independent, but reserved for GC use */
     _no_gc,
......
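The three GC-cause hunks above split _update_allocation_context_stats into _inc and _full variants so that the incremental flavor can initiate a G1 concurrent cycle: it is added to G1CollectedHeap::should_do_concurrent_full_gc() and whitelisted in the VM_G1IncCollectionPause assert, while to_string() maps both variants to the same label. The other causes in that switch work the same way; for example, with -XX:+ExplicitGCInvokesConcurrent a System.gc() should start a concurrent cycle rather than a full stop-the-world collection (illustrative program and class name, assuming JDK 8 G1 logging):

    // Run with: java -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent -verbose:gc ExplicitConcurrentGC
    // With the flag set, the System.gc() below should appear in the log as a
    // concurrent-cycle initiating pause (cause "System.gc()") rather than a full GC.
    public class ExplicitConcurrentGC {
        public static void main(String[] args) throws Exception {
            byte[][] garbage = new byte[1024][];
            for (int i = 0; i < garbage.length; i++) {
                garbage[i] = new byte[64 * 1024];   // create some heap churn
            }
            System.gc();                            // GCCause::_java_lang_system_gc
            Thread.sleep(1000);                     // let the concurrent cycle run
        }
    }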
@@ -21,39 +21,32 @@
  * questions.
  */

-import sun.hotspot.WhiteBox;
-
-class AllocateBeyondMetaspaceSize {
-  public static Object dummy;
-
-  public static void main(String [] args) {
-    if (args.length != 2) {
-      throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
-    }
-
-    long metaspaceSize = Long.parseLong(args[0]);
-    long youngGenSize = Long.parseLong(args[1]);
-
-    run(metaspaceSize, youngGenSize);
-  }
-
-  private static void run(long metaspaceSize, long youngGenSize) {
-    WhiteBox wb = WhiteBox.getWhiteBox();
-
-    long allocationBeyondMetaspaceSize = metaspaceSize * 2;
-    long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
-
-    triggerYoungGC(youngGenSize);
-
-    wb.freeMetaspace(null, metaspace, metaspace);
-  }
-
-  private static void triggerYoungGC(long youngGenSize) {
-    long approxAllocSize = 32 * 1024;
-    long numAllocations = 2 * youngGenSize / approxAllocSize;
-
-    for (long i = 0; i < numAllocations; i++) {
-      dummy = new byte[(int)approxAllocSize];
-    }
-  }
-}
+/*
+ * @test TestRangeCheck
+ * @bug 8054883
+ * @summary Tests that range check is not skipped
+ */
+
+public class TestRangeCheck {
+    public static void main(String args[]) {
+        try {
+            test();
+            throw new AssertionError("Expected ArrayIndexOutOfBoundsException was not thrown");
+        } catch (ArrayIndexOutOfBoundsException e) {
+            System.out.println("Expected ArrayIndexOutOfBoundsException was thrown");
+        }
+    }
+
+    private static void test() {
+        int arr[] = new int[1];
+        int result = 1;
+
+        // provoke OSR compilation
+        for (int i = 0; i < Integer.MAX_VALUE; i++) {
+        }
+
+        if (result > 0 && arr[~result] > 0) {
+            arr[~result] = 0;
+        } else {
+            result = 0;
+        }
+    }
+}
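Note how the new TestRangeCheck works: the empty loop up to Integer.MAX_VALUE provokes OSR compilation so the compiled code, not the interpreter, executes the guarded access, and ~result with result == 1 evaluates to -2 (two's complement: ~x == -x - 1), so arr[~result] must throw ArrayIndexOutOfBoundsException unless the JIT wrongly drops the range check, which per the @summary is the regression bug 8054883 guards against. A one-liner confirming the identity (run with -ea for the assert; class name is illustrative):

    // ~x == -x - 1 for two's-complement ints, so ~1 == -2 (an illegal index).
    public class ComplementDemo {
        public static void main(String[] args) {
            int result = 1;
            System.out.println(~result);   // prints -2
            assert ~result == -result - 1;
        }
    }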
@@ -26,7 +26,7 @@
  * @key gc
  * @bug 8049831
  * @library /testlibrary /testlibrary/whitebox
- * @build TestCMSClassUnloadingEnabledHWM AllocateBeyondMetaspaceSize
+ * @build TestCMSClassUnloadingEnabledHWM
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run driver TestCMSClassUnloadingEnabledHWM
  * @summary Test that -XX:-CMSClassUnloadingEnabled will trigger a Full GC when more than MetaspaceSize metadata is allocated.
@@ -34,9 +34,11 @@

 import com.oracle.java.testlibrary.OutputAnalyzer;
 import com.oracle.java.testlibrary.ProcessTools;
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
 import java.util.ArrayList;
 import java.util.Arrays;
+import sun.hotspot.WhiteBox;

 public class TestCMSClassUnloadingEnabledHWM {
   private static long MetaspaceSize = 32 * 1024 * 1024;
@@ -47,15 +49,18 @@ public class TestCMSClassUnloadingEnabledHWM {
       "-Xbootclasspath/a:.",
       "-XX:+UnlockDiagnosticVMOptions",
       "-XX:+WhiteBoxAPI",
+      "-Xmx128m",
+      "-XX:CMSMaxAbortablePrecleanTime=1",
+      "-XX:CMSWaitDuration=50",
       "-XX:MetaspaceSize=" + MetaspaceSize,
       "-Xmn" + YoungGenSize,
       "-XX:+UseConcMarkSweepGC",
       "-XX:" + (enableUnloading ? "+" : "-") + "CMSClassUnloadingEnabled",
       "-XX:+PrintHeapAtGC",
       "-XX:+PrintGCDetails",
-      "AllocateBeyondMetaspaceSize",
-      "" + MetaspaceSize,
-      "" + YoungGenSize);
+      "-XX:+PrintGCTimeStamps",
+      TestCMSClassUnloadingEnabledHWM.AllocateBeyondMetaspaceSize.class.getName(),
+      "" + MetaspaceSize);
     return new OutputAnalyzer(pb.start());
   }
@@ -87,5 +92,37 @@ public class TestCMSClassUnloadingEnabledHWM {
     testWithCMSClassUnloading();
     testWithoutCMSClassUnloading();
   }
+
+  public static class AllocateBeyondMetaspaceSize {
+    public static void main(String [] args) throws Exception {
+      if (args.length != 1) {
+        throw new IllegalArgumentException("Usage: <MetaspaceSize>");
+      }
+
+      WhiteBox wb = WhiteBox.getWhiteBox();
+
+      // Allocate past the MetaspaceSize limit.
+      long metaspaceSize = Long.parseLong(args[0]);
+      long allocationBeyondMetaspaceSize = metaspaceSize * 2;
+      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      // Wait for at least one GC to occur. The caller will parse the log files produced.
+      GarbageCollectorMXBean cmsGCBean = getCMSGCBean();
+      while (cmsGCBean.getCollectionCount() == 0) {
+        Thread.sleep(100);
+      }
+
+      wb.freeMetaspace(null, metaspace, metaspace);
+    }
+
+    private static GarbageCollectorMXBean getCMSGCBean() {
+      for (GarbageCollectorMXBean gcBean : ManagementFactory.getGarbageCollectorMXBeans()) {
+        if (gcBean.getObjectName().toString().equals("java.lang:type=GarbageCollector,name=ConcurrentMarkSweep")) {
+          return gcBean;
+        }
+      }
+      return null;
+    }
+  }
 }
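Two things change in this test: the AllocateBeyondMetaspaceSize helper becomes a nested class, so the jtreg @build line shrinks and the child VM is launched via AllocateBeyondMetaspaceSize.class.getName(); and instead of provoking young GCs by allocating, the helper now polls the CMS collector's MXBean until at least one collection is recorded, with -Xmx128m, CMSMaxAbortablePrecleanTime and CMSWaitDuration presumably added to keep the cycle short and the test fast. The bean lookup matches on the collector's ObjectName; a standalone probe (illustrative class name) that lists what is registered:

    import java.lang.management.GarbageCollectorMXBean;
    import java.lang.management.ManagementFactory;

    // Prints the registered collector beans and their collection counts.
    // Under -XX:+UseConcMarkSweepGC the beans are typically named
    // "ParNew" and "ConcurrentMarkSweep", the latter being what
    // getCMSGCBean() above matches against.
    public class ListGCBeans {
        public static void main(String[] args) {
            for (GarbageCollectorMXBean b : ManagementFactory.getGarbageCollectorMXBeans()) {
                System.out.println(b.getObjectName() + " -> count=" + b.getCollectionCount());
            }
        }
    }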
@@ -26,7 +26,7 @@
  * @key gc
  * @bug 8049831
  * @library /testlibrary /testlibrary/whitebox
- * @build TestG1ClassUnloadingHWM AllocateBeyondMetaspaceSize
+ * @build TestG1ClassUnloadingHWM
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run driver TestG1ClassUnloadingHWM
  * @summary Test that -XX:-ClassUnloadingWithConcurrentMark will trigger a Full GC when more than MetaspaceSize metadata is allocated.
@@ -34,9 +34,9 @@

 import com.oracle.java.testlibrary.OutputAnalyzer;
 import com.oracle.java.testlibrary.ProcessTools;
 import java.util.ArrayList;
 import java.util.Arrays;
+import sun.hotspot.WhiteBox;

 public class TestG1ClassUnloadingHWM {
   private static long MetaspaceSize = 32 * 1024 * 1024;
@@ -53,7 +53,7 @@ public class TestG1ClassUnloadingHWM {
       "-XX:" + (enableUnloading ? "+" : "-") + "ClassUnloadingWithConcurrentMark",
       "-XX:+PrintHeapAtGC",
       "-XX:+PrintGCDetails",
-      "AllocateBeyondMetaspaceSize",
+      TestG1ClassUnloadingHWM.AllocateBeyondMetaspaceSize.class.getName(),
       "" + MetaspaceSize,
       "" + YoungGenSize);
     return new OutputAnalyzer(pb.start());
@@ -87,5 +87,36 @@ public class TestG1ClassUnloadingHWM {
     testWithG1ClassUnloading();
     testWithoutG1ClassUnloading();
   }
+
+  public static class AllocateBeyondMetaspaceSize {
+    public static Object dummy;
+
+    public static void main(String [] args) throws Exception {
+      if (args.length != 2) {
+        throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
+      }
+
+      WhiteBox wb = WhiteBox.getWhiteBox();
+
+      // Allocate past the MetaspaceSize limit
+      long metaspaceSize = Long.parseLong(args[0]);
+      long allocationBeyondMetaspaceSize = metaspaceSize * 2;
+      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      long youngGenSize = Long.parseLong(args[1]);
+      triggerYoungGCs(youngGenSize);
+
+      wb.freeMetaspace(null, metaspace, metaspace);
+    }
+
+    public static void triggerYoungGCs(long youngGenSize) {
+      long approxAllocSize = 32 * 1024;
+      long numAllocations = 2 * youngGenSize / approxAllocSize;
+
+      for (long i = 0; i < numAllocations; i++) {
+        dummy = new byte[(int)approxAllocSize];
+      }
+    }
+  }
 }