Commit f4d4ba98 authored by asaha

Merge

@@ -524,6 +524,7 @@ b63d0e8bfc0738bba21ae67779780f59118a95f7 jdk8u40-b07
 905a16825d2931345a7d6dba9e427f98eb51761a jdk8u40-b08
 d96716f6cbba9f000dfb1da39d2b81264f4cdea7 hs25.40-b13
 7ff8d51e0d8fc71f3ad31fd15817083341416ca8 jdk8u40-b09
+e193bbae24effeaf476f688d8d840787db53d74e hs25.40-b14
 a4d44dfb7d30eea54bc172e4429a655454ae0bbf jdk8u25-b00
 9a2152fbd929b0d8b2f5c326a5526214ae71731a jdk8u25-b01
 d3d5604ea0dea3812e87ba76ac199d0a8be6f49f jdk8u25-b02
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
 HS_MAJOR_VER=25
 HS_MINOR_VER=40
-HS_BUILD_NUMBER=13
+HS_BUILD_NUMBER=14
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
@@ -2066,14 +2066,14 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
   LIR_Opr base_op = base.result();
   LIR_Opr index_op = idx.result();
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       LIR_Opr long_index_op = index_op;
-      if (x->index()->type()->is_constant()) {
+      if (index_op->is_constant()) {
         long_index_op = new_register(T_LONG);
         __ move(index_op, long_index_op);
       }
@@ -2088,14 +2088,14 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
   assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
-      if (!x->index()->type()->is_constant()) {
+    if (index_op->type() == T_INT) {
+      if (!index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ convert(Bytecodes::_i2l, idx.result(), index_op);
       }
     } else {
-      assert(x->index()->type()->tag() == longTag, "must be");
-      if (x->index()->type()->is_constant()) {
+      assert(index_op->type() == T_LONG, "must be");
+      if (index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ move(idx.result(), index_op);
       }
@@ -2176,12 +2176,12 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
   LIR_Opr index_op = idx.result();
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       index_op = new_register(T_INT);
       __ convert(Bytecodes::_l2i, idx.result(), index_op);
     }
@@ -2191,7 +2191,7 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
+    if (index_op->type() == T_INT) {
       index_op = new_register(T_LONG);
       __ convert(Bytecodes::_i2l, idx.result(), index_op);
     }
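
For orientation: the four hunks above change C1's do_UnsafeGetRaw/do_UnsafePutRaw so that the decision to insert an l2i or i2l conversion inspects the LIR operand types (base_op->type(), index_op->type()) rather than the HIR value tags, matching the operands that are actually converted. The sketch below is a hypothetical, minimal Java program (invented class name, not part of this commit) showing one base-plus-index raw Unsafe access pattern of the kind these intrinsics compile:

import java.lang.reflect.Field;
import sun.misc.Unsafe;

public class UnsafeRawDemo {
    public static void main(String[] args) throws Exception {
        // Grab the Unsafe singleton reflectively (JDK 8; not a supported API).
        Field f = Unsafe.class.getDeclaredField("theUnsafe");
        f.setAccessible(true);
        Unsafe unsafe = (Unsafe) f.get(null);

        long base = unsafe.allocateMemory(64);   // raw 64-bit base address
        long index = 8;                          // byte offset used as the index

        unsafe.putLong(base + index, 42L);       // raw put: base + index addressing
        long v = unsafe.getLong(base + index);   // raw get of the same slot
        System.out.println(v);                   // prints 42

        unsafe.freeMemory(base);
    }
}
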
@@ -2733,10 +2733,12 @@ void CFLS_LAB::retire(int tid) {
     }
   }
 }
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
-  assert(fl->count() == 0, "Precondition.");
-  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
-         "Precondition");
+// Used by par_get_chunk_of_blocks() for the chunks from the
+// indexed_free_lists.  Looks for a chunk with size that is a multiple
+// of "word_sz" and if found, splits it into "word_sz" chunks and add
+// to the free list "fl".  "n" is the maximum number of chunks to
+// be added to "fl".
+bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
   // We'll try all multiples of word_sz in the indexed set, starting with
   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
@@ -2817,11 +2819,15 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
                         Mutex::_no_safepoint_check_flag);
         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
         _indexedFreeList[word_sz].set_split_births(births);
-        return;
+        return true;
       }
     }
+    return found;
   }
-  // Otherwise, we'll split a block from the dictionary.
+}
+
+FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
   FreeChunk* fc = NULL;
   FreeChunk* rem_fc = NULL;
   size_t rem;
@@ -2832,16 +2838,12 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
                                    FreeBlockDictionary<FreeChunk>::atLeast);
       if (fc != NULL) {
-        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_block
-        dictionary()->dict_census_update(fc->size(),
-                                         true /*split*/,
-                                         false /*birth*/);
         break;
       } else {
         n--;
       }
     }
-    if (fc == NULL) return;
+    if (fc == NULL) return NULL;
     // Otherwise, split up that block.
     assert((ssize_t)n >= 1, "Control point invariant");
     assert(fc->is_free(), "Error: should be a free block");
@@ -2863,10 +2865,14 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
     // dictionary and return, leaving "fl" empty.
     if (n == 0) {
       returnChunkToDictionary(fc);
-      assert(fl->count() == 0, "We never allocated any blocks");
-      return;
+      return NULL;
     }
+
+    _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_block
+    dictionary()->dict_census_update(fc->size(),
+                                     true /*split*/,
+                                     false /*birth*/);
     // First return the remainder, if any.
     // Note that we hold the lock until we decide if we're going to give
     // back the remainder to the dictionary, since a concurrent allocation
@@ -2899,7 +2905,24 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
       _indexedFreeList[rem].return_chunk_at_head(rem_fc);
       smallSplitBirth(rem);
     }
-  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
+  assert(n * word_sz == fc->size(),
+    err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
+    SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
+    fc->size(), n, word_sz));
+  return fc;
+}
+
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
+  FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
+  if (fc == NULL) {
+    return;
+  }
+  size_t n = fc->size() / word_sz;
+  assert((ssize_t)n > 0, "Consistency");
+
   // Now do the splitting up.
   // Must do this in reverse order, so that anybody attempting to
   // access the main chunk sees it as a single free block until we
@@ -2947,6 +2970,20 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
   assert(fl->tail()->next() == NULL, "List invariant.");
 }
+
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
+  assert(fl->count() == 0, "Precondition.");
+  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
+         "Precondition");
+
+  if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
+    // Got it
+    return;
+  }
+
+  // Otherwise, we'll split a block from the dictionary.
+  par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
+}
+
 // Set up the space's par_seq_tasks structure for work claiming
 // for parallel rescan. See CMSParRemarkTask where this is currently used.
 // XXX Need to suitably abstract and generalize this and the next
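
Taken together, these hunks split the old monolithic par_get_chunk_of_blocks() into par_get_chunk_of_blocks_IFL(), which services the request from the indexed free lists, and par_get_chunk_of_blocks_dictionary(), which splits an evenly divisible chunk obtained from the dictionary via the new get_n_way_chunk_to_split(). The following is a toy, self-contained Java model of that two-level strategy; every name and data structure in it is invented for illustration and none of it is HotSpot code:

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class FreeListSplitDemo {
    static final int INDEX_SET_SIZE = 257;                       // small-chunk limit (words)
    static int[] indexedFreeLists = new int[INDEX_SET_SIZE];     // size -> chunk count
    static TreeMap<Long, Integer> dictionary = new TreeMap<>();  // size -> chunk count

    // Analogue of par_get_chunk_of_blocks_IFL: find an indexed chunk whose
    // size is a multiple of wordSz and split it into wordSz-sized blocks.
    static boolean getFromIndexedLists(int wordSz, int n, List<Integer> fl) {
        for (int k = 1; k * wordSz < INDEX_SET_SIZE; k++) {
            int curSz = k * wordSz;
            if (indexedFreeLists[curSz] > 0) {
                indexedFreeLists[curSz]--;
                for (int i = 0; i < k && fl.size() < n; i++) {
                    fl.add(wordSz);
                }
                return true;    // "Got it"
            }
        }
        return false;
    }

    // Analogue of get_n_way_chunk_to_split + par_get_chunk_of_blocks_dictionary:
    // take a chunk of at least n * wordSz words, return the non-splittable
    // remainder to the indexed lists, and carve the rest into blocks.
    static void getFromDictionary(int wordSz, int n, List<Integer> fl) {
        Long size = dictionary.ceilingKey((long) n * wordSz);    // "atLeast" lookup
        if (size == null) {
            return;             // nothing big enough: leave fl empty
        }
        dictionary.merge(size, -1, Integer::sum);
        long rem = size % wordSz;                  // remainder, not evenly splittable
        if (rem > 0 && rem < INDEX_SET_SIZE) {
            indexedFreeLists[(int) rem]++;         // remainder back to indexed lists
        }
        for (long carved = 0; carved + wordSz <= size - rem; carved += wordSz) {
            fl.add(wordSz);     // like the real code, may yield more than n blocks
        }
    }

    public static void main(String[] args) {
        dictionary.put(1000L, 1);
        List<Integer> fl = new ArrayList<>();
        int wordSz = 8, n = 16;
        if (!getFromIndexedLists(wordSz, n, fl)) {
            getFromDictionary(wordSz, n, fl);
        }
        System.out.println("blocks obtained: " + fl.size());
    }
}
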
@@ -172,6 +172,20 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // list of size "word_sz", and must now be decremented.
   void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // indexed_free_lists.
+  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
+  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
+  // evenly splittable into "n" "word_sz" chunks.  Returns that
+  // evenly splittable chunk.  May split a larger chunk to get the
+  // evenly splittable chunk.
+  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
+
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // dictionary.
+  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
   // first. This allocation strategy assumes a companion sweeping
@@ -2343,6 +2343,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
     case GCCause::_g1_humongous_allocation: return true;
+    case GCCause::_update_allocation_context_stats_inc: return true;
     default:                                return false;
   }
 }
@@ -95,8 +95,9 @@ void VM_G1IncCollectionPause::doit() {
   assert(!_should_initiate_conc_mark ||
       ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
        (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-        _gc_cause == GCCause::_g1_humongous_allocation),
-      "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
+        _gc_cause == GCCause::_g1_humongous_allocation ||
+        _gc_cause == GCCause::_update_allocation_context_stats_inc),
+      "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
@@ -54,7 +54,8 @@ const char* GCCause::to_string(GCCause::Cause cause) {
     case _wb_young_gc:
       return "WhiteBox Initiated Young GC";
-    case _update_allocation_context_stats:
+    case _update_allocation_context_stats_inc:
+    case _update_allocation_context_stats_full:
       return "Update Allocation Context Stats";
     case _no_gc:
@@ -47,7 +47,8 @@ class GCCause : public AllStatic {
     _heap_inspection,
     _heap_dump,
     _wb_young_gc,
-    _update_allocation_context_stats,
+    _update_allocation_context_stats_inc,
+    _update_allocation_context_stats_full,
     /* implementation independent, but reserved for GC use */
     _no_gc,
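
The three G1/GC-interface hunks above split the single _update_allocation_context_stats cause into an _inc variant, which is now allowed to start a concurrent cycle, and a _full variant; both still report the same description string. A hypothetical Java analogue of that split (invented names, for illustration only):

public class GCCauseDemo {
    enum Cause {
        UPDATE_ALLOCATION_CONTEXT_STATS_INC,
        UPDATE_ALLOCATION_CONTEXT_STATS_FULL;

        // Both variants share one user-visible string, like GCCause::to_string().
        String description() {
            switch (this) {
                case UPDATE_ALLOCATION_CONTEXT_STATS_INC:
                case UPDATE_ALLOCATION_CONTEXT_STATS_FULL:
                    return "Update Allocation Context Stats";
                default:
                    throw new AssertionError(this);
            }
        }

        // Only the _inc variant may start a concurrent cycle, mirroring
        // the new case in should_do_concurrent_full_gc().
        boolean startsConcurrentCycle() {
            return this == UPDATE_ALLOCATION_CONTEXT_STATS_INC;
        }
    }

    public static void main(String[] args) {
        for (Cause c : Cause.values()) {
            System.out.println(c + ": \"" + c.description()
                    + "\", concurrent=" + c.startsConcurrentCycle());
        }
    }
}
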
@@ -21,39 +21,32 @@
  * questions.
  */
-import sun.hotspot.WhiteBox;
-
-class AllocateBeyondMetaspaceSize {
-  public static Object dummy;
-
-  public static void main(String [] args) {
-    if (args.length != 2) {
-      throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
-    }
-
-    long metaspaceSize = Long.parseLong(args[0]);
-    long youngGenSize = Long.parseLong(args[1]);
-
-    run(metaspaceSize, youngGenSize);
-  }
-
-  private static void run(long metaspaceSize, long youngGenSize) {
-    WhiteBox wb = WhiteBox.getWhiteBox();
-
-    long allocationBeyondMetaspaceSize = metaspaceSize * 2;
-    long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
-
-    triggerYoungGC(youngGenSize);
-
-    wb.freeMetaspace(null, metaspace, metaspace);
-  }
-
-  private static void triggerYoungGC(long youngGenSize) {
-    long approxAllocSize = 32 * 1024;
-    long numAllocations = 2 * youngGenSize / approxAllocSize;
-
-    for (long i = 0; i < numAllocations; i++) {
-      dummy = new byte[(int)approxAllocSize];
-    }
-  }
-}
+
+/*
+ * @test TestRangeCheck
+ * @bug 8054883
+ * @summary Tests that range check is not skipped
+ */
+
+public class TestRangeCheck {
+    public static void main(String args[]) {
+        try {
+            test();
+            throw new AssertionError("Expected ArrayIndexOutOfBoundsException was not thrown");
+        } catch (ArrayIndexOutOfBoundsException e) {
+            System.out.println("Expected ArrayIndexOutOfBoundsException was thrown");
+        }
+    }
+
+    private static void test() {
+        int arr[] = new int[1];
+        int result = 1;
+
+        // provoke OSR compilation
+        for (int i = 0; i < Integer.MAX_VALUE; i++) {
+        }
+
+        if (result > 0 && arr[~result] > 0) {
+            arr[~result] = 0;
+        }
+    }
+}
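
The replacement test works because ~1 == -2 in two's complement, so arr[~result] indexes position -2 of a length-1 array; the empty int loop to Integer.MAX_VALUE provokes OSR compilation first, and the point of bug 8054883 is that the compiled code must still perform that range check. A standalone sketch of just the indexing trick (invented class name, not part of the test suite):

public class BitwiseNotIndexDemo {
    public static void main(String[] args) {
        int result = 1;
        System.out.println(~result);          // two's complement: prints -2

        int[] arr = new int[1];
        try {
            int v = arr[~result];             // arr[-2]: range check must throw
            System.out.println(v);            // never reached
        } catch (ArrayIndexOutOfBoundsException e) {
            System.out.println("range check fired: " + e);
        }
    }
}
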
@@ -26,7 +26,7 @@
  * @key gc
  * @bug 8049831
  * @library /testlibrary /testlibrary/whitebox
- * @build TestCMSClassUnloadingEnabledHWM AllocateBeyondMetaspaceSize
+ * @build TestCMSClassUnloadingEnabledHWM
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run driver TestCMSClassUnloadingEnabledHWM
  * @summary Test that -XX:-CMSClassUnloadingEnabled will trigger a Full GC when more than MetaspaceSize metadata is allocated.
@@ -34,9 +34,11 @@
 import com.oracle.java.testlibrary.OutputAnalyzer;
 import com.oracle.java.testlibrary.ProcessTools;
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
 import java.util.ArrayList;
 import java.util.Arrays;
+import sun.hotspot.WhiteBox;

 public class TestCMSClassUnloadingEnabledHWM {
   private static long MetaspaceSize = 32 * 1024 * 1024;
@@ -47,15 +49,18 @@ public class TestCMSClassUnloadingEnabledHWM {
       "-Xbootclasspath/a:.",
       "-XX:+UnlockDiagnosticVMOptions",
       "-XX:+WhiteBoxAPI",
+      "-Xmx128m",
+      "-XX:CMSMaxAbortablePrecleanTime=1",
+      "-XX:CMSWaitDuration=50",
       "-XX:MetaspaceSize=" + MetaspaceSize,
-      "-Xmn" + YoungGenSize,
       "-XX:+UseConcMarkSweepGC",
       "-XX:" + (enableUnloading ? "+" : "-") + "CMSClassUnloadingEnabled",
       "-XX:+PrintHeapAtGC",
       "-XX:+PrintGCDetails",
-      "AllocateBeyondMetaspaceSize",
-      "" + MetaspaceSize,
-      "" + YoungGenSize);
+      "-XX:+PrintGCTimeStamps",
+      TestCMSClassUnloadingEnabledHWM.AllocateBeyondMetaspaceSize.class.getName(),
+      "" + MetaspaceSize);
     return new OutputAnalyzer(pb.start());
   }
@@ -87,5 +92,37 @@ public class TestCMSClassUnloadingEnabledHWM {
     testWithCMSClassUnloading();
     testWithoutCMSClassUnloading();
   }
+
+  public static class AllocateBeyondMetaspaceSize {
+    public static void main(String [] args) throws Exception {
+      if (args.length != 1) {
+        throw new IllegalArgumentException("Usage: <MetaspaceSize>");
+      }
+
+      WhiteBox wb = WhiteBox.getWhiteBox();
+
+      // Allocate past the MetaspaceSize limit.
+      long metaspaceSize = Long.parseLong(args[0]);
+      long allocationBeyondMetaspaceSize = metaspaceSize * 2;
+      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      // Wait for at least one GC to occur. The caller will parse the log files produced.
+      GarbageCollectorMXBean cmsGCBean = getCMSGCBean();
+      while (cmsGCBean.getCollectionCount() == 0) {
+        Thread.sleep(100);
+      }
+
+      wb.freeMetaspace(null, metaspace, metaspace);
+    }
+
+    private static GarbageCollectorMXBean getCMSGCBean() {
+      for (GarbageCollectorMXBean gcBean : ManagementFactory.getGarbageCollectorMXBeans()) {
+        if (gcBean.getObjectName().toString().equals("java.lang:type=GarbageCollector,name=ConcurrentMarkSweep")) {
+          return gcBean;
+        }
+      }
+      return null;
+    }
+  }
 }
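
Instead of provoking young GCs by allocation volume, the embedded helper above now blocks until the CMS collector's MXBean reports at least one collection. A small standalone sketch (hypothetical class name) of what those beans look like on a running VM; the ConcurrentMarkSweep bean the helper matches only exists when the VM runs with -XX:+UseConcMarkSweepGC:

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;

public class ListGCBeansDemo {
    public static void main(String[] args) {
        // Enumerate the collector beans; names depend on the configured GC,
        // e.g. "java.lang:type=GarbageCollector,name=ConcurrentMarkSweep".
        for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) {
            System.out.println(bean.getObjectName()
                    + " collections=" + bean.getCollectionCount()
                    + " timeMs=" + bean.getCollectionTime());
        }
    }
}
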
@@ -26,7 +26,7 @@
  * @key gc
  * @bug 8049831
 * @library /testlibrary /testlibrary/whitebox
- * @build TestG1ClassUnloadingHWM AllocateBeyondMetaspaceSize
+ * @build TestG1ClassUnloadingHWM
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
 * @run driver TestG1ClassUnloadingHWM
 * @summary Test that -XX:-ClassUnloadingWithConcurrentMark will trigger a Full GC when more than MetaspaceSize metadata is allocated.
@@ -34,9 +34,9 @@
 import com.oracle.java.testlibrary.OutputAnalyzer;
 import com.oracle.java.testlibrary.ProcessTools;
 import java.util.ArrayList;
 import java.util.Arrays;
+import sun.hotspot.WhiteBox;

 public class TestG1ClassUnloadingHWM {
   private static long MetaspaceSize = 32 * 1024 * 1024;
@@ -53,7 +53,7 @@ public class TestG1ClassUnloadingHWM {
       "-XX:" + (enableUnloading ? "+" : "-") + "ClassUnloadingWithConcurrentMark",
       "-XX:+PrintHeapAtGC",
       "-XX:+PrintGCDetails",
-      "AllocateBeyondMetaspaceSize",
+      TestG1ClassUnloadingHWM.AllocateBeyondMetaspaceSize.class.getName(),
       "" + MetaspaceSize,
       "" + YoungGenSize);
     return new OutputAnalyzer(pb.start());
@@ -87,5 +87,36 @@ public class TestG1ClassUnloadingHWM {
     testWithG1ClassUnloading();
     testWithoutG1ClassUnloading();
   }
+
+  public static class AllocateBeyondMetaspaceSize {
+    public static Object dummy;
+
+    public static void main(String [] args) throws Exception {
+      if (args.length != 2) {
+        throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
+      }
+
+      WhiteBox wb = WhiteBox.getWhiteBox();
+
+      // Allocate past the MetaspaceSize limit
+      long metaspaceSize = Long.parseLong(args[0]);
+      long allocationBeyondMetaspaceSize = metaspaceSize * 2;
+      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
+
+      long youngGenSize = Long.parseLong(args[1]);
+      triggerYoungGCs(youngGenSize);
+
+      wb.freeMetaspace(null, metaspace, metaspace);
+    }
+
+    public static void triggerYoungGCs(long youngGenSize) {
+      long approxAllocSize = 32 * 1024;
+      long numAllocations = 2 * youngGenSize / approxAllocSize;
+
+      for (long i = 0; i < numAllocations; i++) {
+        dummy = new byte[(int)approxAllocSize];
+      }
+    }
+  }
 }