diff --git a/.hgtags b/.hgtags
index 719df779181fc5840fe45764de7b8c3ec9602e31..cb2dd3317361eaa8bb699bbdbdc6bb98cf255b86 100644
--- a/.hgtags
+++ b/.hgtags
@@ -524,6 +524,7 @@ b63d0e8bfc0738bba21ae67779780f59118a95f7 jdk8u40-b07
 905a16825d2931345a7d6dba9e427f98eb51761a jdk8u40-b08
 d96716f6cbba9f000dfb1da39d2b81264f4cdea7 hs25.40-b13
 7ff8d51e0d8fc71f3ad31fd15817083341416ca8 jdk8u40-b09
+e193bbae24effeaf476f688d8d840787db53d74e hs25.40-b14
 a4d44dfb7d30eea54bc172e4429a655454ae0bbf jdk8u25-b00
 9a2152fbd929b0d8b2f5c326a5526214ae71731a jdk8u25-b01
 d3d5604ea0dea3812e87ba76ac199d0a8be6f49f jdk8u25-b02
diff --git a/make/hotspot_version b/make/hotspot_version
index 0bf2de529c3aee7818fd80aadaaec61dcf535410..4aa5395277cf99d969a31ee83783b5377cf56904 100644
--- a/make/hotspot_version
+++ b/make/hotspot_version
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=40
-HS_BUILD_NUMBER=13
+HS_BUILD_NUMBER=14
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
diff --git a/src/share/vm/c1/c1_LIRGenerator.cpp b/src/share/vm/c1/c1_LIRGenerator.cpp
index aefc1387b36ef0aa68309e5af12c13fe8f6b5cd8..22a018b931433a5dd1f1412520bb264dd81086c6 100644
--- a/src/share/vm/c1/c1_LIRGenerator.cpp
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp
@@ -2066,14 +2066,14 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
   LIR_Opr base_op = base.result();
   LIR_Opr index_op = idx.result();
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       LIR_Opr long_index_op = index_op;
-      if (x->index()->type()->is_constant()) {
+      if (index_op->is_constant()) {
         long_index_op = new_register(T_LONG);
         __ move(index_op, long_index_op);
       }
@@ -2088,14 +2088,14 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
   assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
-      if (!x->index()->type()->is_constant()) {
+    if (index_op->type() == T_INT) {
+      if (!index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ convert(Bytecodes::_i2l, idx.result(), index_op);
       }
     } else {
-      assert(x->index()->type()->tag() == longTag, "must be");
-      if (x->index()->type()->is_constant()) {
+      assert(index_op->type() == T_LONG, "must be");
+      if (index_op->is_constant()) {
         index_op = new_register(T_LONG);
         __ move(idx.result(), index_op);
       }
@@ -2176,12 +2176,12 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
   LIR_Opr index_op = idx.result();
 #ifndef _LP64
-  if (x->base()->type()->tag() == longTag) {
+  if (base_op->type() == T_LONG) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   }
   if (x->has_index()) {
-    if (x->index()->type()->tag() == longTag) {
+    if (index_op->type() == T_LONG) {
       index_op = new_register(T_INT);
       __ convert(Bytecodes::_l2i, idx.result(), index_op);
     }
@@ -2191,7 +2191,7 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
 #else
   if (x->has_index()) {
-    if (x->index()->type()->tag() == intTag) {
+    if (index_op->type() == T_INT) {
       index_op = new_register(T_LONG);
       __ convert(Bytecodes::_i2l, idx.result(), index_op);
     }
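
The C1 change above makes do_UnsafeGetRaw()/do_UnsafePutRaw() decide on conversions from the type of the materialized LIR operand (base_op->type(), index_op->is_constant()) instead of the value tags of the IR inputs, so the l2i/i2l conversions and the constant handling track the operand that actually feeds the address computation. The code being lowered is raw Unsafe addressing of the form base + index. Below is a minimal sketch of such an access pattern; it is my own illustration, not code from the patch, and it uses only the sun.misc.Unsafe methods that exist in JDK 8 (allocateMemory, putByte, getByte, freeMemory).

import java.lang.reflect.Field;
import sun.misc.Unsafe;

// Illustration only: raw off-heap access where an int-typed index must be
// widened to long before being added to the long base address, which is the
// kind of address computation do_UnsafeGetRaw()/do_UnsafePutRaw() lower.
public class UnsafeRawAccessSketch {
    public static void main(String[] args) throws Exception {
        Field f = Unsafe.class.getDeclaredField("theUnsafe");
        f.setAccessible(true);
        Unsafe unsafe = (Unsafe) f.get(null);

        long base = unsafe.allocateMemory(64);   // long base address
        for (int i = 0; i < 64; i++) {           // int index
            unsafe.putByte(base + i, (byte) i);  // base + index: i is widened to long
        }
        long sum = 0;
        for (int i = 0; i < 64; i++) {
            sum += unsafe.getByte(base + i);
        }
        unsafe.freeMemory(base);
        System.out.println(sum);                 // expect 2016 (0 + 1 + ... + 63)
    }
}

On a 64-bit VM the int loop index in this sketch has to be widened to long before the addition, which is exactly the i2l path the hunks above correct.
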
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
index f153ad7aa5103872b71810bb3e5763da9020213c..50528ac08af00fb54aa7e2474db0a2313804c059 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
@@ -2733,10 +2733,12 @@ void CFLS_LAB::retire(int tid) {
   }
 }
 
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
-  assert(fl->count() == 0, "Precondition.");
-  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
-         "Precondition");
+// Used by par_get_chunk_of_blocks() for the chunks from the
+// indexed_free_lists. Looks for a chunk with size that is a multiple
+// of "word_sz" and if found, splits it into "word_sz" chunks and adds
+// them to the free list "fl". "n" is the maximum number of chunks to
+// be added to "fl".
+bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
 
   // We'll try all multiples of word_sz in the indexed set, starting with
   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
@@ -2817,11 +2819,15 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
                                         Mutex::_no_safepoint_check_flag);
       ssize_t births = _indexedFreeList[word_sz].split_births() + num;
       _indexedFreeList[word_sz].set_split_births(births);
-      return;
+      return true;
     }
   }
+  return found;
+}
 
-  // Otherwise, we'll split a block from the dictionary.
+FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
+
   FreeChunk* fc = NULL;
   FreeChunk* rem_fc = NULL;
   size_t rem;
@@ -2832,16 +2838,12 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
     fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
                                  FreeBlockDictionary<FreeChunk>::atLeast);
     if (fc != NULL) {
-      _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
-      dictionary()->dict_census_update(fc->size(),
-                                       true /*split*/,
-                                       false /*birth*/);
       break;
     } else {
       n--;
     }
   }
-  if (fc == NULL) return;
+  if (fc == NULL) return NULL;
   // Otherwise, split up that block.
   assert((ssize_t)n >= 1, "Control point invariant");
   assert(fc->is_free(), "Error: should be a free block");
@@ -2863,10 +2865,14 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
   // dictionary and return, leaving "fl" empty.
   if (n == 0) {
     returnChunkToDictionary(fc);
-    assert(fl->count() == 0, "We never allocated any blocks");
-    return;
+    return NULL;
   }
 
+  _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
+  dictionary()->dict_census_update(fc->size(),
+                                   true /*split*/,
+                                   false /*birth*/);
+
   // First return the remainder, if any.
   // Note that we hold the lock until we decide if we're going to give
   // back the remainder to the dictionary, since a concurrent allocation
@@ -2899,7 +2905,24 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
     smallSplitBirth(rem);
   }
-  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
+  assert(n * word_sz == fc->size(),
+         err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
+                 SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
+                 fc->size(), n, word_sz));
+  return fc;
+}
+
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
+
+  FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
+
+  if (fc == NULL) {
+    return;
+  }
+
+  size_t n = fc->size() / word_sz;
+
+  assert((ssize_t)n > 0, "Consistency");
   // Now do the splitting up.
   // Must do this in reverse order, so that anybody attempting to
   // access the main chunk sees it as a single free block until we
@@ -2947,6 +2970,20 @@ void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n
   assert(fl->tail()->next() == NULL, "List invariant.");
 }
 
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
+  assert(fl->count() == 0, "Precondition.");
+  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
+         "Precondition");
+
+  if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
+    // Got it
+    return;
+  }
+
+  // Otherwise, we'll split a block from the dictionary.
+  par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
+}
+
 // Set up the space's par_seq_tasks structure for work claiming
 // for parallel rescan. See CMSParRemarkTask where this is currently used.
 // XXX Need to suitably abstract and generalize this and the next
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
index e79ba698a1df51f7870fecec71a5b20f2b265648..7269068f630ef6c7d52771caf158e27e7b19ff67 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
@@ -172,6 +172,20 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // list of size "word_sz", and must now be decremented.
   void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
 
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // indexed_free_lists.
+  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
+  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
+  // evenly splittable into "n" "word_sz" chunks. Returns that
+  // evenly splittable chunk. May split a larger chunk to get the
+  // evenly splittable chunk.
+  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
+
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // dictionary.
+  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
   // first.  This allocation strategy assumes a companion sweeping
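
The CFLS change is a refactor plus a bookkeeping fix: par_get_chunk_of_blocks() now delegates to par_get_chunk_of_blocks_IFL() (indexed free lists), which reports success, and falls back to par_get_chunk_of_blocks_dictionary(), which first obtains a chunk of exactly n * word_sz words via get_n_way_chunk_to_split(); the block-offset-table update and the split census update now happen only after the remainder has been carved off. The dictionary path still splits the big chunk in reverse order, so a concurrent thread scanning the space sees the main chunk as one free block until the whole free list exists. Here is a toy model of that reverse-order split; the names are my own invention, not HotSpot's.

import java.util.ArrayDeque;
import java.util.Deque;

// Toy model of the reverse-order split in par_get_chunk_of_blocks_dictionary():
// a big free chunk of n * wordSz words is cut into n blocks starting from the
// TAIL, so the head of the big chunk (the part a concurrent scanner reaches
// first) stays a single free block until the very last step.
public class ReverseSplitSketch {
    static final class Chunk {
        final long addr;   // block start, in words
        final long size;   // block size, in words
        Chunk(long addr, long size) { this.addr = addr; this.size = size; }
        public String toString() { return "[" + addr + ", +" + size + ")"; }
    }

    // Split [base, base + n*wordSz) into n chunks of wordSz words each,
    // carving from the tail toward the head.
    static Deque<Chunk> splitReverse(long base, long wordSz, long n) {
        Deque<Chunk> freeList = new ArrayDeque<>();
        for (long i = n - 1; i >= 0; i--) {
            // Until i reaches 0, [base, base + i*wordSz) is still one block.
            freeList.addFirst(new Chunk(base + i * wordSz, wordSz));
        }
        return freeList;
    }

    public static void main(String[] args) {
        System.out.println(splitReverse(1000, 64, 4));
        // [[1000, +64), [1064, +64), [1128, +64), [1192, +64)]
    }
}
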
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 2fa28d9f3cb306d92ceb5ae1002574243d0d54c0..c4f97d8422697e13cc015eb98558491b28372aba 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -2343,6 +2343,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
     case GCCause::_g1_humongous_allocation: return true;
+    case GCCause::_update_allocation_context_stats_inc: return true;
     default:                                return false;
   }
 }
diff --git a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
index c3847e0da4e63dc1d668a95f2b37a7f338789e4e..9b50ae6af53ee223dbdf2261aa3bdafa08e295f7 100644
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
@@ -95,8 +95,9 @@ void VM_G1IncCollectionPause::doit() {
   assert(!_should_initiate_conc_mark ||
   ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
    (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-    _gc_cause == GCCause::_g1_humongous_allocation),
-    "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
+    _gc_cause == GCCause::_g1_humongous_allocation ||
+    _gc_cause == GCCause::_update_allocation_context_stats_inc),
+    "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
diff --git a/src/share/vm/gc_interface/gcCause.cpp b/src/share/vm/gc_interface/gcCause.cpp
index ff9402507c5b9ad3cd5843150f2a41981e22160c..4778d8aa45a9ed1f3d5ba8b322394d590f297296 100644
--- a/src/share/vm/gc_interface/gcCause.cpp
+++ b/src/share/vm/gc_interface/gcCause.cpp
@@ -54,7 +54,8 @@ const char* GCCause::to_string(GCCause::Cause cause) {
     case _wb_young_gc:
       return "WhiteBox Initiated Young GC";
 
-    case _update_allocation_context_stats:
+    case _update_allocation_context_stats_inc:
+    case _update_allocation_context_stats_full:
       return "Update Allocation Context Stats";
 
     case _no_gc:
diff --git a/src/share/vm/gc_interface/gcCause.hpp b/src/share/vm/gc_interface/gcCause.hpp
index 3c7b6bc72aefaaccedc1ae81d4487b34dd767653..cb8dddd5ce5a2bd758586328684117147a79b9fc 100644
--- a/src/share/vm/gc_interface/gcCause.hpp
+++ b/src/share/vm/gc_interface/gcCause.hpp
@@ -47,7 +47,8 @@ class GCCause : public AllStatic {
     _heap_inspection,
     _heap_dump,
     _wb_young_gc,
-    _update_allocation_context_stats,
+    _update_allocation_context_stats_inc,
+    _update_allocation_context_stats_full,
 
     /* implementation independent, but reserved for GC use */
     _no_gc,
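
The GC-cause change splits the old _update_allocation_context_stats cause in two: an incremental variant that is now allowed to start a concurrent G1 cycle (hence the new case in should_do_concurrent_full_gc() and the widened assert in VM_G1IncCollectionPause::doit()), and a full variant. Both still print as the same "Update Allocation Context Stats" string, as the gcCause.cpp hunk shows. If you want to observe which cause string a collection actually reports, the standard JMX notification API exposes it; the following sketch uses plain JDK 8 API and is not part of this patch.

import com.sun.management.GarbageCollectionNotificationInfo;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import javax.management.NotificationEmitter;
import javax.management.openmbean.CompositeData;

// Illustration only: subscribe to GC notifications and print the
// GCCause::to_string() value each collection reports. Both new
// _update_allocation_context_stats_* causes would surface here as the
// single string "Update Allocation Context Stats".
public class GcCauseListener {
    public static void main(String[] args) throws Exception {
        for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) {
            ((NotificationEmitter) bean).addNotificationListener((notification, handback) -> {
                if (notification.getType().equals(
                        GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) {
                    GarbageCollectionNotificationInfo info =
                        GarbageCollectionNotificationInfo.from(
                            (CompositeData) notification.getUserData());
                    System.out.println(info.getGcName() + ": cause = " + info.getGcCause());
                }
            }, null, null);
        }
        System.gc();        // this one reports the cause string "System.gc()"
        Thread.sleep(1000); // give the asynchronous notification time to arrive
    }
}
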
diff --git a/test/gc/class_unloading/AllocateBeyondMetaspaceSize.java b/test/compiler/osr/TestRangeCheck.java
similarity index 51%
rename from test/gc/class_unloading/AllocateBeyondMetaspaceSize.java
rename to test/compiler/osr/TestRangeCheck.java
index 4998fa8e562632e3ca7bdb0853a01297878e3881..6079cb9ba4f20079471832c35bd53b651fa15755 100644
--- a/test/gc/class_unloading/AllocateBeyondMetaspaceSize.java
+++ b/test/compiler/osr/TestRangeCheck.java
@@ -21,39 +21,32 @@
  * questions.
  */
 
-import sun.hotspot.WhiteBox;
-
-class AllocateBeyondMetaspaceSize {
-  public static Object dummy;
+/*
+ * @test TestRangeCheck
+ * @bug 8054883
+ * @summary Tests that range check is not skipped
+ */
 
-  public static void main(String [] args) {
-    if (args.length != 2) {
-      throw new IllegalArgumentException("Usage: <MetaspaceSize> <YoungGenSize>");
+public class TestRangeCheck {
+  public static void main(String args[]) {
+    try {
+      test();
+      throw new AssertionError("Expected ArrayIndexOutOfBoundsException was not thrown");
+    } catch (ArrayIndexOutOfBoundsException e) {
+      System.out.println("Expected ArrayIndexOutOfBoundsException was thrown");
+    }
   }
 
-    long metaspaceSize = Long.parseLong(args[0]);
-    long youngGenSize = Long.parseLong(args[1]);
-
-    run(metaspaceSize, youngGenSize);
-  }
-
-  private static void run(long metaspaceSize, long youngGenSize) {
-    WhiteBox wb = WhiteBox.getWhiteBox();
-
-    long allocationBeyondMetaspaceSize = metaspaceSize * 2;
-    long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
-
-    triggerYoungGC(youngGenSize);
-
-    wb.freeMetaspace(null, metaspace, metaspace);
-  }
+  private static void test() {
+    int arr[] = new int[1];
+    int result = 1;
 
-  private static void triggerYoungGC(long youngGenSize) {
-    long approxAllocSize = 32 * 1024;
-    long numAllocations = 2 * youngGenSize / approxAllocSize;
+    // provoke OSR compilation
+    for (int i = 0; i < Integer.MAX_VALUE; i++) {
+    }
 
-    for (long i = 0; i < numAllocations; i++) {
-      dummy = new byte[(int)approxAllocSize];
+    if (result > 0 && arr[~result] > 0) {
+      arr[~result] = 0;
+    }
   }
-  }
 }
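
In TestRangeCheck, result is 1, so ~result is -2 (two's complement: ~x == -x - 1). A correct range check must therefore throw ArrayIndexOutOfBoundsException before arr[~result] is ever read; bug 8054883 was the OSR-compiled code wrongly letting that access through. A quick stand-alone illustration of the arithmetic, mine rather than part of the test:

// Illustration only: why arr[~result] must throw. Two's complement gives
// ~x == -x - 1, so for result == 1 the index is -2, which a correct range
// check rejects for any array length.
public class ComplementIndexDemo {
    public static void main(String[] args) {
        int result = 1;
        System.out.println(~result);          // prints -2
        int[] arr = new int[1];
        try {
            int v = arr[~result];             // must throw, never read
            throw new AssertionError("range check was skipped: " + v);
        } catch (ArrayIndexOutOfBoundsException expected) {
            System.out.println("range check performed: " + expected.getMessage());
        }
    }
}
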
"+" : "-") + "CMSClassUnloadingEnabled", "-XX:+PrintHeapAtGC", "-XX:+PrintGCDetails", - "AllocateBeyondMetaspaceSize", - "" + MetaspaceSize, - "" + YoungGenSize); + "-XX:+PrintGCTimeStamps", + TestCMSClassUnloadingEnabledHWM.AllocateBeyondMetaspaceSize.class.getName(), + "" + MetaspaceSize); return new OutputAnalyzer(pb.start()); } @@ -87,5 +92,37 @@ public class TestCMSClassUnloadingEnabledHWM { testWithCMSClassUnloading(); testWithoutCMSClassUnloading(); } + + public static class AllocateBeyondMetaspaceSize { + public static void main(String [] args) throws Exception { + if (args.length != 1) { + throw new IllegalArgumentException("Usage: "); + } + + WhiteBox wb = WhiteBox.getWhiteBox(); + + // Allocate past the MetaspaceSize limit. + long metaspaceSize = Long.parseLong(args[0]); + long allocationBeyondMetaspaceSize = metaspaceSize * 2; + long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize); + + // Wait for at least one GC to occur. The caller will parse the log files produced. + GarbageCollectorMXBean cmsGCBean = getCMSGCBean(); + while (cmsGCBean.getCollectionCount() == 0) { + Thread.sleep(100); + } + + wb.freeMetaspace(null, metaspace, metaspace); + } + + private static GarbageCollectorMXBean getCMSGCBean() { + for (GarbageCollectorMXBean gcBean : ManagementFactory.getGarbageCollectorMXBeans()) { + if (gcBean.getObjectName().toString().equals("java.lang:type=GarbageCollector,name=ConcurrentMarkSweep")) { + return gcBean; + } + } + return null; + } + } } diff --git a/test/gc/class_unloading/TestG1ClassUnloadingHWM.java b/test/gc/class_unloading/TestG1ClassUnloadingHWM.java index f4c830ae2b5384cbd059cc7386f4468dcbdb4b5e..799211479b2e4d8f8ced6bc870b9c5c4ac9d9de2 100644 --- a/test/gc/class_unloading/TestG1ClassUnloadingHWM.java +++ b/test/gc/class_unloading/TestG1ClassUnloadingHWM.java @@ -26,7 +26,7 @@ * @key gc * @bug 8049831 * @library /testlibrary /testlibrary/whitebox - * @build TestG1ClassUnloadingHWM AllocateBeyondMetaspaceSize + * @build TestG1ClassUnloadingHWM * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run driver TestG1ClassUnloadingHWM * @summary Test that -XX:-ClassUnloadingWithConcurrentMark will trigger a Full GC when more than MetaspaceSize metadata is allocated. @@ -34,9 +34,9 @@ import com.oracle.java.testlibrary.OutputAnalyzer; import com.oracle.java.testlibrary.ProcessTools; - import java.util.ArrayList; import java.util.Arrays; +import sun.hotspot.WhiteBox; public class TestG1ClassUnloadingHWM { private static long MetaspaceSize = 32 * 1024 * 1024; @@ -53,7 +53,7 @@ public class TestG1ClassUnloadingHWM { "-XX:" + (enableUnloading ? 
"+" : "-") + "ClassUnloadingWithConcurrentMark", "-XX:+PrintHeapAtGC", "-XX:+PrintGCDetails", - "AllocateBeyondMetaspaceSize", + TestG1ClassUnloadingHWM.AllocateBeyondMetaspaceSize.class.getName(), "" + MetaspaceSize, "" + YoungGenSize); return new OutputAnalyzer(pb.start()); @@ -87,5 +87,36 @@ public class TestG1ClassUnloadingHWM { testWithG1ClassUnloading(); testWithoutG1ClassUnloading(); } + + public static class AllocateBeyondMetaspaceSize { + public static Object dummy; + + public static void main(String [] args) throws Exception { + if (args.length != 2) { + throw new IllegalArgumentException("Usage: "); + } + + WhiteBox wb = WhiteBox.getWhiteBox(); + + // Allocate past the MetaspaceSize limit + long metaspaceSize = Long.parseLong(args[0]); + long allocationBeyondMetaspaceSize = metaspaceSize * 2; + long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize); + + long youngGenSize = Long.parseLong(args[1]); + triggerYoungGCs(youngGenSize); + + wb.freeMetaspace(null, metaspace, metaspace); + } + + public static void triggerYoungGCs(long youngGenSize) { + long approxAllocSize = 32 * 1024; + long numAllocations = 2 * youngGenSize / approxAllocSize; + + for (long i = 0; i < numAllocations; i++) { + dummy = new byte[(int)approxAllocSize]; + } + } + } }