diff --git a/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp
index cbf9e8e7fd34b695d3a8af7e612a34b0ea9aaec9..cdc13041a5d20a062a0e6fcfe39132510cc60a43 100644
--- a/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp
+++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp
@@ -187,19 +187,23 @@ public:
   size_t code_root_elems() const { return _code_root_elems; }
 
   void print_rs_mem_info_on(outputStream * out, size_t total) {
-    out->print_cr("    %8dK (%5.1f%%) by %zd %s regions", round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name);
+    out->print_cr("    "SIZE_FORMAT_W(8)"K (%5.1f%%) by "SIZE_FORMAT" %s regions",
+        round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name);
   }
 
   void print_cards_occupied_info_on(outputStream * out, size_t total) {
-    out->print_cr("     %8d (%5.1f%%) entries by %zd %s regions", cards_occupied(), cards_occupied_percent_of(total), amount(), _name);
+    out->print_cr("     "SIZE_FORMAT_W(8)" (%5.1f%%) entries by "SIZE_FORMAT" %s regions",
+        cards_occupied(), cards_occupied_percent_of(total), amount(), _name);
   }
 
   void print_code_root_mem_info_on(outputStream * out, size_t total) {
-    out->print_cr("    %8dK (%5.1f%%) by %zd %s regions", round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name);
+    out->print_cr("    "SIZE_FORMAT_W(8)"K (%5.1f%%) by "SIZE_FORMAT" %s regions",
+        round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name);
   }
 
   void print_code_root_elems_info_on(outputStream * out, size_t total) {
-    out->print_cr("     %8d (%5.1f%%) elements by %zd %s regions", code_root_elems(), code_root_elems_percent_of(total), amount(), _name);
+    out->print_cr("     "SIZE_FORMAT_W(8)" (%5.1f%%) elements by "SIZE_FORMAT" %s regions",
+        code_root_elems(), code_root_elems_percent_of(total), amount(), _name);
   }
 };
 
@@ -327,14 +331,14 @@ void G1RemSetSummary::print_on(outputStream* out) {
   out->print_cr("\n Recent concurrent refinement statistics");
   out->print_cr("  Processed "SIZE_FORMAT" cards",
                 num_concurrent_refined_cards());
-  out->print_cr("  Of %d completed buffers:", num_processed_buf_total());
-  out->print_cr("     %8d (%5.1f%%) by concurrent RS threads.",
+  out->print_cr("  Of "SIZE_FORMAT" completed buffers:", num_processed_buf_total());
+  out->print_cr("     "SIZE_FORMAT_W(8)" (%5.1f%%) by concurrent RS threads.",
                 num_processed_buf_total(),
                 percent_of(num_processed_buf_rs_threads(), num_processed_buf_total()));
-  out->print_cr("     %8d (%5.1f%%) by mutator threads.",
+  out->print_cr("     "SIZE_FORMAT_W(8)" (%5.1f%%) by mutator threads.",
                 num_processed_buf_mutator(),
                 percent_of(num_processed_buf_mutator(), num_processed_buf_total()));
-  out->print_cr("  Did %d coarsenings.", num_coarsenings());
+  out->print_cr("  Did "SIZE_FORMAT" coarsenings.", num_coarsenings());
   out->print_cr("  Concurrent RS threads times (s)");
   out->print("     ");
   for (uint i = 0; i < _num_vtimes; i++) {
diff --git a/src/share/vm/memory/metaspace.cpp b/src/share/vm/memory/metaspace.cpp
index 9b321de4c442d1499e0f7f2f2a5642eea89b2913..1877967408d739f4ed021864f1d0a8065f08d81e 100644
--- a/src/share/vm/memory/metaspace.cpp
+++ b/src/share/vm/memory/metaspace.cpp
@@ -75,8 +75,7 @@ enum ChunkSizes {    // in words.
   ClassSmallChunk = 256,
   SmallChunk = 512,
   ClassMediumChunk = 4 * K,
-  MediumChunk = 8 * K,
-  HumongousChunkGranularity = 8
+  MediumChunk = 8 * K
 };
 
 static ChunkIndex next_chunk_index(ChunkIndex i) {
@@ -92,6 +91,7 @@ typedef class FreeList ChunkList;
 
 // Manages the global free lists of chunks.
 class ChunkManager : public CHeapObj {
+  friend class TestVirtualSpaceNodeTest;
 
   // Free list of chunks of different sizes.
   //   SpecializedChunk
@@ -257,6 +257,8 @@ class VirtualSpaceNode : public CHeapObj {
   // VirtualSpace
   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 
+  // Committed but unused space in the virtual space
+  size_t free_words_in_vs() const;
  public:
 
   VirtualSpaceNode(size_t byte_size);
@@ -301,7 +303,6 @@ class VirtualSpaceNode : public CHeapObj {
   // used and capacity in this single entry in the list
   size_t used_words_in_vs() const;
   size_t capacity_words_in_vs() const;
-  size_t free_words_in_vs() const;
 
   bool initialize();
 
@@ -319,6 +320,13 @@
   // in the node from any freelist.
   void purge(ChunkManager* chunk_manager);
 
+  // If an allocation doesn't fit in the current node a new node is created.
+  // Allocate chunks out of the remaining committed space in this node
+  // to avoid wasting that memory.
+  // This always adds up because all the chunk sizes are multiples of
+  // the smallest chunk size.
+  void retire(ChunkManager* chunk_manager);
+
 #ifdef ASSERT
   // Debug support
   void mangle();
@@ -461,6 +469,10 @@ class VirtualSpaceList : public CHeapObj {
   // and is typically followed by the allocation of a chunk.
   bool create_new_virtual_space(size_t vs_word_size);
 
+  // Chunk up the unused committed space in the current
+  // virtual space and add the chunks to the free list.
+  void retire_current_virtual_space();
+
  public:
   VirtualSpaceList(size_t word_size);
   VirtualSpaceList(ReservedSpace rs);
@@ -624,10 +636,12 @@ class SpaceManager : public CHeapObj {
   bool is_class() { return _mdtype == Metaspace::ClassType; }
 
   // Accessors
-  size_t specialized_chunk_size() { return SpecializedChunk; }
-  size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
-  size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
-  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
+  size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
+  size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
+  size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
+  size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
+
+  size_t smallest_chunk_size() { return specialized_chunk_size(); }
 
   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
   size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
@@ -1056,6 +1070,35 @@ void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
 #endif
 }
 
+void VirtualSpaceList::retire_current_virtual_space() {
+  assert_lock_strong(SpaceManager::expand_lock());
+
+  VirtualSpaceNode* vsn = current_virtual_space();
+
+  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
+                                  Metaspace::chunk_manager_metadata();
+
+  vsn->retire(cm);
+}
+
+void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
+  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
+    ChunkIndex index = (ChunkIndex)i;
+    size_t chunk_size = chunk_manager->free_chunks(index)->size();
+
+    while (free_words_in_vs() >= chunk_size) {
+      DEBUG_ONLY(verify_container_count();)
+      Metachunk* chunk = get_chunk_vs(chunk_size);
+      assert(chunk != NULL, "allocation should have been successful");
+
+      chunk_manager->return_chunks(index, chunk);
+      chunk_manager->inc_free_chunks_total(chunk_size);
+      DEBUG_ONLY(verify_container_count();)
+    }
+  }
+  assert(free_words_in_vs() == 0, "should be empty now");
+}
+
 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                    _is_class(false),
                                    _virtual_space_list(NULL),
@@ -1181,6 +1224,7 @@ bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
   if (vs_expanded) {
     return true;
   }
+  retire_current_virtual_space();
 
   // Get another virtual space.
   size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
@@ -1902,12 +1946,12 @@ size_t SpaceManager::calc_chunk_size(size_t word_size) {
     chunk_word_size = medium_chunk_size();
   }
 
-  // Might still need a humongous chunk.  Enforce an
-  // eight word granularity to facilitate reuse (some
-  // wastage but better chance of reuse).
+  // Might still need a humongous chunk.  Enforce
+  // humongous allocations sizes to be aligned up to
+  // the smallest chunk size.
   size_t if_humongous_sized_chunk =
     align_size_up(word_size + Metachunk::overhead(),
-                  HumongousChunkGranularity);
+                  smallest_chunk_size());
   chunk_word_size =
     MAX2((size_t) chunk_word_size, if_humongous_sized_chunk);
 
@@ -2151,10 +2195,10 @@ SpaceManager::~SpaceManager() {
     }
     assert(humongous_chunks->word_size() == (size_t)
            align_size_up(humongous_chunks->word_size(),
-                         HumongousChunkGranularity),
+                         smallest_chunk_size()),
           err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT
                   " granularity %d",
-                  humongous_chunks->word_size(), HumongousChunkGranularity));
+                  humongous_chunks->word_size(), smallest_chunk_size()));
     Metachunk* next_humongous_chunks = humongous_chunks->next();
     humongous_chunks->container()->dec_container_count();
     chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
@@ -3301,9 +3345,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
   }
 
   if (result == NULL) {
-    report_metadata_oome(loader_data, word_size, mdtype, THREAD);
-    // Will not reach here.
-    return NULL;
+    report_metadata_oome(loader_data, word_size, mdtype, CHECK_NULL);
   }
 
   // Zero initialize.
@@ -3494,4 +3536,94 @@ void TestMetaspaceAux_test() {
   TestMetaspaceAuxTest::test();
 }
 
+class TestVirtualSpaceNodeTest {
+  static void chunk_up(size_t words_left, size_t& num_medium_chunks,
+                                           size_t& num_small_chunks,
+                                           size_t& num_specialized_chunks) {
+    num_medium_chunks = words_left / MediumChunk;
+    words_left = words_left % MediumChunk;
+
+    num_small_chunks = words_left / SmallChunk;
+    words_left = words_left % SmallChunk;
+    // how many specialized chunks can we get?
+    num_specialized_chunks = words_left / SpecializedChunk;
+    assert(words_left % SpecializedChunk == 0, "should be nothing left");
+  }
+
+ public:
+  static void test() {
+    MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+    const size_t vsn_test_size_words = MediumChunk * 4;
+    const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
+
+    // The chunk sizes must be multiples of eachother, or this will fail
+    STATIC_ASSERT(MediumChunk % SmallChunk == 0);
+    STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
+
+    { // No committed memory in VSN
+      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      vsn.initialize();
+      vsn.retire(&cm);
+      assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
+    }
+
+    { // All of VSN is committed, half is used by chunks
+      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      vsn.initialize();
+      vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
+      vsn.get_chunk_vs(MediumChunk);
+      vsn.get_chunk_vs(MediumChunk);
+      vsn.retire(&cm);
+      assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
+      assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
+    }
+
+    { // 4 pages of VSN is committed, some is used by chunks
+      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
+      assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
+      vsn.initialize();
+      vsn.expand_by(page_chunks, page_chunks);
+      vsn.get_chunk_vs(SmallChunk);
+      vsn.get_chunk_vs(SpecializedChunk);
+      vsn.retire(&cm);
+
+      // committed - used = words left to retire
+      const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
+
+      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
+      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
+
+      assert(num_medium_chunks == 0, "should not get any medium chunks");
+      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
+      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
+    }
+
+    { // Half of VSN is committed, a humongous chunk is used
+      ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
+      VirtualSpaceNode vsn(vsn_test_size_bytes);
+      vsn.initialize();
+      vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
+      vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
+      vsn.retire(&cm);
+
+      const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk);
+      size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
+      chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
+
+      assert(num_medium_chunks == 0, "should not get any medium chunks");
+      assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
+      assert(cm.sum_free_chunks() == words_left, "sizes should add up");
+    }
+
+  }
+};
+
+void TestVirtualSpaceNode_test() {
+  TestVirtualSpaceNodeTest::test();
+}
+
 #endif
diff --git a/src/share/vm/prims/jni.cpp b/src/share/vm/prims/jni.cpp
index b5c271897e4bfa1afde1db4bbe49c98f2cd647e3..ab24446baf19056fb1a80dddaa3d475e85ced385 100644
--- a/src/share/vm/prims/jni.cpp
+++ b/src/share/vm/prims/jni.cpp
@@ -5060,6 +5060,7 @@ void TestReserveMemorySpecial_test();
 void TestVirtualSpace_test();
 void TestMetaspaceAux_test();
 void TestMetachunk_test();
+void TestVirtualSpaceNode_test();
 #if INCLUDE_ALL_GCS
 void TestG1BiasedArray_test();
 #endif
@@ -5072,6 +5073,7 @@ void execute_internal_vm_tests() {
     run_unit_test(TestVirtualSpace_test());
     run_unit_test(TestMetaspaceAux_test());
     run_unit_test(TestMetachunk_test());
+    run_unit_test(TestVirtualSpaceNode_test());
     run_unit_test(GlobalDefinitions::test_globals());
     run_unit_test(GCTimerAllTest::all());
     run_unit_test(arrayOopDesc::test_max_array_length());
diff --git a/src/share/vm/prims/jvmtiImpl.cpp b/src/share/vm/prims/jvmtiImpl.cpp
index 0fcd1ba94ed07337e467cd6c505230437af3c7e3..5d560f56a2d5365760af3b78606afc0099217b9f 100644
--- a/src/share/vm/prims/jvmtiImpl.cpp
+++ b/src/share/vm/prims/jvmtiImpl.cpp
@@ -225,18 +225,20 @@ JvmtiBreakpoint::JvmtiBreakpoint() {
   _method = NULL;
   _bci = 0;
   _class_loader = NULL;
-#ifdef CHECK_UNHANDLED_OOPS
-  // This one is always allocated with new, but check it just in case.
-  Thread *thread = Thread::current();
-  if (thread->is_in_stack((address)&_method)) {
-    thread->allow_unhandled_oop((oop*)&_method);
-  }
-#endif // CHECK_UNHANDLED_OOPS
 }
 
 JvmtiBreakpoint::JvmtiBreakpoint(Method* m_method, jlocation location) {
   _method = m_method;
   _class_loader = _method->method_holder()->class_loader_data()->class_loader();
+#ifdef CHECK_UNHANDLED_OOPS
+  // _class_loader can't be wrapped in a Handle, because JvmtiBreakpoint:s are
+  // eventually allocated on the heap.
+  //
+  // The code handling JvmtiBreakpoint:s allocated on the stack can't be
+  // interrupted by a GC until _class_loader is reachable by the GC via the
+  // oops_do method.
+  Thread::current()->allow_unhandled_oop(&_class_loader);
+#endif // CHECK_UNHANDLED_OOPS
   assert(_method != NULL, "_method != NULL");
   _bci = (int) location;
   assert(_bci >= 0, "_bci >= 0");
diff --git a/test/gc/TestSystemGC.java b/test/gc/TestSystemGC.java
new file mode 100644
index 0000000000000000000000000000000000000000..b882f9fc72d12beb5b8f03ea7c17b34e639c2beb
--- /dev/null
+++ b/test/gc/TestSystemGC.java
@@ -0,0 +1,46 @@
+/*
+* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test TestSystemGC
+ * @key gc
+ * @summary Runs System.gc() with different flags.
+ * @run main/othervm TestSystemGC
+ * @run main/othervm -XX:+UseSerialGC TestSystemGC
+ * @run main/othervm -XX:+UseParNewGC TestSystemGC
+ * @run main/othervm -XX:+UseParallelGC TestSystemGC
+ * @run main/othervm -XX:+UseParallelGC -XX:-UseParallelOldGC TestSystemGC
+ * @run main/othervm -XX:+UseConcMarkSweepGC TestSystemGC
+ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
+ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent -XX:-UseParNewGC TestSystemGC
+ * @run main/othervm -XX:+UseG1GC TestSystemGC
+ * @run main/othervm -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
+ * @run main/othervm -XX:+UseLargePages TestSystemGC
+ * @run main/othervm -XX:+UseLargePages -XX:+UseLargePagesInMetaspace TestSystemGC
+ */
+
+public class TestSystemGC {
+  public static void main(String args[]) throws Exception {
+    System.gc();
+  }
+}
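
Note on the metaspace change above: VirtualSpaceNode::retire() and the test's chunk_up() helper both rely on every chunk size being a multiple of the next smaller one, so the committed-but-unused words of a node always decompose exactly into whole chunks. The following is a minimal standalone sketch of that decomposition only, not HotSpot code: the SmallChunk and MediumChunk values mirror the non-class sizes in the ChunkSizes enum of the patch, SpecializedChunk = 128 words is assumed (it is not shown in the hunk), and the example figure of 1408 words assumes 4 KB pages and 8-byte words.

// chunk_up_sketch.cpp -- illustration of the greedy chunk-up arithmetic.
#include <cassert>
#include <cstddef>
#include <cstdio>

static const size_t K = 1024;
static const size_t SpecializedChunk = 128;   // assumed smallest chunk size (words)
static const size_t SmallChunk       = 512;   // words, as in the ChunkSizes enum
static const size_t MediumChunk      = 8 * K; // words, as in the ChunkSizes enum

// Carve 'words_left' of committed-but-unused space into free chunks,
// largest size first. Nothing is left over because every chunk size
// divides the next larger one.
static void chunk_up(size_t words_left,
                     size_t& num_medium, size_t& num_small, size_t& num_spec) {
  num_medium = words_left / MediumChunk;
  words_left = words_left % MediumChunk;

  num_small  = words_left / SmallChunk;
  words_left = words_left % SmallChunk;

  num_spec   = words_left / SpecializedChunk;
  assert(words_left % SpecializedChunk == 0 && "should be nothing left");
}

int main() {
  // With 4 KB pages and 8-byte words, 4 committed pages are 2048 words;
  // handing out one SmallChunk and one SpecializedChunk leaves
  // 2048 - 512 - 128 = 1408 words to retire.
  size_t medium = 0, small = 0, spec = 0;
  chunk_up(1408, medium, small, spec);
  std::printf("medium=%zu small=%zu specialized=%zu\n", medium, small, spec);
  // Prints: medium=0 small=2 specialized=3  (2*512 + 3*128 = 1408)
  return 0;
}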