From a2031db949f8a42bdbc74a94aaf6e7982bcdc6c0 Mon Sep 17 00:00:00 2001
From: johnc
Date: Wed, 2 Feb 2011 10:41:20 -0800
Subject: [PATCH] 6923430: G1: assert(res != 0,"This should have worked.")
 7007446: G1: expand the heap with a single step, not one region at a time

Summary: Changed G1CollectedHeap::expand() to expand the committed space by
calling VirtualSpace::expand_by() once rather than for every region in the
expansion amount. This allows the success or failure of the expansion to be
determined before creating any heap regions. Introduced a develop flag
G1ExitOnExpansionFailure (false by default) that, when true, will exit the VM
if the expansion of the committed space fails. Finally, G1CollectedHeap::expand()
returns a status to its caller so that the caller knows whether to attempt
the allocation.
Reviewed-by: brutisso, tonyp
---
 .../g1/concurrentG1Refine.cpp                 |   4 +-
 .../gc_implementation/g1/g1CollectedHeap.cpp  | 157 +++++++++++-------
 .../gc_implementation/g1/g1CollectedHeap.hpp  |   7 +-
 .../g1/g1CollectorPolicy.cpp                  |   2 +-
 .../vm/gc_implementation/g1/g1RemSet.cpp      |   4 +-
 .../vm/gc_implementation/g1/g1_globals.hpp    |   8 +-
 6 files changed, 114 insertions(+), 68 deletions(-)

diff --git a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
index d26530323..c0d968aa8 100644
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -99,7 +99,7 @@ void ConcurrentG1Refine::init() {
   if (G1ConcRSLogCacheSize > 0) {
     _g1h = G1CollectedHeap::heap();
     _max_n_card_counts =
-      (unsigned) (_g1h->g1_reserved_obj_bytes() >> CardTableModRefBS::card_shift);
+      (unsigned) (_g1h->max_capacity() >> CardTableModRefBS::card_shift);
 
     size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
     guarantee(_max_n_card_counts < max_card_num, "card_num representation");
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 3d3522461..a4f01adf0 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -546,8 +546,11 @@ HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
     res = new_region_try_secondary_free_list(word_size);
   }
   if (res == NULL && do_expand) {
-    expand(word_size * HeapWordSize);
-    res = _free_list.remove_head_or_null();
+    if (expand(word_size * HeapWordSize)) {
+      // The expansion succeeded and so we should have at least one
+      // region on the free list.
+      res = _free_list.remove_head();
+    }
   }
   if (res != NULL) {
     if (G1PrintHeapRegions) {
@@ -631,9 +634,22 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   if (first == -1) {
     // The only thing we can do now is attempt expansion.
     if (fs + x_size >= num_regions) {
-      expand((num_regions - fs) * HeapRegion::GrainBytes);
-      first = humongous_obj_allocate_find_first(num_regions, word_size);
-      assert(first != -1, "this should have worked");
+      // If the number of regions we're trying to allocate for this
+      // object is at most the number of regions in the free suffix,
+      // then the call to humongous_obj_allocate_find_first() above
+      // should have succeeded and we wouldn't be here.
+      //
+      // We should only be trying to expand when the free suffix is
+      // not sufficient for the object _and_ we have some expansion
+      // room available.
+      assert(num_regions > fs, "earlier allocation should have succeeded");
+
+      if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
+        first = humongous_obj_allocate_find_first(num_regions, word_size);
+        // If the expansion was successful then the allocation
+        // should have been successful.
+        assert(first != -1, "this should have worked");
+      }
     }
   }
 
@@ -1647,16 +1663,17 @@ resize_if_necessary_after_full_collection(size_t word_size) {
   if (capacity_after_gc < minimum_desired_capacity) {
     // Don't expand unless it's significant
     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
-    expand(expand_bytes);
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr(" "
-                             " expanding:"
-                             " max_heap_size: %6.1fK"
-                             " minimum_desired_capacity: %6.1fK"
-                             " expand_bytes: %6.1fK",
-                             (double) max_heap_size / (double) K,
-                             (double) minimum_desired_capacity / (double) K,
-                             (double) expand_bytes / (double) K);
+    if (expand(expand_bytes)) {
+      if (PrintGC && Verbose) {
+        gclog_or_tty->print_cr(" "
+                               " expanding:"
+                               " max_heap_size: %6.1fK"
+                               " minimum_desired_capacity: %6.1fK"
+                               " expand_bytes: %6.1fK",
+                               (double) max_heap_size / (double) K,
+                               (double) minimum_desired_capacity / (double) K,
+                               (double) expand_bytes / (double) K);
+      }
     }
 
     // No expansion, now see if we want to shrink
@@ -1757,66 +1774,84 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
 
   verify_region_sets_optional();
 
-  size_t expand_bytes = word_size * HeapWordSize;
-  if (expand_bytes < MinHeapDeltaBytes) {
-    expand_bytes = MinHeapDeltaBytes;
+  size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
+  if (expand(expand_bytes)) {
+    verify_region_sets_optional();
+    return attempt_allocation_at_safepoint(word_size,
+                                           false /* expect_null_cur_alloc_region */);
   }
-  expand(expand_bytes);
-
-  verify_region_sets_optional();
-
-  return attempt_allocation_at_safepoint(word_size,
-                                         false /* expect_null_cur_alloc_region */);
+  return NULL;
 }
 
-// FIXME: both this and shrink could probably be more efficient by
-// doing one "VirtualSpace::expand_by" call rather than several.
-void G1CollectedHeap::expand(size_t expand_bytes) {
+bool G1CollectedHeap::expand(size_t expand_bytes) {
   size_t old_mem_size = _g1_storage.committed_size();
-  // We expand by a minimum of 1K.
-  expand_bytes = MAX2(expand_bytes, (size_t)K);
-  size_t aligned_expand_bytes =
-    ReservedSpace::page_align_size_up(expand_bytes);
+  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);
-  expand_bytes = aligned_expand_bytes;
-  while (expand_bytes > 0) {
-    HeapWord* base = (HeapWord*)_g1_storage.high();
-    // Commit more storage.
-    bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
-    if (!successful) {
-      expand_bytes = 0;
-    } else {
-      expand_bytes -= HeapRegion::GrainBytes;
-      // Expand the committed region.
-      HeapWord* high = (HeapWord*) _g1_storage.high();
-      _g1_committed.set_end(high);
+
+  if (Verbose && PrintGC) {
+    gclog_or_tty->print("Expanding garbage-first heap from %ldK by %ldK",
+                        old_mem_size/K, aligned_expand_bytes/K);
+  }
+
+  HeapWord* old_end = (HeapWord*)_g1_storage.high();
+  bool successful = _g1_storage.expand_by(aligned_expand_bytes);
+  if (successful) {
+    HeapWord* new_end = (HeapWord*)_g1_storage.high();
+
+    // Expand the committed region.
+    _g1_committed.set_end(new_end);
+
+    // Tell the cardtable about the expansion.
+    Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
+
+    // And the offset table as well.
+    _bot_shared->resize(_g1_committed.word_size());
+
+    expand_bytes = aligned_expand_bytes;
+    HeapWord* base = old_end;
+
+    // Create the heap regions for [old_end, new_end)
+    while (expand_bytes > 0) {
+      HeapWord* high = base + HeapRegion::GrainWords;
+      // Create a new HeapRegion.
       MemRegion mr(base, high);
       bool is_zeroed = !_g1_max_committed.contains(base);
       HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);
 
-      // Now update max_committed if necessary.
-      _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));
-
       // Add it to the HeapRegionSeq.
       _hrs->insert(hr);
       _free_list.add_as_tail(hr);
+
       // And we used up an expansion region to create it.
       _expansion_regions--;
-      // Tell the cardtable about it.
-      Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-      // And the offset table as well.
-      _bot_shared->resize(_g1_committed.word_size());
+
+      expand_bytes -= HeapRegion::GrainBytes;
+      base += HeapRegion::GrainWords;
+    }
+    assert(base == new_end, "sanity");
+
+    // Now update max_committed if necessary.
+    _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end));
+
+  } else {
+    // The expansion of the virtual storage space was unsuccessful.
+    // Let's see if it was because we ran out of swap.
+    if (G1ExitOnExpansionFailure &&
+        _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
+      // We had head room...
+      vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
    }
   }
   if (Verbose && PrintGC) {
     size_t new_mem_size = _g1_storage.committed_size();
-    gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
-                           old_mem_size/K, aligned_expand_bytes/K,
+    gclog_or_tty->print_cr("...%s, expanded to %ldK",
+                           (successful ? "Successful" : "Failed"),
                            new_mem_size/K);
   }
+  return successful;
 }
 
 void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
@@ -2088,7 +2123,10 @@ jint G1CollectedHeap::initialize() {
   HeapRegionRemSet::init_heap(max_regions());
 
   // Now expand into the initial heap size.
-  expand(init_byte_size);
+  if (!expand(init_byte_size)) {
+    vm_exit_during_initialization("Failed to allocate initial heap.");
+    return JNI_ENOMEM;
+  }
 
   // Perform any initialization actions delegated to the policy.
   g1_policy()->init();
@@ -2744,7 +2782,7 @@ size_t G1CollectedHeap::large_typearray_limit() {
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return g1_reserved_obj_bytes();
+  return _g1_reserved.byte_size();
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
@@ -3538,7 +3576,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
       size_t expand_bytes = g1_policy()->expansion_amount();
       if (expand_bytes > 0) {
         size_t bytes_before = capacity();
-        expand(expand_bytes);
+        if (!expand(expand_bytes)) {
+          // We failed to expand the heap so let's verify that
+          // committed/uncommitted amount match the backing store
+          assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
+          assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
+        }
       }
     }
 
@@ -3762,7 +3805,7 @@ void G1CollectedHeap::get_gc_alloc_regions() {
 
     if (alloc_region == NULL) {
       // we will get a new GC alloc region
-      alloc_region = new_gc_alloc_region(ap, 0);
+      alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords);
     } else {
       // the region was retained from the last collection
      ++_gc_alloc_region_counts[ap];
@@ -5311,7 +5354,7 @@ size_t G1CollectedHeap::n_regions() {
 
 size_t G1CollectedHeap::max_regions() {
   return
-    (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
+    (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) /
     HeapRegion::GrainBytes;
 }
 
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index 4bad5fd43..e31858569 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -619,8 +619,10 @@ protected:
 public:
 
   // Expand the garbage-first heap by at least the given size (in bytes!).
+  // Returns true if the heap was expanded by the requested amount;
+  // false otherwise.
   // (Rounds up to a HeapRegion boundary.)
-  virtual void expand(size_t expand_bytes);
+  bool expand(size_t expand_bytes);
 
   // Do anything common to GC's.
   virtual void gc_prologue(bool full);
@@ -981,9 +983,6 @@ public:
   // Reference Processing accessor
   ReferenceProcessor* ref_processor() { return _ref_processor; }
 
-  // Reserved (g1 only; super method includes perm), capacity and the used
-  // portion in bytes.
-  size_t g1_reserved_obj_bytes() const { return _g1_reserved.byte_size(); }
   virtual size_t capacity() const;
   virtual size_t used() const;
   // This should be called when we're not holding the heap lock. The
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index 63a5d63c7..ff3c55acc 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -2011,7 +2011,7 @@ size_t G1CollectorPolicy::expansion_amount() {
     // space, whichever is smaller, bounded below by a minimum
     // expansion (unless that's all that's left.)
     const size_t min_expand_bytes = 1*M;
-    size_t reserved_bytes = _g1->g1_reserved_obj_bytes();
+    size_t reserved_bytes = _g1->max_capacity();
     size_t committed_bytes = _g1->capacity();
     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
     size_t expand_bytes;
diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
index 622028c40..48d3dbd10 100644
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -735,7 +735,7 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
   MemRegion dirtyRegion(start, end);
 
 #if CARD_REPEAT_HISTO
-  init_ct_freq_table(_g1->g1_reserved_obj_bytes());
+  init_ct_freq_table(_g1->max_capacity());
   ct_freq_note_card(_ct_bs->index_for(start));
 #endif
 
diff --git a/src/share/vm/gc_implementation/g1/g1_globals.hpp b/src/share/vm/gc_implementation/g1/g1_globals.hpp
index 0e5d23189..19a8338c6 100644
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -301,9 +301,13 @@
   develop(uintx, G1StressConcRegionFreeingDelayMillis, 0,                   \
           "Artificial delay during concurrent region freeing")              \
                                                                             \
-  develop(bool, ReduceInitialCardMarksForG1, false,                         \
+  develop(bool, ReduceInitialCardMarksForG1, false,                         \
          "When ReduceInitialCardMarks is true, this flag setting "          \
-          " controls whether G1 allows the RICM optimization")
+          " controls whether G1 allows the RICM optimization")              \
+                                                                            \
+  develop(bool, G1ExitOnExpansionFailure, false,                            \
+          "Raise a fatal VM exit out of memory failure in the event "       \
+          " that heap expansion fails due to running out of swap.")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)

-- 
GitLab
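
Editor's note (illustrative, not part of the patch): the sketch below is a minimal,
self-contained C++ illustration of the control flow the Summary describes -- commit
the whole expansion request with a single expand_by()-style call, and only create
region bookkeeping once that single step has succeeded, reporting success or failure
to the caller. The VirtualSpaceSim and Heap types, the region size, and the main()
driver are hypothetical stand-ins for this sketch only; they are not HotSpot classes.

#include <cstddef>
#include <cstdio>
#include <vector>

namespace sketch {

// Stand-in for HeapRegion::GrainBytes; the value is arbitrary here.
const std::size_t kRegionBytes = 1 * 1024 * 1024;

// Hypothetical stand-in for VirtualSpace: tracks committed vs. reserved bytes.
struct VirtualSpaceSim {
  std::size_t committed;
  std::size_t reserved;
  bool expand_by(std::size_t bytes) {          // single-step commit, may fail
    if (committed + bytes > reserved) return false;
    committed += bytes;
    return true;
  }
  std::size_t uncommitted_size() const { return reserved - committed; }
};

struct Heap {
  VirtualSpaceSim storage;
  std::vector<std::size_t> free_regions;       // offsets of committed, unused regions

  // Returns true only if the whole aligned request was committed, mirroring
  // the bool now returned by G1CollectedHeap::expand() in the patch.
  bool expand(std::size_t expand_bytes) {
    std::size_t aligned =
        ((expand_bytes + kRegionBytes - 1) / kRegionBytes) * kRegionBytes;
    std::size_t old_committed = storage.committed;
    if (!storage.expand_by(aligned)) {
      // Expansion failed before any region bookkeeping was touched.
      return false;
    }
    // Success: create one region record per newly committed chunk.
    for (std::size_t off = old_committed; off < old_committed + aligned;
         off += kRegionBytes) {
      free_regions.push_back(off);
    }
    return true;
  }
};

} // namespace sketch

int main() {
  sketch::Heap heap;
  heap.storage = {0, 8 * sketch::kRegionBytes};  // 8 regions reserved, none committed

  // A request that fits: the single-step commit succeeds and regions appear.
  bool ok = heap.expand(3 * sketch::kRegionBytes);
  std::printf("expand ok: %d, free regions: %zu\n", ok, heap.free_regions.size());

  // A request that exceeds the reservation: expand() fails up front and no
  // region bookkeeping is created; the caller checks the boolean result
  // before retrying the allocation, as the patched call sites now do.
  ok = heap.expand(64 * sketch::kRegionBytes);
  std::printf("expand ok: %d, free regions: %zu\n", ok, heap.free_regions.size());
  return 0;
}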