diff --git a/src/share/vm/compiler/compileBroker.cpp b/src/share/vm/compiler/compileBroker.cpp
index b8f7ae47383aa30c0223d9cc6779e9dd85a15d8e..c59d4c4ce5607f910af19c1006c763ecd0b32f65 100644
--- a/src/share/vm/compiler/compileBroker.cpp
+++ b/src/share/vm/compiler/compileBroker.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1210,7 +1210,17 @@ uint CompileBroker::assign_compile_id(methodHandle method, int osr_bci) {
 // Should the current thread be blocked until this compilation request
 // has been fulfilled?
 bool CompileBroker::is_compile_blocking(methodHandle method, int osr_bci) {
-  return !BackgroundCompilation;
+  if (!BackgroundCompilation) {
+    Symbol* class_name = method->method_holder()->klass_part()->name();
+    if (class_name->starts_with("java/lang/ref/Reference", 23)) {
+      // The reference handler thread can deadlock with the GC if compilation is blocking,
+      // so we avoid blocking compiles for anything in the java.lang.ref.Reference class,
+      // including inner classes such as ReferenceHandler.
+      return false;
+    }
+    return true;
+  }
+  return false;
 }
diff --git a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
index d26530323d24cf641f2b5b2914727306738d64c5..c0d968aa8f45c2d2d4f59ab89cb16e1f760575fe 100644
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -99,7 +99,7 @@ void ConcurrentG1Refine::init() {
   if (G1ConcRSLogCacheSize > 0) {
     _g1h = G1CollectedHeap::heap();
     _max_n_card_counts =
-      (unsigned) (_g1h->g1_reserved_obj_bytes() >> CardTableModRefBS::card_shift);
+      (unsigned) (_g1h->max_capacity() >> CardTableModRefBS::card_shift);
     size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
     guarantee(_max_n_card_counts < max_card_num, "card_num representation");
diff --git a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp
index 91fdd4a1996dfb07df251fac355e7e2d63590921..5ff79ac6122a317c5dda193f77ba6fd2221c1767 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp
@@ -251,6 +251,14 @@ void ConcurrentMarkThread::run() {
       // Now do the remainder of the cleanup operation.
       _cm->completeCleanup();
 
+      // Notify anyone who's waiting that there are no more free
+      // regions coming. We have to do this before we join the STS,
+      // otherwise we might deadlock: a GC worker could be blocked
+      // waiting for the notification whereas this thread will be
+      // blocked for the pause to finish while it's trying to join
+      // the STS, which is conditional on the GC workers finishing.
+      g1h->reset_free_regions_coming();
+
       _sts.join();
       g1_policy->record_concurrent_mark_cleanup_completed();
       _sts.leave();
@@ -262,9 +270,6 @@ void ConcurrentMarkThread::run() {
         gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf]",
                                cleanup_end_sec - cleanup_start_sec);
       }
-
-      // We're done: no more free regions coming.
-      g1h->reset_free_regions_coming();
     }
     guarantee(cm()->cleanup_list_is_empty(),
               "at this point there should be no regions on the cleanup list");
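
Note on the compileBroker.cpp hunk above: the prefix length 23 passed to starts_with() is exactly the length of "java/lang/ref/Reference", so the exemption covers the Reference class itself and, because inner classes share the prefix, java/lang/ref/Reference$ReferenceHandler as well. A minimal standalone sketch of the same check (a plain C-string helper standing in for HotSpot's Symbol API, not the real one):

    #include <cassert>
    #include <cstring>

    // Hypothetical stand-in for Symbol::starts_with(const char*, int):
    // true if the first `len` bytes of `name` equal `prefix`.
    static bool starts_with(const char* name, const char* prefix, int len) {
      return std::strlen(name) >= (size_t)len &&
             std::memcmp(name, prefix, (size_t)len) == 0;
    }

    int main() {
      const char* p = "java/lang/ref/Reference";   // 23 characters
      assert(starts_with("java/lang/ref/Reference", p, 23));
      // Inner classes share the prefix, so they are exempted too.
      assert(starts_with("java/lang/ref/Reference$ReferenceHandler", p, 23));
      // Unrelated classes in the same package are not.
      assert(!starts_with("java/lang/ref/SoftReference", p, 23));
      return 0;
    }
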
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 3d3522461a556bf4a7d0570505f03b194d63e004..a4f01adf0aa8b2a253c964e4379b7a9f1089b4cb 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -546,8 +546,11 @@ HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
     res = new_region_try_secondary_free_list(word_size);
   }
   if (res == NULL && do_expand) {
-    expand(word_size * HeapWordSize);
-    res = _free_list.remove_head_or_null();
+    if (expand(word_size * HeapWordSize)) {
+      // The expansion succeeded and so we should have at least one
+      // region on the free list.
+      res = _free_list.remove_head();
+    }
   }
   if (res != NULL) {
     if (G1PrintHeapRegions) {
@@ -631,9 +634,22 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   if (first == -1) {
     // The only thing we can do now is attempt expansion.
     if (fs + x_size >= num_regions) {
-      expand((num_regions - fs) * HeapRegion::GrainBytes);
-      first = humongous_obj_allocate_find_first(num_regions, word_size);
-      assert(first != -1, "this should have worked");
+      // If the number of regions we're trying to allocate for this
+      // object is at most the number of regions in the free suffix,
+      // then the call to humongous_obj_allocate_find_first() above
+      // should have succeeded and we wouldn't be here.
+      //
+      // We should only be trying to expand when the free suffix is
+      // not sufficient for the object _and_ we have some expansion
+      // room available.
+      assert(num_regions > fs, "earlier allocation should have succeeded");
+
+      if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
+        first = humongous_obj_allocate_find_first(num_regions, word_size);
+        // If the expansion was successful then the allocation
+        // should have been successful.
+        assert(first != -1, "this should have worked");
+      }
     }
   }
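
Note on the two g1CollectedHeap.cpp hunks above: with expand() now reporting success, new_region_work() can use remove_head(), which may assume a non-empty list, instead of remove_head_or_null(), and humongous_obj_allocate() only repeats its search when the commit actually happened. A sketch of the calling pattern with a toy free list (all names here are illustrative, not G1's):

    #include <cassert>
    #include <cstddef>
    #include <deque>

    struct Region { int id; };

    static std::deque<Region*> free_list;

    // Stand-in for G1's expand(): "commit" one region's worth of
    // storage, put it on the free list, and report success. A real
    // implementation can fail and return false.
    static bool expand(size_t /* bytes */) {
      free_list.push_back(new Region());
      return true;
    }

    // Mirrors new_region_work(): the free list is popped only on the
    // success path, which is what makes a remove_head()-style
    // "list must be non-empty" assumption valid.
    static Region* new_region_work(size_t word_size) {
      if (free_list.empty() && !expand(word_size * sizeof(void*))) {
        return NULL;  // expansion failed: report allocation failure
      }
      assert(!free_list.empty());
      Region* res = free_list.front();
      free_list.pop_front();
      return res;
    }

    int main() {
      Region* r = new_region_work(1024);
      assert(r != NULL);
      delete r;
      return 0;
    }
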
@@ -1647,16 +1663,17 @@ resize_if_necessary_after_full_collection(size_t word_size) {
   if (capacity_after_gc < minimum_desired_capacity) {
     // Don't expand unless it's significant
     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
-    expand(expand_bytes);
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("  "
-                             "  expanding:"
-                             "  max_heap_size: %6.1fK"
-                             "  minimum_desired_capacity: %6.1fK"
-                             "  expand_bytes: %6.1fK",
-                             (double) max_heap_size / (double) K,
-                             (double) minimum_desired_capacity / (double) K,
-                             (double) expand_bytes / (double) K);
+    if (expand(expand_bytes)) {
+      if (PrintGC && Verbose) {
+        gclog_or_tty->print_cr("  "
+                               "  expanding:"
+                               "  max_heap_size: %6.1fK"
+                               "  minimum_desired_capacity: %6.1fK"
+                               "  expand_bytes: %6.1fK",
+                               (double) max_heap_size / (double) K,
+                               (double) minimum_desired_capacity / (double) K,
+                               (double) expand_bytes / (double) K);
+      }
     }
 
     // No expansion, now see if we want to shrink
@@ -1757,66 +1774,84 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
 
   verify_region_sets_optional();
 
-  size_t expand_bytes = word_size * HeapWordSize;
-  if (expand_bytes < MinHeapDeltaBytes) {
-    expand_bytes = MinHeapDeltaBytes;
+  size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
+  if (expand(expand_bytes)) {
+    verify_region_sets_optional();
+    return attempt_allocation_at_safepoint(word_size,
+                                  false /* expect_null_cur_alloc_region */);
   }
-  expand(expand_bytes);
-
-  verify_region_sets_optional();
-
-  return attempt_allocation_at_safepoint(word_size,
-                                         false /* expect_null_cur_alloc_region */);
+  return NULL;
 }
 
-// FIXME: both this and shrink could probably be more efficient by
-// doing one "VirtualSpace::expand_by" call rather than several.
-void G1CollectedHeap::expand(size_t expand_bytes) {
+bool G1CollectedHeap::expand(size_t expand_bytes) {
   size_t old_mem_size = _g1_storage.committed_size();
-  // We expand by a minimum of 1K.
-  expand_bytes = MAX2(expand_bytes, (size_t)K);
-  size_t aligned_expand_bytes =
-    ReservedSpace::page_align_size_up(expand_bytes);
+  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);
-  expand_bytes = aligned_expand_bytes;
-  while (expand_bytes > 0) {
-    HeapWord* base = (HeapWord*)_g1_storage.high();
-    // Commit more storage.
-    bool successful = _g1_storage.expand_by(HeapRegion::GrainBytes);
-    if (!successful) {
-      expand_bytes = 0;
-    } else {
-      expand_bytes -= HeapRegion::GrainBytes;
-      // Expand the committed region.
-      HeapWord* high = (HeapWord*) _g1_storage.high();
-      _g1_committed.set_end(high);
+
+  if (Verbose && PrintGC) {
+    gclog_or_tty->print("Expanding garbage-first heap from %ldK by %ldK",
+                        old_mem_size/K, aligned_expand_bytes/K);
+  }
+
+  HeapWord* old_end = (HeapWord*)_g1_storage.high();
+  bool successful = _g1_storage.expand_by(aligned_expand_bytes);
+  if (successful) {
+    HeapWord* new_end = (HeapWord*)_g1_storage.high();
+
+    // Expand the committed region.
+    _g1_committed.set_end(new_end);
+
+    // Tell the cardtable about the expansion.
+    Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
+
+    // And the offset table as well.
+    _bot_shared->resize(_g1_committed.word_size());
+
+    expand_bytes = aligned_expand_bytes;
+    HeapWord* base = old_end;
+
+    // Create the heap regions for [old_end, new_end)
+    while (expand_bytes > 0) {
+      HeapWord* high = base + HeapRegion::GrainWords;
+
       // Create a new HeapRegion.
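
Note on the rewritten expand(): the old FIXME is resolved by committing the whole aligned amount with a single expand_by() call and then carving HeapRegions out of [old_end, new_end), instead of committing one region at a time. Committing in one call also makes failure all-or-nothing, which is what lets expand() report a single boolean to its callers. A sketch of just the carving loop, under toy types (Region and GRAIN_WORDS are illustrative, not HotSpot's):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    typedef unsigned long HeapWord;  // toy stand-in for HotSpot's HeapWord
    const size_t GRAIN_WORDS = 512;  // toy region size, in words

    struct Region { HeapWord* bottom; HeapWord* end; };

    // Carve fixed-size regions out of a newly committed range
    // [old_end, new_end), as the loop added to expand() does.
    static void carve_regions(HeapWord* old_end, HeapWord* new_end,
                              std::vector<Region>* free_list) {
      HeapWord* base = old_end;
      while (base < new_end) {
        Region r = { base, base + GRAIN_WORDS };
        free_list->push_back(r);
        base += GRAIN_WORDS;
      }
      // Holds because the committed size is region-aligned.
      assert(base == new_end);
    }

    int main() {
      static HeapWord storage[4 * GRAIN_WORDS];
      std::vector<Region> free_list;
      carve_regions(storage, storage + 4 * GRAIN_WORDS, &free_list);
      assert(free_list.size() == 4);
      return 0;
    }
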
       MemRegion mr(base, high);
       bool is_zeroed = !_g1_max_committed.contains(base);
       HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);
 
-      // Now update max_committed if necessary.
-      _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), high));
-
       // Add it to the HeapRegionSeq.
       _hrs->insert(hr);
       _free_list.add_as_tail(hr);
+
       // And we used up an expansion region to create it.
       _expansion_regions--;
-      // Tell the cardtable about it.
-      Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-      // And the offset table as well.
-      _bot_shared->resize(_g1_committed.word_size());
+
+      expand_bytes -= HeapRegion::GrainBytes;
+      base += HeapRegion::GrainWords;
+    }
+    assert(base == new_end, "sanity");
+
+    // Now update max_committed if necessary.
+    _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end));
+
+  } else {
+    // The expansion of the virtual storage space was unsuccessful.
+    // Let's see if it was because we ran out of swap.
+    if (G1ExitOnExpansionFailure &&
+        _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
+      // We had headroom...
+      vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
     }
   }
 
   if (Verbose && PrintGC) {
     size_t new_mem_size = _g1_storage.committed_size();
-    gclog_or_tty->print_cr("Expanding garbage-first heap from %ldK by %ldK to %ldK",
-                           old_mem_size/K, aligned_expand_bytes/K,
+    gclog_or_tty->print_cr("...%s, expanded to %ldK",
+                           (successful ? "Successful" : "Failed"),
                            new_mem_size/K);
   }
+  return successful;
 }
 
 void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
@@ -2088,7 +2123,10 @@ jint G1CollectedHeap::initialize() {
   HeapRegionRemSet::init_heap(max_regions());
 
   // Now expand into the initial heap size.
-  expand(init_byte_size);
+  if (!expand(init_byte_size)) {
+    vm_exit_during_initialization("Failed to allocate initial heap.");
+    return JNI_ENOMEM;
+  }
 
   // Perform any initialization actions delegated to the policy.
   g1_policy()->init();
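
Note on the initialize() hunk: a failed initial expansion now aborts VM start-up with JNI_ENOMEM instead of continuing with an empty heap. A hedged sketch of that shape (commit_initial_heap() is a hypothetical stand-in for the VirtualSpace commit; JNI_ENOMEM is -4 per jni.h):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    #define JNI_OK     0
    #define JNI_ENOMEM (-4)

    // Hypothetical commit step; returns false when the OS refuses to
    // back the requested range (e.g. out of swap).
    static bool commit_initial_heap(size_t bytes) {
      void* p = std::malloc(bytes);
      if (p == NULL) return false;
      std::free(p);
      return true;
    }

    // Same shape as G1's initialize(): a failed initial expansion
    // makes start-up fail instead of yielding a zero-sized heap.
    static int heap_initialize(size_t init_byte_size) {
      if (!commit_initial_heap(init_byte_size)) {
        std::fprintf(stderr, "Failed to allocate initial heap.\n");
        return JNI_ENOMEM;
      }
      return JNI_OK;
    }

    int main() {
      return heap_initialize(64u * 1024 * 1024) == JNI_OK ? 0 : 1;
    }
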
@@ -2744,7 +2782,7 @@ size_t G1CollectedHeap::large_typearray_limit() {
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return g1_reserved_obj_bytes();
+  return _g1_reserved.byte_size();
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
@@ -3538,7 +3576,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
       size_t expand_bytes = g1_policy()->expansion_amount();
       if (expand_bytes > 0) {
         size_t bytes_before = capacity();
-        expand(expand_bytes);
+        if (!expand(expand_bytes)) {
+          // We failed to expand the heap so let's verify that the
+          // committed/uncommitted amounts match the backing store
+          assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
+          assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
+        }
       }
     }
@@ -3762,7 +3805,7 @@ void G1CollectedHeap::get_gc_alloc_regions() {
 
       if (alloc_region == NULL) {
         // we will get a new GC alloc region
-        alloc_region = new_gc_alloc_region(ap, 0);
+        alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords);
       } else {
         // the region was retained from the last collection
         ++_gc_alloc_region_counts[ap];
@@ -5311,7 +5354,7 @@ size_t G1CollectedHeap::n_regions() {
 
 size_t G1CollectedHeap::max_regions() {
   return
-    (size_t)align_size_up(g1_reserved_obj_bytes(), HeapRegion::GrainBytes) /
+    (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) /
     HeapRegion::GrainBytes;
 }
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index 4bad5fd43209776a36ffe8edcc15eb0af4dc814b..e3185856948b3dc83ca8183e674c4a96280c586b 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -619,8 +619,10 @@ protected:
 public:
   // Expand the garbage-first heap by at least the given size (in bytes!).
+  // Returns true if the heap was expanded by the requested amount;
+  // false otherwise.
   // (Rounds up to a HeapRegion boundary.)
-  virtual void expand(size_t expand_bytes);
+  bool expand(size_t expand_bytes);
 
   // Do anything common to GC's.
   virtual void gc_prologue(bool full);
@@ -981,9 +983,6 @@ public:
   // Reference Processing accessor
   ReferenceProcessor* ref_processor() { return _ref_processor; }
 
-  // Reserved (g1 only; super method includes perm), capacity and the used
-  // portion in bytes.
-  size_t g1_reserved_obj_bytes() const { return _g1_reserved.byte_size(); }
   virtual size_t capacity() const;
   virtual size_t used() const;
   // This should be called when we're not holding the heap lock. The
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index 63a5d63c780d64c7c60e9504fe398a8127cd1ddb..ff3c55acc4bf0afeed9102d4668629888531707c 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -2011,7 +2011,7 @@ size_t G1CollectorPolicy::expansion_amount() {
     // space, whichever is smaller, bounded below by a minimum
     // expansion (unless that's all that's left.)
     const size_t min_expand_bytes = 1*M;
-    size_t reserved_bytes = _g1->g1_reserved_obj_bytes();
+    size_t reserved_bytes = _g1->max_capacity();
     size_t committed_bytes = _g1->capacity();
     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
     size_t expand_bytes;
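
Note on the g1_reserved_obj_bytes() removal: the accessor returned exactly _g1_reserved.byte_size(), which is what G1's max_capacity() override now returns, so callers such as expansion_amount() can go through the common CollectedHeap interface instead of a G1-only duplicate. A toy sketch of the relationship (class and member names are illustrative):

    #include <cassert>
    #include <cstddef>

    // The subclass answers max_capacity() from its reserved range
    // rather than exposing a second accessor for the same value.
    class CollectedHeap {
     public:
      virtual ~CollectedHeap() {}
      virtual size_t capacity() const = 0;      // committed bytes
      virtual size_t max_capacity() const = 0;  // reserved bytes
    };

    class G1Heap : public CollectedHeap {
      size_t _reserved_bytes;
      size_t _committed_bytes;
     public:
      G1Heap(size_t r, size_t c) : _reserved_bytes(r), _committed_bytes(c) {}
      virtual size_t capacity() const     { return _committed_bytes; }
      virtual size_t max_capacity() const { return _reserved_bytes; }
    };

    // expansion_amount()-style arithmetic against the common interface.
    static size_t uncommitted_bytes(const CollectedHeap& h) {
      return h.max_capacity() - h.capacity();
    }

    int main() {
      G1Heap h(1024 * 1024, 256 * 1024);
      assert(uncommitted_bytes(h) == 768 * 1024);
      return 0;
    }
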
diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
index 622028c402d270aa71bf6eefbd7a46c7542bf775..48d3dbd100cec4ad1ae9524c504246f61b81ed89 100644
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -735,7 +735,7 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
   MemRegion dirtyRegion(start, end);
 
 #if CARD_REPEAT_HISTO
-  init_ct_freq_table(_g1->g1_reserved_obj_bytes());
+  init_ct_freq_table(_g1->max_capacity());
   ct_freq_note_card(_ct_bs->index_for(start));
 #endif
diff --git a/src/share/vm/gc_implementation/g1/g1_globals.hpp b/src/share/vm/gc_implementation/g1/g1_globals.hpp
index 0e5d23189533f94b08ce7afa89fd7bcad09ae047..19a8338c6e60ef9777c0457c3c17a5550d80a4ef 100644
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -301,9 +301,13 @@
   develop(uintx, G1StressConcRegionFreeingDelayMillis, 0,                   \
           "Artificial delay during concurrent region freeing")              \
                                                                             \
-  develop(bool, ReduceInitialCardMarksForG1, false,                         \
+  develop(bool, ReduceInitialCardMarksForG1, false,                         \
           "When ReduceInitialCardMarks is true, this flag setting "         \
-          " controls whether G1 allows the RICM optimization")
+          " controls whether G1 allows the RICM optimization")              \
+                                                                            \
+  develop(bool, G1ExitOnExpansionFailure, false,                            \
+          "Raise a fatal VM exit out of memory failure in the event "      \
+          " that heap expansion fails due to running out of swap.")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
diff --git a/src/share/vm/gc_implementation/shared/allocationStats.hpp b/src/share/vm/gc_implementation/shared/allocationStats.hpp
index b2d410ad6173a9a0aa867a92395271e174b10620..4e089c14cde2cee772edea1ef79c5541eba13e71 100644
--- a/src/share/vm/gc_implementation/shared/allocationStats.hpp
+++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp
@@ -116,10 +116,8 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
     _demand_rate_estimate.sample(rate);
     float new_rate = _demand_rate_estimate.padded_average();
     ssize_t old_desired = _desired;
-    _desired = (ssize_t)(new_rate * (inter_sweep_estimate
-                                     + CMSExtrapolateSweep
-                                     ? intra_sweep_estimate
-                                     : 0.0));
+    float delta_ise = (CMSExtrapolateSweep ? intra_sweep_estimate : 0.0);
+    _desired = (ssize_t)(new_rate * (inter_sweep_estimate + delta_ise));
     if (PrintFLSStatistics > 1) {
       gclog_or_tty->print_cr("demand: %d, old_rate: %f, current_rate: %f, new_rate: %f, old_desired: %d, new_desired: %d",
                              demand, old_rate, rate, new_rate, old_desired, _desired);
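
Note on the allocationStats.hpp hunk: the removed expression is an operator-precedence bug. Since + binds tighter than ?:, the old code evaluated (inter_sweep_estimate + CMSExtrapolateSweep) as the ternary's condition, so the factor was either intra_sweep_estimate or 0.0 and inter_sweep_estimate never contributed to the product. A self-contained demonstration:

    #include <cassert>

    int main() {
      const bool  extrapolate          = false;  // CMSExtrapolateSweep
      const float inter_sweep_estimate = 10.0f;
      const float intra_sweep_estimate = 3.0f;

      // Old grouping: the sum becomes the condition. Even with
      // extrapolate == false, (10.0f + 0) is nonzero, so the whole
      // factor is 3.0f and inter_sweep_estimate is dropped.
      float buggy = inter_sweep_estimate + extrapolate
                      ? intra_sweep_estimate
                      : 0.0f;
      assert(buggy == 3.0f);

      // Fixed grouping, as in the patch: pick the delta, then add.
      float delta_ise = extrapolate ? intra_sweep_estimate : 0.0f;
      float fixed = inter_sweep_estimate + delta_ise;
      assert(fixed == 10.0f);
      return 0;
    }
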
diff --git a/src/share/vm/gc_implementation/shared/gcUtil.cpp b/src/share/vm/gc_implementation/shared/gcUtil.cpp
index 267f73d3b99392d303cb99b988143f651252cbfe..caa6efed1c8f61cd76ea8a7e901a470192ee406d 100644
--- a/src/share/vm/gc_implementation/shared/gcUtil.cpp
+++ b/src/share/vm/gc_implementation/shared/gcUtil.cpp
@@ -106,8 +106,8 @@ void AdaptivePaddedNoZeroDevAverage::sample(float new_sample) {
 }
 
 LinearLeastSquareFit::LinearLeastSquareFit(unsigned weight) :
-  _sum_x(0), _sum_y(0), _sum_xy(0),
-  _mean_x(weight), _mean_y(weight) {}
+  _sum_x(0), _sum_x_squared(0), _sum_y(0), _sum_xy(0),
+  _intercept(0), _slope(0), _mean_x(weight), _mean_y(weight) {}
 
 void LinearLeastSquareFit::update(double x, double y) {
   _sum_x = _sum_x + x;
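
Note on the gcUtil.cpp hunk: the old initializer list skipped _sum_x_squared, _intercept, and _slope, leaving them as garbage until enough samples arrived; the fix zero-initializes every member. For reference, a minimal self-contained least-squares accumulator in the same spirit (plain unweighted sums, unlike HotSpot's decaying averages):

    #include <cassert>
    #include <cstddef>

    // Incremental least-squares fit of y = slope * x + intercept.
    // Every accumulator is initialized up front -- the point of the
    // fix -- so querying an empty fit is well-defined.
    class LeastSquareFit {
      double _sum_x, _sum_x_squared, _sum_y, _sum_xy;
      size_t _n;
     public:
      LeastSquareFit()
        : _sum_x(0), _sum_x_squared(0), _sum_y(0), _sum_xy(0), _n(0) {}

      void update(double x, double y) {
        _sum_x         += x;
        _sum_x_squared += x * x;
        _sum_y         += y;
        _sum_xy        += x * y;
        _n             += 1;
      }

      // slope = (n*Sxy - Sx*Sy) / (n*Sxx - Sx*Sx)
      double slope() const {
        double den = _n * _sum_x_squared - _sum_x * _sum_x;
        return (den != 0.0) ? (_n * _sum_xy - _sum_x * _sum_y) / den : 0.0;
      }
      double intercept() const {
        return (_n != 0) ? (_sum_y - slope() * _sum_x) / _n : 0.0;
      }
    };

    int main() {
      LeastSquareFit fit;  // no garbage values, even before updates
      fit.update(1, 2); fit.update(2, 4); fit.update(3, 6);
      assert(fit.slope() > 1.99 && fit.slope() < 2.01);
      assert(fit.intercept() > -0.01 && fit.intercept() < 0.01);
      return 0;
    }
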