diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 6eb8f73deae6dfa98da0c603d3ba5f2158ad406b..42ec32d8b2e886ba48d6ea507a3f76bcfb4c45ec 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1945,6 +1945,9 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
 
   clear_cset_start_regions();
 
+  // Initialize the G1EvacuationFailureALot counters and flags.
+  NOT_PRODUCT(reset_evacuation_should_fail();)
+
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 #ifdef SPARC
   // Issue a stern warning, but allow use for experimentation and debugging.
@@ -4564,7 +4567,15 @@ oop G1ParCopyClosure
   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                              word_sz);
   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
-  oop obj = oop(obj_ptr);
+#ifndef PRODUCT
+  // Should this evacuation fail?
+  if (_g1->evacuation_should_fail()) {
+    if (obj_ptr != NULL) {
+      _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
+      obj_ptr = NULL;
+    }
+  }
+#endif // !PRODUCT
 
   if (obj_ptr == NULL) {
     // This will either forward-to-self, or detect that someone else has
@@ -4573,6 +4584,8 @@ oop G1ParCopyClosure
     return _g1->handle_evacuation_failure_par(cl, old);
   }
 
+  oop obj = oop(obj_ptr);
+
   // We're going to allocate linearly, so might as well prefetch ahead.
   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
 
@@ -5578,6 +5591,9 @@ void G1CollectedHeap::evacuate_collection_set() {
   _expand_heap_after_alloc_failure = true;
   set_evacuation_failed(false);
 
+  // Should G1EvacuationFailureALot be in effect for this GC?
+  NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
+
   g1_rem_set()->prepare_for_oops_into_collection_set_do();
   concurrent_g1_refine()->set_use_cache(false);
   concurrent_g1_refine()->clear_hot_cache_claimed_index();
@@ -5669,6 +5685,11 @@ void G1CollectedHeap::evacuate_collection_set() {
 
   if (evacuation_failed()) {
     remove_self_forwarding_pointers();
+
+    // Reset the G1EvacuationFailureALot counters and flags.
+    // Note: the values are reset only when an actual
+    // evacuation failure occurs.
+    NOT_PRODUCT(reset_evacuation_should_fail();)
   }
 
   // Enqueue any remaining references remaining on the STW
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index e92c8eaf135b714614736488f30d7f7d226bbcff..facc576f77b5d087d9d9762437279fd3b4433c26 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -915,6 +915,39 @@ protected:
   oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
   void handle_evacuation_failure_common(oop obj, markOop m);
 
+#ifndef PRODUCT
+  // Support for forcing evacuation failures. Analogous to
+  // PromotionFailureALot for the other collectors.
+
+  // Records whether G1EvacuationFailureALot should be in effect
+  // for the current GC.
+  bool _evacuation_failure_alot_for_current_gc;
+
+  // Used to record the GC number for interval checking when
+  // determining whether G1EvacuationFailureALot is in effect
+  // for the current GC.
+  size_t _evacuation_failure_alot_gc_number;
+
+  // Count of the number of evacuations between failures.
+  volatile size_t _evacuation_failure_alot_count;
+
+  // Set whether G1EvacuationFailureALot should be in effect
+  // for the current GC (based upon the type of GC and which
+  // command line flags are set).
+  inline bool evacuation_failure_alot_for_gc_type(bool gcs_are_young,
+                                                  bool during_initial_mark,
+                                                  bool during_marking);
+
+  inline void set_evacuation_failure_alot_for_current_gc();
+
+  // Return true if it's time to cause an evacuation failure.
+  inline bool evacuation_should_fail();
+
+  // Reset the G1EvacuationFailureALot counters. Should be called at
+  // the end of an evacuation pause in which an evacuation failure occurred.
+  inline void reset_evacuation_should_fail();
+#endif // !PRODUCT
+
   // ("Weak") Reference processing support.
   //
   // G1 has 2 instances of the referece processor class. One
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
index ee18c4de450bbc9139ae0320f9c49f5aefcc56a0..4f9c7726292ec042f48e367f50b958c2952dd99c 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -138,7 +138,7 @@ inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
   return _task_queues->queue(i);
 }
 
-inline bool G1CollectedHeap::isMarkedPrev(oop obj) const { 
+inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
   return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
 }
 
@@ -146,4 +146,77 @@ inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
   return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
 }
 
+#ifndef PRODUCT
+// Support for G1EvacuationFailureALot
+
+inline bool
+G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
+                                                     bool during_initial_mark,
+                                                     bool during_marking) {
+  bool res = false;
+  if (during_marking) {
+    res |= G1EvacuationFailureALotDuringConcMark;
+  }
+  if (during_initial_mark) {
+    res |= G1EvacuationFailureALotDuringInitialMark;
+  }
+  if (gcs_are_young) {
+    res |= G1EvacuationFailureALotDuringYoungGC;
+  } else {
+    // GCs are mixed
+    res |= G1EvacuationFailureALotDuringMixedGC;
+  }
+  return res;
+}
+
+inline void
+G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
+  if (G1EvacuationFailureALot) {
+    // Note we can't assert that _evacuation_failure_alot_for_current_gc
+    // is clear here. It may have been set during a previous GC but that GC
+    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
+    // trigger an evacuation failure and clear the flags and counts.
+
+    // Check if we have gone over the interval.
+    const size_t gc_num = total_collections();
+    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;
+
+    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);
+
+    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
+    const bool gcs_are_young = g1_policy()->gcs_are_young();
+    const bool during_im = g1_policy()->during_initial_mark_pause();
+    const bool during_marking = mark_in_progress();
+
+    _evacuation_failure_alot_for_current_gc &=
+      evacuation_failure_alot_for_gc_type(gcs_are_young,
+                                          during_im,
+                                          during_marking);
+  }
+}
+
+inline bool
+G1CollectedHeap::evacuation_should_fail() {
+  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
+    return false;
+  }
+  // G1EvacuationFailureALot is in effect for current GC
+  // Access to _evacuation_failure_alot_count is not atomic;
+  // the value does not have to be exact.
+  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
+    return false;
+  }
+  _evacuation_failure_alot_count = 0;
+  return true;
+}
+
+inline void G1CollectedHeap::reset_evacuation_should_fail() {
+  if (G1EvacuationFailureALot) {
+    _evacuation_failure_alot_gc_number = total_collections();
+    _evacuation_failure_alot_count = 0;
+    _evacuation_failure_alot_for_current_gc = false;
+  }
+}
+#endif // #ifndef PRODUCT
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
diff --git a/src/share/vm/gc_implementation/g1/g1_globals.hpp b/src/share/vm/gc_implementation/g1/g1_globals.hpp
index 14aadb8ab4cf3d9a3d574ebe381d2cf4375490c4..a8405e0c50e8d9e884068dbf3148f95bb097c0be 100644
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -311,7 +311,35 @@
           "as a percentage of the heap size.")                              \
                                                                             \
   experimental(ccstr, G1LogLevel, NULL,                                     \
-          "Log level for G1 logging: fine, finer, finest")
+          "Log level for G1 logging: fine, finer, finest")                  \
+                                                                            \
+  notproduct(bool, G1EvacuationFailureALot, false,                          \
+          "Force use of evacuation failure handling during certain "       \
+          "evacuation pauses")                                              \
+                                                                            \
+  develop(uintx, G1EvacuationFailureALotCount, 1000,                        \
+          "Number of successful evacuations between evacuation failures "  \
+          "occurring at object copying")                                    \
+                                                                            \
+  develop(uintx, G1EvacuationFailureALotInterval, 5,                        \
+          "Total collections between forced triggering of evacuation "     \
+          "failures")                                                       \
+                                                                            \
+  develop(bool, G1EvacuationFailureALotDuringConcMark, true,                \
+          "Force use of evacuation failure handling during evacuation "    \
+          "pauses when marking is in progress")                            \
+                                                                            \
+  develop(bool, G1EvacuationFailureALotDuringInitialMark, true,             \
+          "Force use of evacuation failure handling during initial mark "  \
+          "evacuation pauses")                                              \
+                                                                            \
+  develop(bool, G1EvacuationFailureALotDuringYoungGC, true,                 \
+          "Force use of evacuation failure handling during young "         \
+          "evacuation pauses")                                              \
+                                                                            \
+  develop(bool, G1EvacuationFailureALotDuringMixedGC, true,                 \
+          "Force use of evacuation failure handling during mixed "         \
+          "evacuation pauses")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
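The injection scheme above comes down to two pieces of per-GC state: an interval check against total_collections() that arms the failure-a-lot mode at the start of an evacuation pause, and a deliberately non-atomic per-copy counter that fires once G1EvacuationFailureALotCount copies have been seen. The standalone sketch below is not HotSpot code; the class, member and driver names are hypothetical, chosen only to mirror the arm / should-fail / reset life cycle so the interaction of G1EvacuationFailureALotInterval and G1EvacuationFailureALotCount can be seen in isolation.

// Standalone sketch (hypothetical names, not HotSpot code) of the
// failure-injection pattern added by the patch: an interval measured in
// total collections arms the mode, and a per-copy counter (volatile and
// racily updated by GC workers in the real code) decides when to fail.
#include <cstddef>
#include <iostream>

struct EvacFailureInjector {
  size_t interval;                // cf. G1EvacuationFailureALotInterval
  size_t count_threshold;         // cf. G1EvacuationFailureALotCount
  size_t last_reset_gc_number;    // cf. _evacuation_failure_alot_gc_number
  size_t copies_since_failure;    // cf. _evacuation_failure_alot_count
  bool   armed_for_current_gc;    // cf. _evacuation_failure_alot_for_current_gc

  // Start of an evacuation pause (cf. set_evacuation_failure_alot_for_current_gc()).
  void arm(size_t gc_number) {
    armed_for_current_gc = (gc_number - last_reset_gc_number) >= interval;
  }

  // Per object copy (cf. evacuation_should_fail()); an approximate count is enough.
  bool should_fail() {
    if (!armed_for_current_gc) {
      return false;
    }
    if (++copies_since_failure < count_threshold) {
      return false;
    }
    copies_since_failure = 0;
    return true;
  }

  // Called only after a failure was actually induced (cf. reset_evacuation_should_fail()).
  void reset(size_t gc_number) {
    last_reset_gc_number = gc_number;
    copies_since_failure = 0;
    armed_for_current_gc = false;
  }
};

int main() {
  EvacFailureInjector inj{5, 1000, 0, 0, false};  // interval = 5 GCs, fail after 1000 copies
  size_t induced = 0;
  for (size_t gc = 1; gc <= 20; ++gc) {           // simulate 20 evacuation pauses
    inj.arm(gc);
    bool failed = false;
    for (size_t copy = 0; copy < 3000; ++copy) {  // simulate object copies in the pause
      if (inj.should_fail()) {
        failed = true;
        ++induced;
      }
    }
    if (failed) {
      inj.reset(gc);
    }
  }
  // Pauses 5, 10, 15 and 20 are armed; each induces 3 failures, so this prints 12.
  std::cout << "induced failures: " << induced << std::endl;
  return 0;
}

The reset-only-on-actual-failure design matches the patch: an armed GC that copies fewer than G1EvacuationFailureALotCount objects leaves the flag and counter in place, so a low-allocation application still triggers a forced evacuation failure eventually rather than never reaching the threshold within a single pause.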