diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/src/share/vm/gc_implementation/g1/concurrentMark.cpp index 3f72bc84dc5bc84be8461db54d13d617d7bcd183..38ddad4d987cda3d59487f404878fe380662423e 100644 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp @@ -1828,7 +1828,7 @@ void ConcurrentMark::completeCleanup() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); _cleanup_list.verify_optional(); - FreeRegionList local_free_list("Local Cleanup List"); + FreeRegionList tmp_free_list("Tmp Free List"); if (G1ConcRegionFreeingVerbose) { gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " @@ -1842,7 +1842,7 @@ void ConcurrentMark::completeCleanup() { HeapRegion* hr = _cleanup_list.remove_head(); assert(hr != NULL, "the list was not empty"); hr->rem_set()->clear(); - local_free_list.add_as_tail(hr); + tmp_free_list.add_as_tail(hr); // Instead of adding one region at a time to the secondary_free_list, // we accumulate them in the local list and move them a few at a @@ -1850,20 +1850,20 @@ // we do during this process. We'll also append the local list when // _cleanup_list is empty (which means we just removed the last // region from the _cleanup_list). - if ((local_free_list.length() % G1SecondaryFreeListAppendLength == 0) || + if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || _cleanup_list.is_empty()) { if (G1ConcRegionFreeingVerbose) { gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " "appending "SIZE_FORMAT" entries to the " "secondary_free_list, clean list still has " SIZE_FORMAT" entries", - local_free_list.length(), + tmp_free_list.length(), _cleanup_list.length()); } { MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); - g1h->secondary_free_list_add_as_tail(&local_free_list); + g1h->secondary_free_list_add_as_tail(&tmp_free_list); SecondaryFreeList_lock->notify_all(); } @@ -1874,7 +1874,7 @@ } } } - assert(local_free_list.is_empty(), "post-condition"); + assert(tmp_free_list.is_empty(), "post-condition"); } // Support closures for reference procssing in G1 @@ -3182,7 +3182,7 @@ public: template <class T> void do_oop_work(T* p) { assert( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant"); - assert(!_g1h->is_on_free_list( + assert(!_g1h->is_on_master_free_list( _g1h->heap_region_containing((HeapWord*) p)), "invariant"); oop obj = oopDesc::load_decode_heap_oop(p); @@ -3403,7 +3403,7 @@ void CMTask::deal_with_reference(oop obj) { void CMTask::push(oop obj) { HeapWord* objAddr = (HeapWord*) obj; assert(_g1h->is_in_g1_reserved(objAddr), "invariant"); - assert(!_g1h->is_on_free_list( + assert(!_g1h->is_on_master_free_list( _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant"); assert(!_g1h->is_obj_ill(obj), "invariant"); assert(_nextMarkBitMap->isMarked(objAddr), "invariant"); @@ -3649,7 +3649,7 @@ void CMTask::drain_local_queue(bool partially) { (void*) obj); assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" ); - assert(!_g1h->is_on_free_list( + assert(!_g1h->is_on_master_free_list( _g1h->heap_region_containing((HeapWord*) obj)), "invariant"); scan_object(obj); diff --git a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp index 5ff79ac6122a317c5dda193f77ba6fd2221c1767..9fbf20fb10aba11c33fc8e739384804a7883f993 100644 --- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp @@ -237,9 +237,9 @@ void ConcurrentMarkThread::run() { // The following will finish freeing up any regions that we // found to be empty during cleanup. We'll do this part // without joining the suspendible set. If an evacuation pause - // takes places, then we would carry on freeing regions in + // takes place, then we would carry on freeing regions in // case they are needed by the pause. If a Full GC takes - // places, it would wait for us to process the regions + // place, it would wait for us to process the regions // reclaimed by cleanup. double cleanup_start_sec = os::elapsedTime(); diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index e34d7270e71ab7cfdb11716c8ccc2453a7518cbc..63ec2e6e56bf2e22021da28f691742ef7b47852e 100644 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -479,7 +479,7 @@ G1CollectedHeap* G1CollectedHeap::_g1h; // Private methods. HeapRegion* -G1CollectedHeap::new_region_try_secondary_free_list(size_t word_size) { +G1CollectedHeap::new_region_try_secondary_free_list() { MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); while (!_secondary_free_list.is_empty() || free_regions_coming()) { if (!_secondary_free_list.is_empty()) { @@ -531,7 +531,7 @@ HeapRegion* G1CollectedHeap::new_region_work(size_t word_size, gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " "forced to look at the secondary_free_list"); } - res = new_region_try_secondary_free_list(word_size); + res = new_region_try_secondary_free_list(); if (res != NULL) { return res; } @@ -543,7 +543,7 @@ HeapRegion* G1CollectedHeap::new_region_work(size_t word_size, gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " "res == NULL, trying the secondary_free_list"); } - res = new_region_try_secondary_free_list(word_size); + res = new_region_try_secondary_free_list(); } if (res == NULL && do_expand) { if (expand(word_size * HeapWordSize)) { @@ -579,6 +579,9 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose, int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, size_t word_size) { + assert(isHumongous(word_size), "word_size should be humongous"); + assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); + int first = -1; if (num_regions == 1) { // Only one region to allocate, no need to go through the slower @@ -600,7 +603,7 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, // request. If we are only allocating one region we use the common // region allocation code (see above). 
wait_while_free_regions_coming(); - append_secondary_free_list_if_not_empty(); + append_secondary_free_list_if_not_empty_with_lock(); if (free_regions() >= num_regions) { first = _hrs->find_contiguous(num_regions); @@ -608,7 +611,7 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, for (int i = first; i < first + (int) num_regions; ++i) { HeapRegion* hr = _hrs->at(i); assert(hr->is_empty(), "sanity"); - assert(is_on_free_list(hr), "sanity"); + assert(is_on_master_free_list(hr), "sanity"); hr->set_pending_removal(true); } _free_list.remove_all_pending(num_regions); @@ -618,6 +621,126 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions, return first; } +HeapWord* +G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first, + size_t num_regions, + size_t word_size) { + assert(first != -1, "pre-condition"); + assert(isHumongous(word_size), "word_size should be humongous"); + assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); + + // Index of last region in the series + 1. + int last = first + (int) num_regions; + + // We need to initialize the region(s) we just discovered. This is + // a bit tricky given that it can happen concurrently with + // refinement threads refining cards on these regions and + // potentially wanting to refine the BOT as they are scanning + // those cards (this can happen shortly after a cleanup; see CR + // 6991377). So we have to set up the region(s) carefully and in + // a specific order. + + // The word size sum of all the regions we will allocate. + size_t word_size_sum = num_regions * HeapRegion::GrainWords; + assert(word_size <= word_size_sum, "sanity"); + + // This will be the "starts humongous" region. + HeapRegion* first_hr = _hrs->at(first); + // The header of the new object will be placed at the bottom of + // the first region. + HeapWord* new_obj = first_hr->bottom(); + // This will be the new end of the first region in the series that + // should also match the end of the last region in the series. + HeapWord* new_end = new_obj + word_size_sum; + // This will be the new top of the first region that will reflect + // this allocation. + HeapWord* new_top = new_obj + word_size; + + // First, we need to zero the header of the space that we will be + // allocating. When we update top further down, some refinement + // threads might try to scan the region. By zeroing the header we + // ensure that any thread that will try to scan the region will + // come across the zero klass word and bail out. + // + // NOTE: It would not have been correct to have used + // CollectedHeap::fill_with_object() and make the space look like + // an int array. The thread that is doing the allocation will + // later update the object header to a potentially different array + // type and, for a very short period of time, the klass and length + // fields will be inconsistent. This could cause a refinement + // thread to calculate the object size incorrectly. + Copy::fill_to_words(new_obj, oopDesc::header_size(), 0); + + // We will set up the first region as "starts humongous". This + // will also update the BOT covering all the regions to reflect + // that there is a single object that starts at the bottom of the + // first region. + first_hr->set_startsHumongous(new_top, new_end); + + // Then, if there are any, we will set up the "continues + // humongous" regions.
+ HeapRegion* hr = NULL; + for (int i = first + 1; i < last; ++i) { + hr = _hrs->at(i); + hr->set_continuesHumongous(first_hr); + } + // If we have "continues humongous" regions (hr != NULL), then the + // end of the last one should match new_end. + assert(hr == NULL || hr->end() == new_end, "sanity"); + + // Up to this point no concurrent thread would have been able to + // do any scanning on any region in this series. All the top + // fields still point to bottom, so the intersection between + // [bottom,top] and [card_start,card_end] will be empty. Before we + // update the top fields, we'll do a storestore to make sure that + // no thread sees the update to top before the zeroing of the + // object header and the BOT initialization. + OrderAccess::storestore(); + + // Now that the BOT and the object header have been initialized, + // we can update top of the "starts humongous" region. + assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), + "new_top should be in this region"); + first_hr->set_top(new_top); + + // Now, we will update the top fields of the "continues humongous" + // regions. The reason we need to do this is that, otherwise, + // these regions would look empty and this will confuse parts of + // G1. For example, the code that looks for a consecutive number + // of empty regions will consider them empty and try to + // re-allocate them. We can extend is_empty() to also include + // !continuesHumongous(), but it is easier to just update the top + // fields here. The way we set top for all regions (i.e., top == + // end for all regions but the last one, top == new_top for the + // last one) is actually used when we will free up the humongous + // region in free_humongous_region(). + hr = NULL; + for (int i = first + 1; i < last; ++i) { + hr = _hrs->at(i); + if ((i + 1) == last) { + // last continues humongous region + assert(hr->bottom() < new_top && new_top <= hr->end(), + "new_top should fall on this region"); + hr->set_top(new_top); + } else { + // not last one + assert(new_top > hr->end(), "new_top should be above this region"); + hr->set_top(hr->end()); + } + } + // If we have continues humongous regions (hr != NULL), then the + // end of the last one should match new_end and its top should + // match new_top. + assert(hr == NULL || + (hr->end() == new_end && hr->top() == new_top), "sanity"); + + assert(first_hr->used() == word_size * HeapWordSize, "invariant"); + _summary_bytes_used += first_hr->used(); + _humongous_set.add(first_hr); + + return new_obj; +} + // If could fit into free regions w/o expansion, try. // Otherwise, if can expand, do so. // Otherwise, if using ex regions might help, try with ex given back. @@ -653,121 +776,16 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) { } } + HeapWord* result = NULL; if (first != -1) { - // Index of last region in the series + 1. - int last = first + (int) num_regions; - - // We need to initialize the region(s) we just discovered. This is - // a bit tricky given that it can happen concurrently with - // refinement threads refining cards on these regions and - // potentially wanting to refine the BOT as they are scanning - // those cards (this can happen shortly after a cleanup; see CR - // 6991377). So we have to set up the region(s) carefully and in - // a specific order. - - // The word size sum of all the regions we will allocate. 
- size_t word_size_sum = num_regions * HeapRegion::GrainWords; - assert(word_size <= word_size_sum, "sanity"); - - // This will be the "starts humongous" region. - HeapRegion* first_hr = _hrs->at(first); - // The header of the new object will be placed at the bottom of - // the first region. - HeapWord* new_obj = first_hr->bottom(); - // This will be the new end of the first region in the series that - // should also match the end of the last region in the seriers. - HeapWord* new_end = new_obj + word_size_sum; - // This will be the new top of the first region that will reflect - // this allocation. - HeapWord* new_top = new_obj + word_size; - - // First, we need to zero the header of the space that we will be - // allocating. When we update top further down, some refinement - // threads might try to scan the region. By zeroing the header we - // ensure that any thread that will try to scan the region will - // come across the zero klass word and bail out. - // - // NOTE: It would not have been correct to have used - // CollectedHeap::fill_with_object() and make the space look like - // an int array. The thread that is doing the allocation will - // later update the object header to a potentially different array - // type and, for a very short period of time, the klass and length - // fields will be inconsistent. This could cause a refinement - // thread to calculate the object size incorrectly. - Copy::fill_to_words(new_obj, oopDesc::header_size(), 0); - - // We will set up the first region as "starts humongous". This - // will also update the BOT covering all the regions to reflect - // that there is a single object that starts at the bottom of the - // first region. - first_hr->set_startsHumongous(new_top, new_end); - - // Then, if there are any, we will set up the "continues - // humongous" regions. - HeapRegion* hr = NULL; - for (int i = first + 1; i < last; ++i) { - hr = _hrs->at(i); - hr->set_continuesHumongous(first_hr); - } - // If we have "continues humongous" regions (hr != NULL), then the - // end of the last one should match new_end. - assert(hr == NULL || hr->end() == new_end, "sanity"); - - // Up to this point no concurrent thread would have been able to - // do any scanning on any region in this series. All the top - // fields still point to bottom, so the intersection between - // [bottom,top] and [card_start,card_end] will be empty. Before we - // update the top fields, we'll do a storestore to make sure that - // no thread sees the update to top before the zeroing of the - // object header and the BOT initialization. - OrderAccess::storestore(); - - // Now that the BOT and the object header have been initialized, - // we can update top of the "starts humongous" region. - assert(first_hr->bottom() < new_top && new_top <= first_hr->end(), - "new_top should be in this region"); - first_hr->set_top(new_top); - - // Now, we will update the top fields of the "continues humongous" - // regions. The reason we need to do this is that, otherwise, - // these regions would look empty and this will confuse parts of - // G1. For example, the code that looks for a consecutive number - // of empty regions will consider them empty and try to - // re-allocate them. We can extend is_empty() to also include - // !continuesHumongous(), but it is easier to just update the top - // fields here. 
The way we set top for all regions (i.e., top == - // end for all regions but the last one, top == new_top for the - // last one) is actually used when we will free up the humongous - // region in free_humongous_region(). - hr = NULL; - for (int i = first + 1; i < last; ++i) { - hr = _hrs->at(i); - if ((i + 1) == last) { - // last continues humongous region - assert(hr->bottom() < new_top && new_top <= hr->end(), - "new_top should fall on this region"); - hr->set_top(new_top); - } else { - // not last one - assert(new_top > hr->end(), "new_top should be above this region"); - hr->set_top(hr->end()); - } - } - // If we have continues humongous regions (hr != NULL), then the - // end of the last one should match new_end and its top should - // match new_top. - assert(hr == NULL || - (hr->end() == new_end && hr->top() == new_top), "sanity"); - - assert(first_hr->used() == word_size * HeapWordSize, "invariant"); - _summary_bytes_used += first_hr->used(); - _humongous_set.add(first_hr); - - return new_obj; + result = + humongous_obj_allocate_initialize_regions(first, num_regions, word_size); + assert(result != NULL, "it should always return a valid result"); } verify_region_sets_optional(); - return NULL; + + return result; } void @@ -1389,7 +1407,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc, g1_policy()->record_full_collection_start(); wait_while_free_regions_coming(); - append_secondary_free_list_if_not_empty(); + append_secondary_free_list_if_not_empty_with_lock(); gc_prologue(true); increment_total_collections(true /* full gc */); @@ -3377,15 +3395,14 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { TraceMemoryManagerStats tms(false /* fullGC */); - // If there are any free regions available on the secondary_free_list - // make sure we append them to the free_list. However, we don't - // have to wait for the rest of the cleanup operation to - // finish. If it's still going on that's OK. If we run out of - // regions, the region allocation code will check the - // secondary_free_list and potentially wait if more free regions - // are coming (see new_region_try_secondary_free_list()). + // If the secondary_free_list is not empty, append it to the + // free_list. No need to wait for the cleanup operation to finish; + // the region allocation code will check the secondary_free_list + // and wait if necessary. If the G1StressConcRegionFreeing flag is + // set, skip this step so that the region allocation code has to + // get entries from the secondary_free_list. if (!G1StressConcRegionFreeing) { - append_secondary_free_list_if_not_empty(); + append_secondary_free_list_if_not_empty_with_lock(); } increment_gc_time_stamp(); @@ -5199,7 +5216,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) { size_t rs_lengths = 0; while (cur != NULL) { - assert(!is_on_free_list(cur), "sanity"); + assert(!is_on_master_free_list(cur), "sanity"); if (non_young) { if (cur->is_young()) { @@ -5543,13 +5560,10 @@ void G1CollectedHeap::verify_region_sets() { return; } - { - MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); - // Make sure we append the secondary_free_list on the free_list so - // that all free regions we will come across can be safely - // attributed to the free_list. - append_secondary_free_list(); - } + // Make sure we append the secondary_free_list on the free_list so + // that all free regions we will come across can be safely + // attributed to the free_list. 
+ append_secondary_free_list_if_not_empty_with_lock(); // Finally, make sure that the region accounting in the lists is // consistent with what we see in the heap. diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp index 93b26c0eb1d77cb670bc21c560fa8dbb87f083e9..5863e7c9860f4871daf06667b6572bac34b37c90 100644 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @@ -56,7 +56,6 @@ class HeapRegionRemSetIterator; class ConcurrentMark; class ConcurrentMarkThread; class ConcurrentG1Refine; -class ConcurrentZFThread; typedef OverflowTaskQueue<StarTask> RefToScanQueue; typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet; @@ -64,12 +63,6 @@ typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet; typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) -enum G1GCThreadGroups { - G1CRGroup = 0, - G1ZFGroup = 1, - G1CMGroup = 2 -}; - enum GCAllocPurpose { GCAllocForTenured, GCAllocForSurvived, @@ -294,9 +287,9 @@ private: // These are macros so that, if the assert fires, we get the correct // line number, file, etc. -#define heap_locking_asserts_err_msg(__extra_message) \ +#define heap_locking_asserts_err_msg(_extra_message_) \ err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \ - (__extra_message), \ + (_extra_message_), \ BOOL_TO_STR(Heap_lock->owned_by_self()), \ BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \ BOOL_TO_STR(Thread::current()->is_VM_thread())) @@ -307,11 +300,11 @@ private: heap_locking_asserts_err_msg("should be holding the Heap_lock")); \ } while (0) -#define assert_heap_locked_or_at_safepoint(__should_be_vm_thread) \ +#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \ do { \ assert(Heap_lock->owned_by_self() || \ (SafepointSynchronize::is_at_safepoint() && \ - ((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \ + ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \ heap_locking_asserts_err_msg("should be holding the Heap_lock or " \ "should be at a safepoint")); \ } while (0) @@ -338,10 +331,10 @@ private: "should not be at a safepoint")); \ } while (0) -#define assert_at_safepoint(__should_be_vm_thread) \ +#define assert_at_safepoint(_should_be_vm_thread_) \ do { \ assert(SafepointSynchronize::is_at_safepoint() && \ - ((__should_be_vm_thread) == Thread::current()->is_VM_thread()), \ + ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \ heap_locking_asserts_err_msg("should be at a safepoint")); \ } while (0) @@ -371,35 +364,40 @@ protected: // will check whether there's anything available in the // secondary_free_list and/or wait for more regions to appear in that // list, if _free_regions_coming is set. - HeapRegion* new_region_try_secondary_free_list(size_t word_size); - - // It will try to allocate a single non-humongous HeapRegion - // sufficient for an allocation of the given word_size. If - // do_expand is true, it will attempt to expand the heap if - // necessary to satisfy the allocation request. Note that word_size - // is only used to make sure that we expand sufficiently but, given - // that the allocation request is assumed not to be humongous, - // having word_size is not strictly necessary (expanding by a single - // region will always be sufficient). But let's keep that parameter - // in case we need it in the future.
+ HeapRegion* new_region_try_secondary_free_list(); + + // Try to allocate a single non-humongous HeapRegion sufficient for + // an allocation of the given word_size. If do_expand is true, + // attempt to expand the heap if necessary to satisfy the allocation + // request. HeapRegion* new_region_work(size_t word_size, bool do_expand); - // It will try to allocate a new region to be used for allocation by - // mutator threads. It will not try to expand the heap if not region - // is available. + // Try to allocate a new region to be used for allocation by a + // mutator thread. Will not attempt to expand the heap if no region + // is available. HeapRegion* new_alloc_region(size_t word_size) { return new_region_work(word_size, false /* do_expand */); } - // It will try to allocate a new region to be used for allocation by - // a GC thread. It will try to expand the heap if no region is - // available. + // Try to allocate a new region to be used for allocation by a GC + // thread. Attempt to expand the heap if no region is available. HeapRegion* new_gc_alloc_region(int purpose, size_t word_size); + // Attempt to satisfy a humongous allocation request of the given + // size by finding a contiguous set of num_regions free regions and + // removing them from the master free list. Return the index of the + // first region or -1 if the search was unsuccessful. int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size); - // Attempt to allocate an object of the given (very large) "word_size". - // Returns "NULL" on failure. + // Initialize a contiguous set of free regions of length num_regions + // starting at index first so that they appear as a single humongous + // region. + HeapWord* humongous_obj_allocate_initialize_regions(int first, + size_t num_regions, + size_t word_size); + + // Attempt to allocate a humongous object of the given size. Return + // NULL if unsuccessful. HeapWord* humongous_obj_allocate(size_t word_size); // The following two methods, allocate_new_tlab() and @@ -776,7 +774,7 @@ protected: // Invoke "save_marks" on all heap regions. void save_marks(); - // It frees a non-humongous region by initializing its contents and + // Frees a non-humongous region by initializing its contents and // adding it to the free list that's passed as a parameter (this is // usually a local list which will be appended to the master free // list later). The used bytes of freed regions are accumulated in @@ -787,13 +785,13 @@ protected: FreeRegionList* free_list, bool par); - // It frees a humongous region by collapsing it into individual - // regions and calling free_region() for each of them. The freed - // regions will be added to the free list that's passed as a parameter - // (this is usually a local list which will be appended to the - // master free list later). The used bytes of freed regions are - // accumulated in pre_used. If par is true, the region's RSet will - // not be freed up. The assumption is that this will be done later. + // Frees a humongous region by collapsing it into individual regions + // and calling free_region() for each of them. The freed regions + // will be added to the free list that's passed as a parameter (this + // is usually a local list which will be appended to the master free + // list later). The used bytes of freed regions are accumulated in + // pre_used. If par is true, the region's RSet will not be freed + // up. The assumption is that this will be done later.
void free_humongous_region(HeapRegion* hr, size_t* pre_used, FreeRegionList* free_list, @@ -1046,13 +1044,13 @@ public: #endif // HEAP_REGION_SET_FORCE_VERIFY #ifdef ASSERT - bool is_on_free_list(HeapRegion* hr) { + bool is_on_master_free_list(HeapRegion* hr) { return hr->containing_set() == &_free_list; } - bool is_on_humongous_set(HeapRegion* hr) { + bool is_in_humongous_set(HeapRegion* hr) { return hr->containing_set() == &_humongous_set; -} + } #endif // ASSERT // Wrapper for the region list operations that can be called from @@ -1066,7 +1064,9 @@ public: _free_list.add_as_tail(&_secondary_free_list); } - void append_secondary_free_list_if_not_empty() { + void append_secondary_free_list_if_not_empty_with_lock() { + // If the secondary free list looks empty there's no reason to + // take the lock and then try to append it. if (!_secondary_free_list.is_empty()) { MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); append_secondary_free_list(); diff --git a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp index 68d5d84f2d4641e078f121936bdaa451d51f48e6..4c93507a3ad28201e1e860a16f3d28c6dac50799 100644 --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp @@ -185,22 +185,22 @@ class G1PrepareCompactClosure: public HeapRegionClosure { G1CollectedHeap* _g1h; ModRefBarrierSet* _mrbs; CompactPoint _cp; - size_t _pre_used; - FreeRegionList _free_list; HumongousRegionSet _humongous_proxy_set; void free_humongous_region(HeapRegion* hr) { HeapWord* end = hr->end(); + size_t dummy_pre_used; + FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep"); + assert(hr->startsHumongous(), "Only the start of a humongous region should be freed."); - _g1h->free_humongous_region(hr, &_pre_used, &_free_list, + _g1h->free_humongous_region(hr, &dummy_pre_used, &dummy_free_list, &_humongous_proxy_set, false /* par */); - // Do we also need to do this for the continues humongous regions - // we just collapsed? hr->prepare_for_compaction(&_cp); // Also clear the part of the card table that will be unused after // compaction. 
_mrbs->clear(MemRegion(hr->compaction_top(), end)); + dummy_free_list.remove_all(); } public: @@ -208,8 +208,6 @@ public: : _g1h(G1CollectedHeap::heap()), _mrbs(G1CollectedHeap::heap()->mr_bs()), _cp(NULL, cs, cs->initialize_threshold()), - _pre_used(0), - _free_list("Local Free List for G1MarkSweep"), _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { } void update_sets() { @@ -219,7 +217,6 @@ public: NULL, /* free_list */ &_humongous_proxy_set, false /* par */); - _free_list.remove_all(); } bool doHeapRegion(HeapRegion* hr) { diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/src/share/vm/gc_implementation/g1/g1RemSet.cpp index 48d3dbd100cec4ad1ae9524c504246f61b81ed89..8b9f3d3c458e424b26654cd6faa2263ad3f56c0c 100644 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp @@ -86,28 +86,6 @@ public: bool idempotent() { return true; } }; -class IntoCSRegionClosure: public HeapRegionClosure { - IntoCSOopClosure _blk; - G1CollectedHeap* _g1; -public: - IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) : - _g1(g1), _blk(g1, blk) {} - bool doHeapRegion(HeapRegion* r) { - if (!r->in_collection_set()) { - _blk.set_region(r); - if (r->isHumongous()) { - if (r->startsHumongous()) { - oop obj = oop(r->bottom()); - obj->oop_iterate(&_blk); - } - } else { - r->oop_before_save_marks_iterate(&_blk); - } - } - return false; - } -}; - class VerifyRSCleanCardOopClosure: public OopClosure { G1CollectedHeap* _g1; public: diff --git a/src/share/vm/gc_implementation/g1/heapRegion.hpp b/src/share/vm/gc_implementation/g1/heapRegion.hpp index e764ad4349ca2291c798b4cf1dcac8c22a3aeba2..2d7385ef9b70a20c6bf4452f174284c7a0fd69ff 100644 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp @@ -53,8 +53,8 @@ class HeapRegion; class HeapRegionSetBase; #define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]" -#define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \ - (__hr)->top(), (__hr)->end() +#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \ + (_hr_)->top(), (_hr_)->end() // A dirty card to oop closure for heap regions. It // knows how to get the G1 heap and how to use the bitmap @@ -518,13 +518,13 @@ class HeapRegion: public G1OffsetTableContigSpace { containing_set, _containing_set)); _containing_set = containing_set; -} + } HeapRegionSetBase* containing_set() { return _containing_set; } #else // ASSERT void set_containing_set(HeapRegionSetBase* containing_set) { } - // containing_set() is only used in asserts so there's not reason + // containing_set() is only used in asserts so there's no reason // to provide a dummy version of it. #endif // ASSERT @@ -535,14 +535,15 @@ class HeapRegion: public G1OffsetTableContigSpace { bool pending_removal() { return _pending_removal; } void set_pending_removal(bool pending_removal) { - // We can only set pending_removal to true, if it's false and the - // region belongs to a set. - assert(!pending_removal || - (!_pending_removal && containing_set() != NULL), "pre-condition"); - // We can only set pending_removal to false, if it's true and the - // region does not belong to a set. 
- assert( pending_removal || - ( _pending_removal && containing_set() == NULL), "pre-condition"); + if (pending_removal) { + assert(!_pending_removal && containing_set() != NULL, + "can only set pending removal to true if it's false and " + "the region belongs to a region set"); + } else { + assert( _pending_removal && containing_set() == NULL, + "can only set pending removal to false if it's true and " + "the region does not belong to a region set"); + } _pending_removal = pending_removal; } diff --git a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp index 2e15190c526570d4d7e3794d6933c625cdedc6a0..764c9ebb0ade481a8967e57bbad2e64a0d0279fa 100644 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp @@ -165,7 +165,7 @@ int HeapRegionSeq::find_contiguous_from(int from, size_t num) { assert(num_so_far <= num, "post-condition"); if (num_so_far == num) { - // we find enough space for the humongous object + // we found enough space for the humongous object assert(from <= first && first < _regions.length(), "post-condition"); assert(first < curr && (curr - first) == (int) num, "post-condition"); for (int i = first; i < first + (int) num; ++i) { diff --git a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp index 932b0cb95726ffeafe78b0bd53f4b61f69ba88a3..167cd65931ea111544ff2dca54508eb487d37abf 100644 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp @@ -76,7 +76,8 @@ class HeapRegionSeq: public CHeapObj { // that are available for allocation. size_t free_suffix(); - // Finds a contiguous set of empty regions of length num. + // Find a contiguous set of empty regions of length num and return + // the index of the first region or -1 if the search was unsuccessful. int find_contiguous(size_t num); // Apply the "doHeapRegion" method of "blk" to all regions in "this", diff --git a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp index 29fff5d8cf3d5dd51fa6a37c12f6e2d2a4579543..be80cd2201ae371de315e6a529ae791a81401c8d 100644 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp @@ -42,7 +42,7 @@ size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) { return region_num; } -void HeapRegionSetBase::fill_in_ext_msg(hrl_ext_msg* msg, const char* message) { +void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) { msg->append("[%s] %s " "ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" " "cy: "SIZE_FORMAT" ud: "SIZE_FORMAT, @@ -109,30 +109,30 @@ void HeapRegionSetBase::verify() { // for the verification calls. If we do verification without the // appropriate locks and the set changes underneath our feet // verification might fail and send us on a wild goose chase. 
- hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); guarantee(( is_empty() && length() == 0 && region_num() == 0 && total_used_bytes() == 0 && total_capacity_bytes() == 0) || (!is_empty() && length() >= 0 && region_num() >= 0 && total_used_bytes() >= 0 && total_capacity_bytes() >= 0), - hrl_ext_msg(this, "invariant")); + hrs_ext_msg(this, "invariant")); guarantee((!regions_humongous() && region_num() == length()) || ( regions_humongous() && region_num() >= length()), - hrl_ext_msg(this, "invariant")); + hrs_ext_msg(this, "invariant")); guarantee(!regions_empty() || total_used_bytes() == 0, - hrl_ext_msg(this, "invariant")); + hrs_ext_msg(this, "invariant")); guarantee(total_used_bytes() <= total_capacity_bytes(), - hrl_ext_msg(this, "invariant")); + hrs_ext_msg(this, "invariant")); } void HeapRegionSetBase::verify_start() { // See comment in verify() about MT safety and verification. - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); assert(!_verify_in_progress, - hrl_ext_msg(this, "verification should not be in progress")); + hrs_ext_msg(this, "verification should not be in progress")); // Do the basic verification first before we do the checks over the regions. HeapRegionSetBase::verify(); @@ -146,11 +146,11 @@ void HeapRegionSetBase::verify_start() { void HeapRegionSetBase::verify_next_region(HeapRegion* hr) { // See comment in verify() about MT safety and verification. - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); assert(_verify_in_progress, - hrl_ext_msg(this, "verification should be in progress")); + hrs_ext_msg(this, "verification should be in progress")); - guarantee(verify_region(hr, this), hrl_ext_msg(this, "region verification")); + guarantee(verify_region(hr, this), hrs_ext_msg(this, "region verification")); _calc_length += 1; if (!hr->isHumongous()) { @@ -164,28 +164,28 @@ void HeapRegionSetBase::verify_next_region(HeapRegion* hr) { void HeapRegionSetBase::verify_end() { // See comment in verify() about MT safety and verification. 
- hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); assert(_verify_in_progress, - hrl_ext_msg(this, "verification should be in progress")); + hrs_ext_msg(this, "verification should be in progress")); guarantee(length() == _calc_length, - hrl_err_msg("[%s] length: "SIZE_FORMAT" should be == " + hrs_err_msg("[%s] length: "SIZE_FORMAT" should be == " "calc length: "SIZE_FORMAT, name(), length(), _calc_length)); guarantee(region_num() == _calc_region_num, - hrl_err_msg("[%s] region num: "SIZE_FORMAT" should be == " + hrs_err_msg("[%s] region num: "SIZE_FORMAT" should be == " "calc region num: "SIZE_FORMAT, name(), region_num(), _calc_region_num)); guarantee(total_capacity_bytes() == _calc_total_capacity_bytes, - hrl_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == " + hrs_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == " "calc capacity bytes: "SIZE_FORMAT, name(), total_capacity_bytes(), _calc_total_capacity_bytes)); guarantee(total_used_bytes() == _calc_total_used_bytes, - hrl_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == " + hrs_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == " "calc used bytes: "SIZE_FORMAT, name(), total_used_bytes(), _calc_total_used_bytes)); @@ -221,9 +221,9 @@ HeapRegionSetBase::HeapRegionSetBase(const char* name) //////////////////// HeapRegionSet //////////////////// void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) { - hrl_assert_mt_safety_ok(this); - hrl_assert_mt_safety_ok(proxy_set); - hrl_assert_sets_match(this, proxy_set); + hrs_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(proxy_set); + hrs_assert_sets_match(this, proxy_set); verify_optional(); proxy_set->verify_optional(); @@ -231,19 +231,19 @@ void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) { if (proxy_set->is_empty()) return; assert(proxy_set->length() <= _length, - hrl_err_msg("[%s] proxy set length: "SIZE_FORMAT" " + hrs_err_msg("[%s] proxy set length: "SIZE_FORMAT" " "should be <= length: "SIZE_FORMAT, name(), proxy_set->length(), _length)); _length -= proxy_set->length(); assert(proxy_set->region_num() <= _region_num, - hrl_err_msg("[%s] proxy set region num: "SIZE_FORMAT" " + hrs_err_msg("[%s] proxy set region num: "SIZE_FORMAT" " "should be <= region num: "SIZE_FORMAT, name(), proxy_set->region_num(), _region_num)); _region_num -= proxy_set->region_num(); assert(proxy_set->total_used_bytes() <= _total_used_bytes, - hrl_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" " + hrs_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" " "should be <= used bytes: "SIZE_FORMAT, name(), proxy_set->total_used_bytes(), _total_used_bytes)); @@ -257,13 +257,13 @@ void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) { //////////////////// HeapRegionLinkedList //////////////////// -void HeapRegionLinkedList::fill_in_ext_msg_extra(hrl_ext_msg* msg) { +void HeapRegionLinkedList::fill_in_ext_msg_extra(hrs_ext_msg* msg) { msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail()); } void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) { - hrl_assert_mt_safety_ok(this); - hrl_assert_mt_safety_ok(from_list); + hrs_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(from_list); verify_optional(); from_list->verify_optional(); @@ -283,10 +283,10 @@ void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) { #endif // ASSERT if (_tail != NULL) { - assert(length() > 0 && _head != NULL, hrl_ext_msg(this, "invariant")); + assert(length() > 0 && _head != NULL, hrs_ext_msg(this, "invariant")); 
_tail->set_next(from_list->_head); } else { - assert(length() == 0 && _head == NULL, hrl_ext_msg(this, "invariant")); + assert(length() == 0 && _head == NULL, hrs_ext_msg(this, "invariant")); _head = from_list->_head; } _tail = from_list->_tail; @@ -301,12 +301,12 @@ void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) { } void HeapRegionLinkedList::remove_all() { - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); verify_optional(); HeapRegion* curr = _head; while (curr != NULL) { - hrl_assert_region_ok(this, curr, this); + hrs_assert_region_ok(this, curr, this); HeapRegion* next = curr->next(); curr->set_next(NULL); @@ -319,9 +319,9 @@ void HeapRegionLinkedList::remove_all() { } void HeapRegionLinkedList::remove_all_pending(size_t target_count) { - hrl_assert_mt_safety_ok(this); - assert(target_count > 1, hrl_ext_msg(this, "pre-condition")); - assert(!is_empty(), hrl_ext_msg(this, "pre-condition")); + hrs_assert_mt_safety_ok(this); + assert(target_count > 1, hrs_ext_msg(this, "pre-condition")); + assert(!is_empty(), hrs_ext_msg(this, "pre-condition")); verify_optional(); DEBUG_ONLY(size_t old_length = length();) @@ -330,27 +330,27 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) { HeapRegion* prev = NULL; size_t count = 0; while (curr != NULL) { - hrl_assert_region_ok(this, curr, this); + hrs_assert_region_ok(this, curr, this); HeapRegion* next = curr->next(); if (curr->pending_removal()) { assert(count < target_count, - hrl_err_msg("[%s] should not come across more regions " + hrs_err_msg("[%s] should not come across more regions " "pending for removal than target_count: "SIZE_FORMAT, name(), target_count)); if (prev == NULL) { - assert(_head == curr, hrl_ext_msg(this, "invariant")); + assert(_head == curr, hrs_ext_msg(this, "invariant")); _head = next; } else { - assert(_head != curr, hrl_ext_msg(this, "invariant")); + assert(_head != curr, hrs_ext_msg(this, "invariant")); prev->set_next(next); } if (next == NULL) { - assert(_tail == curr, hrl_ext_msg(this, "invariant")); + assert(_tail == curr, hrs_ext_msg(this, "invariant")); _tail = prev; } else { - assert(_tail != curr, hrl_ext_msg(this, "invariant")); + assert(_tail != curr, hrs_ext_msg(this, "invariant")); } curr->set_next(NULL); @@ -371,10 +371,10 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) { } assert(count == target_count, - hrl_err_msg("[%s] count: "SIZE_FORMAT" should be == " + hrs_err_msg("[%s] count: "SIZE_FORMAT" should be == " "target_count: "SIZE_FORMAT, name(), count, target_count)); assert(length() + target_count == old_length, - hrl_err_msg("[%s] new length should be consistent " + hrs_err_msg("[%s] new length should be consistent " "new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" " "target_count: "SIZE_FORMAT, name(), length(), old_length, target_count)); @@ -385,7 +385,7 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) { void HeapRegionLinkedList::verify() { // See comment in HeapRegionSetBase::verify() about MT safety and // verification. - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); // This will also do the basic verification too. verify_start(); @@ -399,7 +399,7 @@ void HeapRegionLinkedList::verify() { count += 1; guarantee(count < _unrealistically_long_length, - hrl_err_msg("[%s] the calculated length: "SIZE_FORMAT" " + hrs_err_msg("[%s] the calculated length: "SIZE_FORMAT" " "seems very long, is there maybe a cycle? 
" "curr: "PTR_FORMAT" prev0: "PTR_FORMAT" " "prev1: "PTR_FORMAT" length: "SIZE_FORMAT, @@ -410,7 +410,7 @@ void HeapRegionLinkedList::verify() { curr = curr->next(); } - guarantee(_tail == prev0, hrl_ext_msg(this, "post-condition")); + guarantee(_tail == prev0, hrs_ext_msg(this, "post-condition")); verify_end(); } diff --git a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp index a973676ae78fb5ff55c4b7fcefcc0287d42aa08a..5d53068ca88e93c45f748e4dc9eaa9a0f18eaa4f 100644 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp @@ -28,8 +28,8 @@ #include "gc_implementation/g1/heapRegion.hpp" // Large buffer for some cases where the output might be larger than normal. -#define HRL_ERR_MSG_BUFSZ 512 -typedef FormatBuffer hrl_err_msg; +#define HRS_ERR_MSG_BUFSZ 512 +typedef FormatBuffer hrs_err_msg; // Set verification will be forced either if someone defines // HEAP_REGION_SET_FORCE_VERIFY to be 1, or in builds in which @@ -45,10 +45,10 @@ typedef FormatBuffer hrl_err_msg; // (e.g., length, region num, used bytes sum) plus any shared // functionality (e.g., verification). -class hrl_ext_msg; +class hrs_ext_msg; class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC { - friend class hrl_ext_msg; + friend class hrs_ext_msg; protected: static size_t calculate_region_num(HeapRegion* hr); @@ -104,10 +104,10 @@ protected: virtual bool check_mt_safety() { return true; } // fill_in_ext_msg() writes the the values of the set's attributes - // in the custom err_msg (hrl_ext_msg). fill_in_ext_msg_extra() + // in the custom err_msg (hrs_ext_msg). fill_in_ext_msg_extra() // allows subclasses to append further information. - virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg) { } - void fill_in_ext_msg(hrl_ext_msg* msg, const char* message); + virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg) { } + void fill_in_ext_msg(hrs_ext_msg* msg, const char* message); // It updates the fields of the set to reflect hr being added to // the set. @@ -170,9 +170,9 @@ public: // the fields of the associated set. This can be very helpful in // diagnosing failures. -class hrl_ext_msg : public hrl_err_msg { +class hrs_ext_msg : public hrs_err_msg { public: - hrl_ext_msg(HeapRegionSetBase* set, const char* message) : hrl_err_msg("") { + hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("") { set->fill_in_ext_msg(this, message); } }; @@ -180,25 +180,25 @@ public: // These two macros are provided for convenience, to keep the uses of // these two asserts a bit more concise. 
-#define hrl_assert_mt_safety_ok(_set_) \ +#define hrs_assert_mt_safety_ok(_set_) \ do { \ - assert((_set_)->check_mt_safety(), hrl_ext_msg((_set_), "MT safety")); \ + assert((_set_)->check_mt_safety(), hrs_ext_msg((_set_), "MT safety")); \ } while (0) -#define hrl_assert_region_ok(_set_, _hr_, _expected_) \ +#define hrs_assert_region_ok(_set_, _hr_, _expected_) \ do { \ assert((_set_)->verify_region((_hr_), (_expected_)), \ - hrl_ext_msg((_set_), "region verification")); \ + hrs_ext_msg((_set_), "region verification")); \ } while (0) //////////////////// HeapRegionSet //////////////////// -#define hrl_assert_sets_match(_set1_, _set2_) \ +#define hrs_assert_sets_match(_set1_, _set2_) \ do { \ assert(((_set1_)->regions_humongous() == \ (_set2_)->regions_humongous()) && \ ((_set1_)->regions_empty() == (_set2_)->regions_empty()), \ - hrl_err_msg("the contents of set %s and set %s should match", \ + hrs_err_msg("the contents of set %s and set %s should match", \ (_set1_)->name(), (_set2_)->name())); \ } while (0) @@ -267,7 +267,7 @@ private: HeapRegion* tail() { return _tail; } protected: - virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg); + virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg); // See the comment for HeapRegionSetBase::clear() virtual void clear(); @@ -309,10 +309,10 @@ public: virtual void print_on(outputStream* out, bool print_contents = false); }; -//////////////////// HeapRegionLinkedList //////////////////// +//////////////////// HeapRegionLinkedListIterator //////////////////// -// Iterator class that provides a convenient way to iterator over the -// regions in a HeapRegionLinkedList instance. +// Iterator class that provides a convenient way to iterate over the +// regions of a HeapRegionLinkedList instance. class HeapRegionLinkedListIterator : public StackObj { private: diff --git a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp index 44f7335f48fe6261ff51e48addfbffbe5f0916da..501f5a7aef84da55c62b66f87fa9158e1e0550ce 100644 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp @@ -42,8 +42,8 @@ inline void HeapRegionSetBase::update_for_addition(HeapRegion* hr) { } inline void HeapRegionSetBase::add_internal(HeapRegion* hr) { - hrl_assert_region_ok(this, hr, NULL); - assert(hr->next() == NULL, hrl_ext_msg(this, "should not already be linked")); + hrs_assert_region_ok(this, hr, NULL); + assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked")); update_for_addition(hr); hr->set_containing_set(this); @@ -51,7 +51,7 @@ inline void HeapRegionSetBase::add_internal(HeapRegion* hr) { inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) { // Assumes the caller has already verified the region. 
- assert(_length > 0, hrl_ext_msg(this, "pre-condition")); + assert(_length > 0, hrs_ext_msg(this, "pre-condition")); _length -= 1; size_t region_num_diff; @@ -61,22 +61,22 @@ inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) { region_num_diff = calculate_region_num(hr); } assert(region_num_diff <= _region_num, - hrl_err_msg("[%s] region's region num: "SIZE_FORMAT" " + hrs_err_msg("[%s] region's region num: "SIZE_FORMAT" " "should be <= region num: "SIZE_FORMAT, name(), region_num_diff, _region_num)); _region_num -= region_num_diff; size_t used_bytes = hr->used(); assert(used_bytes <= _total_used_bytes, - hrl_err_msg("[%s] region's used bytes: "SIZE_FORMAT" " + hrs_err_msg("[%s] region's used bytes: "SIZE_FORMAT" " "should be <= used bytes: "SIZE_FORMAT, name(), used_bytes, _total_used_bytes)); _total_used_bytes -= used_bytes; } inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) { - hrl_assert_region_ok(this, hr, this); - assert(hr->next() == NULL, hrl_ext_msg(this, "should already be unlinked")); + hrs_assert_region_ok(this, hr, this); + assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked")); hr->set_containing_set(NULL); update_for_removal(hr); @@ -85,13 +85,13 @@ inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) { //////////////////// HeapRegionSet //////////////////// inline void HeapRegionSet::add(HeapRegion* hr) { - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); // add_internal() will verify the region. add_internal(hr); } inline void HeapRegionSet::remove(HeapRegion* hr) { - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); // remove_internal() will verify the region. remove_internal(hr); } @@ -101,8 +101,8 @@ inline void HeapRegionSet::remove_with_proxy(HeapRegion* hr, // No need to fo the MT safety check here given that this method // does not update the contents of the set but instead accumulates // the changes in proxy_set which is assumed to be thread-local. - hrl_assert_sets_match(this, proxy_set); - hrl_assert_region_ok(this, hr, this); + hrs_assert_sets_match(this, proxy_set); + hrs_assert_region_ok(this, hr, this); hr->set_containing_set(NULL); proxy_set->update_for_addition(hr); @@ -111,10 +111,10 @@ inline void HeapRegionSet::remove_with_proxy(HeapRegion* hr, //////////////////// HeapRegionLinkedList //////////////////// inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) { - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); assert((length() == 0 && _head == NULL && _tail == NULL) || (length() > 0 && _head != NULL && _tail != NULL), - hrl_ext_msg(this, "invariant")); + hrs_ext_msg(this, "invariant")); // add_internal() will verify the region. add_internal(hr); @@ -128,10 +128,10 @@ inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) { } inline HeapRegion* HeapRegionLinkedList::remove_head() { - hrl_assert_mt_safety_ok(this); - assert(!is_empty(), hrl_ext_msg(this, "the list should not be empty")); + hrs_assert_mt_safety_ok(this); + assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty")); assert(length() > 0 && _head != NULL && _tail != NULL, - hrl_ext_msg(this, "invariant")); + hrs_ext_msg(this, "invariant")); // We need to unlink it first. 
HeapRegion* hr = _head; @@ -147,7 +147,7 @@ } inline HeapRegion* HeapRegionLinkedList::remove_head_or_null() { - hrl_assert_mt_safety_ok(this); + hrs_assert_mt_safety_ok(this); if (!is_empty()) { return remove_head(); diff --git a/src/share/vm/gc_implementation/g1/heapRegionSets.cpp b/src/share/vm/gc_implementation/g1/heapRegionSets.cpp index c5f58cb87affd282fe82bdd8f9cf2e6f10da1534..24f25eae325edee9d8610df7d21a73ea01b3ec8b 100644 --- a/src/share/vm/gc_implementation/g1/heapRegionSets.cpp +++ b/src/share/vm/gc_implementation/g1/heapRegionSets.cpp @@ -52,7 +52,7 @@ bool MasterFreeRegionList::check_mt_safety() { FreeList_lock->owned_by_self())) || (!SafepointSynchronize::is_at_safepoint() && Heap_lock->owned_by_self()), - hrl_ext_msg(this, "master free list MT safety protocol")); + hrs_ext_msg(this, "master free list MT safety protocol")); return FreeRegionList::check_mt_safety(); } @@ -65,7 +65,7 @@ bool SecondaryFreeRegionList::check_mt_safety() { // while holding the SecondaryFreeList_lock. guarantee(SecondaryFreeList_lock->owned_by_self(), - hrl_ext_msg(this, "secondary free list MT safety protocol")); + hrs_ext_msg(this, "secondary free list MT safety protocol")); return FreeRegionList::check_mt_safety(); } @@ -81,7 +81,7 @@ const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) { return HeapRegionSet::verify_region_extra(hr); } -//////////////////// HumongousRegionSet //////////////////// +//////////////////// MasterHumongousRegionSet //////////////////// bool MasterHumongousRegionSet::check_mt_safety() { // Master Humongous Set MT safety protocol: @@ -97,6 +97,6 @@ OldSets_lock->owned_by_self())) || (!SafepointSynchronize::is_at_safepoint() && Heap_lock->owned_by_self()), - hrl_ext_msg(this, "master humongous set MT safety protocol")); + hrs_ext_msg(this, "master humongous set MT safety protocol")); return HumongousRegionSet::check_mt_safety(); } diff --git a/src/share/vm/utilities/debug.hpp b/src/share/vm/utilities/debug.hpp index a4f16060e1df37099263e91a065c7d8442498f6f..75a9a27bd6b2a711330ca00323da0d7b3c0c903c 100644 --- a/src/share/vm/utilities/debug.hpp +++ b/src/share/vm/utilities/debug.hpp @@ -25,6 +25,7 @@ #ifndef SHARE_VM_UTILITIES_DEBUG_HPP #define SHARE_VM_UTILITIES_DEBUG_HPP +#include "prims/jvm.h" #include "utilities/globalDefinitions.hpp" #include <stdarg.h> @@ -48,7 +49,7 @@ template <size_t bufsz> FormatBuffer<bufsz>::FormatBuffer(const char * format, ...) { va_list argp; va_start(argp, format); - vsnprintf(_buf, bufsz, format, argp); + jio_vsnprintf(_buf, bufsz, format, argp); va_end(argp); } @@ -61,7 +62,7 @@ void FormatBuffer<bufsz>::append(const char* format, ...) { va_list argp; va_start(argp, format); - vsnprintf(buf_end, bufsz - len, format, argp); + jio_vsnprintf(buf_end, bufsz - len, format, argp); va_end(argp); } diff --git a/src/share/vm/utilities/globalDefinitions.hpp b/src/share/vm/utilities/globalDefinitions.hpp index 820b6abcb7e8a8569b2c5e4022c11ef88443fe3f..a742d2214867ab556793946df66ef152a4c07463 100644 --- a/src/share/vm/utilities/globalDefinitions.hpp +++ b/src/share/vm/utilities/globalDefinitions.hpp @@ -1185,7 +1185,7 @@ inline int build_int_from_shorts( jushort low, jushort high ) { // '%d' formats to indicate a 64-bit quantity; commonly "l" (in LP64) or "ll" // (in ILP32). -#define BOOL_TO_STR(__b) (__b) ? "true" : "false" +#define BOOL_TO_STR(_b_) ((_b_) ? "true" : "false") // Format 32-bit quantities. #define INT32_FORMAT "%d"
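
The most delicate piece of the patch is the publication protocol in humongous_obj_allocate_initialize_regions(): zero the klass word of the new object, set up the "starts humongous" metadata and the BOT, issue OrderAccess::storestore(), and only then update the top fields that concurrent refinement threads key off. Below is a minimal standalone C++ sketch of that ordering; it is not HotSpot code: the Region type, field names, and values are invented for illustration, and a std::atomic release/acquire pair stands in for the storestore barrier plus the racy reads of top.

#include <atomic>
#include <cstdio>
#include <thread>

// Toy region: "top" publishes how many words are allocated; a zero
// "klass word" means the object header is not initialized yet.
struct Region {
  std::atomic<long> klass_word{-1};
  std::atomic<int>  top{0};  // 0 == region looks empty
};

// Allocator thread: zero the header first, then publish top. The
// release store guarantees that no thread can observe the new top
// without also observing the zeroed header -- the role played by
// OrderAccess::storestore() in the patch.
void allocate(Region& r, int word_size) {
  r.klass_word.store(0, std::memory_order_relaxed);   // 1. zero the header
  r.top.store(word_size, std::memory_order_release);  // 2. publish top
  r.klass_word.store(42, std::memory_order_relaxed);  // 3. real header, later
}

// Refinement-style scanner: it sees either an empty region, a zero
// klass word (and bails out), or a fully initialized header -- never
// a half-built one.
void scan(const Region& r) {
  int t = r.top.load(std::memory_order_acquire);
  if (t == 0) {
    std::printf("region empty, nothing to scan\n");
  } else if (r.klass_word.load(std::memory_order_relaxed) == 0) {
    std::printf("header not initialized yet, bailing out\n");
  } else {
    std::printf("scanned %d words\n", t);
  }
}

int main() {
  Region r;
  std::thread scanner(scan, std::cref(r));
  allocate(r, 16);
  scanner.join();
  return 0;
}

This also illustrates why the patch's comment rules out CollectedHeap::fill_with_object(): a plausible-looking but transiently inconsistent header is worse than a zero one, because a scanner has no way to tell that the klass and length fields are still in flux.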
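
A smaller idiom worth noting is append_secondary_free_list_if_not_empty_with_lock(): it probes the secondary list for emptiness without taking SecondaryFreeList_lock and only locks when there is something to move, so the common empty case pays no locking cost. Here is a sketch of the same shape with invented names; an atomic length stands in for the unlocked emptiness probe (the HotSpot code reads the list length directly, which is acceptable there because a stale answer merely skips or delays the append).

#include <atomic>
#include <cstdio>
#include <mutex>
#include <vector>

class FreeLists {
  std::mutex secondary_lock_;             // stands in for SecondaryFreeList_lock
  std::vector<int> secondary_;            // guarded by secondary_lock_
  std::atomic<size_t> secondary_len_{0};  // readable without the lock
  std::vector<int> master_;

 public:
  void add_to_secondary(int region) {
    std::lock_guard<std::mutex> g(secondary_lock_);
    secondary_.push_back(region);
    secondary_len_.store(secondary_.size(), std::memory_order_relaxed);
  }

  // Caller must hold secondary_lock_ (the patch's append_secondary_free_list()).
  void append_secondary_locked() {
    master_.insert(master_.end(), secondary_.begin(), secondary_.end());
    secondary_.clear();
    secondary_len_.store(0, std::memory_order_relaxed);
  }

  // The "_with_lock" variant: if the secondary list looks empty there
  // is no reason to take the lock and then try to append it.
  void append_secondary_if_not_empty_with_lock() {
    if (secondary_len_.load(std::memory_order_relaxed) == 0) return;
    std::lock_guard<std::mutex> g(secondary_lock_);
    append_secondary_locked();
  }

  size_t master_length() const { return master_.size(); }
};

int main() {
  FreeLists lists;
  lists.append_secondary_if_not_empty_with_lock();  // empty: no lock taken
  lists.add_to_secondary(1);
  lists.add_to_secondary(2);
  lists.append_secondary_if_not_empty_with_lock();  // moves both entries
  std::printf("master list length: %zu\n", lists.master_length());
  return 0;
}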