From a25a22c6fac3c54aa0cffea32b0def8dd726abb4 Mon Sep 17 00:00:00 2001
From: iveresov
Date: Fri, 10 Feb 2012 17:40:20 -0800
Subject: [PATCH] 7144296: PS: Optimize nmethods processing

Summary: Prunes scavenge roots in code list every young GC, promote objects directly pointed by the code immediately
Reviewed-by: johnc, jcoomes
---
 .../parallelScavenge/psPromotionManager.cpp   | 163 +----------------
 .../parallelScavenge/psPromotionManager.hpp   |   4 +-
 .../psPromotionManager.inline.hpp             | 170 +++++++++++++++++-
 .../parallelScavenge/psScavenge.cpp           |   5 +-
 .../parallelScavenge/psScavenge.hpp           |   5 +-
 .../parallelScavenge/psScavenge.inline.hpp    |  21 ++-
 .../parallelScavenge/psTasks.cpp              |   5 +-
 7 files changed, 193 insertions(+), 180 deletions(-)

diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
index 255bb8ee8..f72496905 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -247,167 +247,6 @@ void PSPromotionManager::flush_labs() {
   }
 }
 
-//
-// This method is pretty bulky. It would be nice to split it up
-// into smaller submethods, but we need to be careful not to hurt
-// performance.
-//
-
-oop PSPromotionManager::copy_to_survivor_space(oop o) {
-  assert(PSScavenge::should_scavenge(&o), "Sanity");
-
-  oop new_obj = NULL;
-
-  // NOTE! We must be very careful with any methods that access the mark
-  // in o. There may be multiple threads racing on it, and it may be forwarded
-  // at any time. Do not use oop methods for accessing the mark!
-  markOop test_mark = o->mark();
-
-  // The same test as "o->is_forwarded()"
-  if (!test_mark->is_marked()) {
-    bool new_obj_is_tenured = false;
-    size_t new_obj_size = o->size();
-
-    // Find the objects age, MT safe.
-    int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
-      test_mark->displaced_mark_helper()->age() : test_mark->age();
-
-    // Try allocating obj in to-space (unless too old)
-    if (age < PSScavenge::tenuring_threshold()) {
-      new_obj = (oop) _young_lab.allocate(new_obj_size);
-      if (new_obj == NULL && !_young_gen_is_full) {
-        // Do we allocate directly, or flush and refill?
-        if (new_obj_size > (YoungPLABSize / 2)) {
-          // Allocate this object directly
-          new_obj = (oop)young_space()->cas_allocate(new_obj_size);
-        } else {
-          // Flush and fill
-          _young_lab.flush();
-
-          HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
-          if (lab_base != NULL) {
-            _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
-            // Try the young lab allocation again.
-            new_obj = (oop) _young_lab.allocate(new_obj_size);
-          } else {
-            _young_gen_is_full = true;
-          }
-        }
-      }
-    }
-
-    // Otherwise try allocating obj tenured
-    if (new_obj == NULL) {
-#ifndef PRODUCT
-      if (Universe::heap()->promotion_should_fail()) {
-        return oop_promotion_failed(o, test_mark);
-      }
-#endif  // #ifndef PRODUCT
-
-      new_obj = (oop) _old_lab.allocate(new_obj_size);
-      new_obj_is_tenured = true;
-
-      if (new_obj == NULL) {
-        if (!_old_gen_is_full) {
-          // Do we allocate directly, or flush and refill?
-          if (new_obj_size > (OldPLABSize / 2)) {
-            // Allocate this object directly
-            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
-          } else {
-            // Flush and fill
-            _old_lab.flush();
-
-            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
-            if(lab_base != NULL) {
-              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
-              // Try the old lab allocation again.
-              new_obj = (oop) _old_lab.allocate(new_obj_size);
-            }
-          }
-        }
-
-        // This is the promotion failed test, and code handling.
-        // The code belongs here for two reasons. It is slightly
-        // different thatn the code below, and cannot share the
-        // CAS testing code. Keeping the code here also minimizes
-        // the impact on the common case fast path code.
-
-        if (new_obj == NULL) {
-          _old_gen_is_full = true;
-          return oop_promotion_failed(o, test_mark);
-        }
-      }
-    }
-
-    assert(new_obj != NULL, "allocation should have succeeded");
-
-    // Copy obj
-    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
-
-    // Now we have to CAS in the header.
-    if (o->cas_forward_to(new_obj, test_mark)) {
-      // We won any races, we "own" this object.
-      assert(new_obj == o->forwardee(), "Sanity");
-
-      // Increment age if obj still in new generation. Now that
-      // we're dealing with a markOop that cannot change, it is
-      // okay to use the non mt safe oop methods.
-      if (!new_obj_is_tenured) {
-        new_obj->incr_age();
-        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
-      }
-
-      // Do the size comparison first with new_obj_size, which we
-      // already have. Hopefully, only a few objects are larger than
-      // _min_array_size_for_chunking, and most of them will be arrays.
-      // So, the is->objArray() test would be very infrequent.
-      if (new_obj_size > _min_array_size_for_chunking &&
-          new_obj->is_objArray() &&
-          PSChunkLargeArrays) {
-        // we'll chunk it
-        oop* const masked_o = mask_chunked_array_oop(o);
-        push_depth(masked_o);
-        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
-      } else {
-        // we'll just push its contents
-        new_obj->push_contents(this);
-      }
-    } else {
-      // We lost, someone else "owns" this object
-      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
-
-      // Try to deallocate the space. If it was directly allocated we cannot
-      // deallocate it, so we have to test. If the deallocation fails,
-      // overwrite with a filler object.
-      if (new_obj_is_tenured) {
-        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
-          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
-        }
-      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
-        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
-      }
-
-      // don't update this before the unallocation!
-      new_obj = o->forwardee();
-    }
-  } else {
-    assert(o->is_forwarded(), "Sanity");
-    new_obj = o->forwardee();
-  }
-
-#ifdef DEBUG
-  // This code must come after the CAS test, or it will print incorrect
-  // information.
-  if (TraceScavenge) {
-    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
-                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
-                           new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
-  }
-#endif
-
-  return new_obj;
-}
-
 template <class T> void PSPromotionManager::process_array_chunk_work(
                                                  oop obj,
                                                  int start, int end) {
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
index 80c09d913..360640eba 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -171,7 +171,7 @@ class PSPromotionManager : public CHeapObj {
   void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }
 
   // Promotion methods
-  oop copy_to_survivor_space(oop o);
+  template <bool promote_immediately> oop copy_to_survivor_space(oop o);
   oop oop_promotion_failed(oop obj, markOop obj_mark);
 
   void reset();
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
index eae7eaded..6c5da3014 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,6 +61,170 @@ inline void PSPromotionManager::claim_or_forward_depth(T* p) {
   claim_or_forward_internal_depth(p);
 }
 
+//
+// This method is pretty bulky. It would be nice to split it up
+// into smaller submethods, but we need to be careful not to hurt
+// performance.
+//
+template<bool promote_immediately>
+oop PSPromotionManager::copy_to_survivor_space(oop o) {
+  assert(PSScavenge::should_scavenge(&o), "Sanity");
+
+  oop new_obj = NULL;
+
+  // NOTE! We must be very careful with any methods that access the mark
+  // in o. There may be multiple threads racing on it, and it may be forwarded
+  // at any time. Do not use oop methods for accessing the mark!
+  markOop test_mark = o->mark();
+
+  // The same test as "o->is_forwarded()"
+  if (!test_mark->is_marked()) {
+    bool new_obj_is_tenured = false;
+    size_t new_obj_size = o->size();
+
+    if (!promote_immediately) {
+      // Find the objects age, MT safe.
+      int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+        test_mark->displaced_mark_helper()->age() : test_mark->age();
+
+      // Try allocating obj in to-space (unless too old)
+      if (age < PSScavenge::tenuring_threshold()) {
+        new_obj = (oop) _young_lab.allocate(new_obj_size);
+        if (new_obj == NULL && !_young_gen_is_full) {
+          // Do we allocate directly, or flush and refill?
+          if (new_obj_size > (YoungPLABSize / 2)) {
+            // Allocate this object directly
+            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
+          } else {
+            // Flush and fill
+            _young_lab.flush();
+
+            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
+            if (lab_base != NULL) {
+              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
+              // Try the young lab allocation again.
+              new_obj = (oop) _young_lab.allocate(new_obj_size);
+            } else {
+              _young_gen_is_full = true;
+            }
+          }
+        }
+      }
+    }
+
+    // Otherwise try allocating obj tenured
+    if (new_obj == NULL) {
+#ifndef PRODUCT
+      if (Universe::heap()->promotion_should_fail()) {
+        return oop_promotion_failed(o, test_mark);
+      }
+#endif  // #ifndef PRODUCT
+
+      new_obj = (oop) _old_lab.allocate(new_obj_size);
+      new_obj_is_tenured = true;
+
+      if (new_obj == NULL) {
+        if (!_old_gen_is_full) {
+          // Do we allocate directly, or flush and refill?
+          if (new_obj_size > (OldPLABSize / 2)) {
+            // Allocate this object directly
+            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
+          } else {
+            // Flush and fill
+            _old_lab.flush();
+
+            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
+            if(lab_base != NULL) {
+              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
+              // Try the old lab allocation again.
+              new_obj = (oop) _old_lab.allocate(new_obj_size);
+            }
+          }
+        }
+
+        // This is the promotion failed test, and code handling.
+        // The code belongs here for two reasons. It is slightly
+        // different thatn the code below, and cannot share the
+        // CAS testing code. Keeping the code here also minimizes
+        // the impact on the common case fast path code.
+
+        if (new_obj == NULL) {
+          _old_gen_is_full = true;
+          return oop_promotion_failed(o, test_mark);
+        }
+      }
+    }
+
+    assert(new_obj != NULL, "allocation should have succeeded");
+
+    // Copy obj
+    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
+
+    // Now we have to CAS in the header.
+    if (o->cas_forward_to(new_obj, test_mark)) {
+      // We won any races, we "own" this object.
+      assert(new_obj == o->forwardee(), "Sanity");
+
+      // Increment age if obj still in new generation. Now that
+      // we're dealing with a markOop that cannot change, it is
+      // okay to use the non mt safe oop methods.
+      if (!new_obj_is_tenured) {
+        new_obj->incr_age();
+        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
+      }
+
+      // Do the size comparison first with new_obj_size, which we
+      // already have. Hopefully, only a few objects are larger than
+      // _min_array_size_for_chunking, and most of them will be arrays.
+      // So, the is->objArray() test would be very infrequent.
+      if (new_obj_size > _min_array_size_for_chunking &&
+          new_obj->is_objArray() &&
+          PSChunkLargeArrays) {
+        // we'll chunk it
+        oop* const masked_o = mask_chunked_array_oop(o);
+        push_depth(masked_o);
+        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
+      } else {
+        // we'll just push its contents
+        new_obj->push_contents(this);
+      }
+    } else {
+      // We lost, someone else "owns" this object
+      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
+
+      // Try to deallocate the space. If it was directly allocated we cannot
+      // deallocate it, so we have to test. If the deallocation fails,
+      // overwrite with a filler object.
+      if (new_obj_is_tenured) {
+        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
+          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
+        }
+      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
+        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
+      }
+
+      // don't update this before the unallocation!
+      new_obj = o->forwardee();
+    }
+  } else {
+    assert(o->is_forwarded(), "Sanity");
+    new_obj = o->forwardee();
+  }
+
+#ifdef DEBUG
+  // This code must come after the CAS test, or it will print incorrect
+  // information.
+  if (TraceScavenge) {
+    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
+                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
+                           new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
+  }
+#endif
+
+  return new_obj;
+}
+
+
 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
   if (is_oop_masked(p)) {
     assert(PSChunkLargeArrays, "invariant");
@@ -69,9 +233,9 @@ inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
   } else {
     if (p.is_narrow()) {
       assert(UseCompressedOops, "Error");
-      PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
+      PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
     } else {
-      PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);
+      PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
     }
   }
 }
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
index f3cf14203..0912ab670 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/symbolTable.hpp"
+#include "code/codeCache.hpp"
 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
 #include "gc_implementation/parallelScavenge/generationSizer.hpp"
@@ -100,7 +101,7 @@ public:
 
     // Weak refs may be visited more than once.
     if (PSScavenge::should_scavenge(p, _to_space)) {
-      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
+      PSScavenge::copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(_promotion_manager, p);
     }
   }
   virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
@@ -602,6 +603,8 @@ bool PSScavenge::invoke_no_policy() {
 
   NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
 
+  CodeCache::prune_scavenge_root_nmethods();
+
   // Re-verify object start arrays
   if (VerifyObjectStartArray &&
       VerifyAfterGC) {
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
index f3e65a166..030fdb20c 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -135,7 +135,8 @@ class PSScavenge: AllStatic {
   template <class T> static inline bool should_scavenge(T* p, MutableSpace* to_space);
   template <class T> static inline bool should_scavenge(T* p, bool check_to_space);
 
-  template <class T> inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);
+  template <class T, bool promote_immediately>
+  inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);
 
   // Is an object in the young generation
   // This assumes that the HeapWord argument is in the heap,
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
index 880f06789..8497dd1ce 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
+#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 
 inline void PSScavenge::save_to_space_top_before_gc() {
@@ -65,7 +66,7 @@ inline bool PSScavenge::should_scavenge(T* p, bool check_to_space) {
 // Attempt to "claim" oop at p via CAS, push the new obj if successful
 // This version tests the oop* to make sure it is within the heap before
 // attempting marking.
-template <class T>
+template <class T, bool promote_immediately>
 inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
                                                    T* p) {
   assert(should_scavenge(p, true), "revisiting object?");
@@ -73,7 +74,7 @@ inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
   oop o = oopDesc::load_decode_heap_oop_not_null(p);
   oop new_obj = o->is_forwarded()
         ? o->forwardee()
-        : pm->copy_to_survivor_space(o);
+        : pm->copy_to_survivor_space<promote_immediately>(o);
   oopDesc::encode_store_heap_oop_not_null(p, new_obj);
 
   // We cannot mark without test, as some code passes us pointers
@@ -86,7 +87,8 @@
   }
 }
 
-class PSScavengeRootsClosure: public OopClosure {
+template<bool promote_immediately>
+class PSRootsClosure: public OopClosure {
  private:
   PSPromotionManager* _promotion_manager;
 
@@ -94,13 +96,16 @@ class PSScavengeRootsClosure: public OopClosure {
   template <class T> void do_oop_work(T *p) {
     if (PSScavenge::should_scavenge(p)) {
       // We never card mark roots, maybe call a func without test?
-      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
+      PSScavenge::copy_and_push_safe_barrier<T, promote_immediately>(_promotion_manager, p);
     }
   }
 
  public:
-  PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
-  void do_oop(oop* p)       { PSScavengeRootsClosure::do_oop_work(p); }
-  void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); }
+  PSRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
+  void do_oop(oop* p)       { PSRootsClosure::do_oop_work(p); }
+  void do_oop(narrowOop* p) { PSRootsClosure::do_oop_work(p); }
 };
+typedef PSRootsClosure</*promote_immediately=*/false> PSScavengeRootsClosure;
+typedef PSRootsClosure</*promote_immediately=*/true> PSPromoteRootsClosure;
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSSCAVENGE_INLINE_HPP
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
index 0800681f9..db28f249f 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,7 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
   PSPromotionManager* pm =
     PSPromotionManager::gc_thread_promotion_manager(which);
   PSScavengeRootsClosure roots_closure(pm);
+  PSPromoteRootsClosure roots_to_old_closure(pm);
 
   switch (_root_type) {
     case universe:
@@ -91,7 +92,7 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
 
     case code_cache:
       {
-        CodeBlobToOopClosure each_scavengable_code_blob(&roots_closure, /*do_marking=*/ true);
+        CodeBlobToOopClosure each_scavengable_code_blob(&roots_to_old_closure, /*do_marking=*/ true);
         CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
       }
       break;
--
GitLab
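
Reader's note (not part of the patch): the change makes copy_to_survivor_space, copy_and_push_safe_barrier, and the root closure templates over a compile-time promote_immediately flag. Ordinary roots keep the age-based young/old decision, while the code-cache case in psTasks.cpp uses the true instantiation so objects directly referenced by nmethods are promoted at once, which is what lets CodeCache::prune_scavenge_root_nmethods() trim the scavenge-roots list after every young GC. The sketch below only illustrates that template-flag pattern under simplified assumptions; it is not HotSpot code, and every name in it (Obj, copy_object, RootsClosure, the hard-coded age threshold) is invented for the example.

// Minimal, self-contained sketch (not HotSpot code) of the "bool template
// parameter" pattern the patch applies to copy_to_survivor_space() and the
// root closures. All names below are invented for the example.
#include <cstdio>

struct Obj { int age; };

// When promote_immediately is true, the young-generation branch is compiled
// out, mirroring how the patch lets code-cache roots push their referents
// straight to the old generation.
template <bool promote_immediately>
Obj* copy_object(Obj* o) {
  if (!promote_immediately) {
    if (o->age < 15) {  // stand-in for the tenuring-threshold check
      std::puts("copied to to-space");
      return o;
    }
  }
  std::puts("promoted to old generation");
  return o;
}

// Closure templated on the same flag, echoing PSRootsClosure.
template <bool promote_immediately>
struct RootsClosure {
  void do_oop(Obj** p) { *p = copy_object<promote_immediately>(*p); }
};

// Two concrete flavors, as the patch does with PSScavengeRootsClosure and
// PSPromoteRootsClosure.
typedef RootsClosure</*promote_immediately=*/false> ScavengeRootsClosure;
typedef RootsClosure</*promote_immediately=*/true>  PromoteRootsClosure;

int main() {
  Obj young = { 1 };
  Obj* p = &young;
  ScavengeRootsClosure().do_oop(&p);  // age-based copy
  PromoteRootsClosure().do_oop(&p);   // immediate promotion
  return 0;
}

Because the flag is a template parameter rather than a runtime argument, the compiler emits two specialized copies and the young-generation path disappears entirely from the promote-immediately instantiation, so the common scavenge path pays no extra per-object branch.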