diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
index e2e27c0f353ffb285f7eaee464b15980f3f2ecf4..e83441076594d11a6ed8746ef4e714d193044c91 100644
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -1961,7 +1961,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
   ref_processor()->set_enqueuing_is_done(false);
   ref_processor()->enable_discovery();
-  ref_processor()->snap_policy(clear_all_soft_refs);
+  ref_processor()->setup_policy(clear_all_soft_refs);
   // If an asynchronous collection finishes, the _modUnionTable is
   // all clear. If we are assuming the collection from an asynchronous
   // collection, clear the _modUnionTable.
@@ -2386,7 +2386,7 @@ void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
   }
   // Snapshot the soft reference policy to be used in this collection cycle.
-  ref_processor()->snap_policy(clear_all_soft_refs);
+  ref_processor()->setup_policy(clear_all_soft_refs);
   bool init_mark_was_synchronous = false; // until proven otherwise
   while (_collectorState != Idling) {
@@ -5683,7 +5683,7 @@ void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
   assert(rp->span().equals(_span), "Spans should be equal");
   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
   // Process weak references.
-  rp->snap_policy(clear_all_soft_refs);
+  rp->setup_policy(clear_all_soft_refs);
   verify_work_stacks_empty();
   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
index 65f64e2ad1614a16e85daca8cb86e02f7af8ef98..646804205c73619771091562ddae9601879a5d95 100644
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -811,7 +811,7 @@ void ConcurrentMark::checkpointRootsInitialPost() {
   ReferenceProcessor* rp = g1h->ref_processor();
   rp->verify_no_references_recorded();
   rp->enable_discovery(); // enable ("weak") refs discovery
-  rp->snap_policy(false); // snapshot the soft ref policy to be used in this cycle
+  rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   satb_mq_set.set_process_completed_threshold(G1SATBProcessCompletedThreshold);
@@ -1834,7 +1834,7 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   ReferenceProcessor* rp = g1h->ref_processor();
   // Process weak references.
-  rp->snap_policy(clear_all_soft_refs);
+  rp->setup_policy(clear_all_soft_refs);
   assert(_markStack.isEmpty(), "mark stack should be empty");
   G1CMIsAliveClosure g1IsAliveClosure(g1h);
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 5aaa483b91c19ed2b41288fb9a7bc568b1638de6..f0cad8ed231eebdab25b8cf44ecca963969894eb 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -891,7 +891,7 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
     ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
     ref_processor()->enable_discovery();
-    ref_processor()->snap_policy(clear_all_soft_refs);
+    ref_processor()->setup_policy(clear_all_soft_refs);
     // Do collection work
     {
diff --git a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
index 4aa08a2319eeb9a4cdce091b389fc5b11ca3e03e..d94c6f97cc6b5d75eeba1bb052b3bb3aff3abbda 100644
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
@@ -35,7 +35,7 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
   assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
   GenMarkSweep::_ref_processor = rp;
-  rp->snap_policy(clear_all_softrefs);
+  rp->setup_policy(clear_all_softrefs);
   // When collecting the permanent generation methodOops may be moving,
   // so we either have to flush all bcp data or convert it into bci.
@@ -123,7 +123,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
   // Process reference objects found during marking
   ReferenceProcessor* rp = GenMarkSweep::ref_processor();
-  rp->snap_policy(clear_all_softrefs);
+  rp->setup_policy(clear_all_softrefs);
   rp->process_discovered_references(&GenMarkSweep::is_alive,
                                     &GenMarkSweep::keep_alive,
                                     &GenMarkSweep::follow_stack_closure,
diff --git a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
index 0c0196f253d7e124a4fd16484604a75dee402036..7290767990567120c89787ff4989c6ae1f66289c 100644
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
@@ -773,7 +773,7 @@ void ParNewGeneration::collect(bool full,
   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
   EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
     &scan_without_gc_barrier, &scan_with_gc_barrier);
-  rp->snap_policy(clear_all_soft_refs);
+  rp->setup_policy(clear_all_soft_refs);
   if (rp->processing_is_mt()) {
     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
     rp->process_discovered_references(&is_alive, &keep_alive,
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
index 5134f47e7a543d5d5e9fbf8c0e06f501143b656a..ea0c20b0148d9b938cebeb80f875da652832e827 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
@@ -172,7 +172,7 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
     COMPILER2_PRESENT(DerivedPointerTable::clear());
     ref_processor()->enable_discovery();
-    ref_processor()->snap_policy(clear_all_softrefs);
+    ref_processor()->setup_policy(clear_all_softrefs);
     mark_sweep_phase1(clear_all_softrefs);
@@ -518,7 +518,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Process reference objects found during marking
   {
-    ref_processor()->snap_policy(clear_all_softrefs);
+    ref_processor()->setup_policy(clear_all_softrefs);
     ref_processor()->process_discovered_references(
       is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
   }
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
index 9ca6b6c804c3865d66bb7e17d0f1d9f44f9cefe0..1ad78f84c56a021d5605d1b048a853020b40b9fd 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -1578,7 +1578,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
     COMPILER2_PRESENT(DerivedPointerTable::clear());
     ref_processor()->enable_discovery();
-    ref_processor()->snap_policy(maximum_heap_compaction);
+    ref_processor()->setup_policy(maximum_heap_compaction);
     bool marked_for_unloading = false;
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
index 4aa7d5afe388909c1edada0a2a45faf3b57ef323..6357362b0f7d569c4e930a24ca39d43627df53c4 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
@@ -330,7 +330,7 @@ bool PSScavenge::invoke_no_policy() {
     COMPILER2_PRESENT(DerivedPointerTable::clear());
     reference_processor()->enable_discovery();
-    reference_processor()->snap_policy(false);
+    reference_processor()->setup_policy(false);
     // We track how much was promoted to the next generation for
     // the AdaptiveSizePolicy.
@@ -395,7 +395,7 @@ bool PSScavenge::invoke_no_policy() {
     // Process reference objects discovered during scavenge
     {
-      reference_processor()->snap_policy(false); // not always_clear
+      reference_processor()->setup_policy(false); // not always_clear
       PSKeepAliveClosure keep_alive(promotion_manager);
       PSEvacuateFollowersClosure evac_followers(promotion_manager);
       if (reference_processor()->processing_is_mt()) {
diff --git a/src/share/vm/memory/defNewGeneration.cpp b/src/share/vm/memory/defNewGeneration.cpp
index e953e7545b54b570b12e11a6447d189ff8e08dcc..93a2e57e34618366843726b4b074643583163c44 100644
--- a/src/share/vm/memory/defNewGeneration.cpp
+++ b/src/share/vm/memory/defNewGeneration.cpp
@@ -567,7 +567,7 @@ void DefNewGeneration::collect(bool full,
   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
   ReferenceProcessor* rp = ref_processor();
-  rp->snap_policy(clear_all_soft_refs);
+  rp->setup_policy(clear_all_soft_refs);
   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                     NULL);
   if (!promotion_failed()) {
diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp
index f411d4d7bfa94ca7920200e2e0aca5e0ca0df98f..832a3331e1999679e8928ab258d910c1b93ba962 100644
--- a/src/share/vm/memory/genCollectedHeap.cpp
+++ b/src/share/vm/memory/genCollectedHeap.cpp
@@ -525,7 +525,7 @@ void GenCollectedHeap::do_collection(bool full,
       if (rp->discovery_is_atomic()) {
         rp->verify_no_references_recorded();
         rp->enable_discovery();
-        rp->snap_policy(clear_all_soft_refs);
+        rp->setup_policy(clear_all_soft_refs);
       } else {
         // collect() below will enable discovery as appropriate
       }
diff --git a/src/share/vm/memory/genMarkSweep.cpp b/src/share/vm/memory/genMarkSweep.cpp
index f0c41d75c00117db445dc09c00a20ca84290f904..ba42d5d2143f2beea99a9bf8d3c0ee2eda6a074e 100644
--- a/src/share/vm/memory/genMarkSweep.cpp
+++ b/src/share/vm/memory/genMarkSweep.cpp
@@ -33,7 +33,7 @@ void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
   assert(ref_processor() == NULL, "no stomping");
   assert(rp != NULL, "should be non-NULL");
   _ref_processor = rp;
-  rp->snap_policy(clear_all_softrefs);
+  rp->setup_policy(clear_all_softrefs);
   TraceTime t1("Full GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
@@ -246,7 +246,7 @@ void GenMarkSweep::mark_sweep_phase1(int level,
   // Process reference objects found during marking
   {
-    ref_processor()->snap_policy(clear_all_softrefs);
+    ref_processor()->setup_policy(clear_all_softrefs);
     ref_processor()->process_discovered_references(
       &is_alive, &keep_alive, &follow_stack_closure, NULL);
   }
diff --git a/src/share/vm/memory/referencePolicy.cpp b/src/share/vm/memory/referencePolicy.cpp
index 0d23d1ddf369e7142a53663ed7807fba29f2f4c0..26aef4a2fd460ac4251d15aa894fc6e4bd8e6a13 100644
--- a/src/share/vm/memory/referencePolicy.cpp
+++ b/src/share/vm/memory/referencePolicy.cpp
@@ -26,11 +26,11 @@
 # include "incls/_referencePolicy.cpp.incl"
 
 LRUCurrentHeapPolicy::LRUCurrentHeapPolicy() {
-  snap();
+  setup();
 }
 
 // Capture state (of-the-VM) information needed to evaluate the policy
-void LRUCurrentHeapPolicy::snap() {
+void LRUCurrentHeapPolicy::setup() {
   _max_interval = (Universe::get_heap_free_at_last_gc() / M) * SoftRefLRUPolicyMSPerMB;
   assert(_max_interval >= 0,"Sanity check");
 }
@@ -52,11 +52,11 @@ bool LRUCurrentHeapPolicy::should_clear_reference(oop p) {
 /////////////////////// MaxHeap //////////////////////
 
 LRUMaxHeapPolicy::LRUMaxHeapPolicy() {
-  snap();
+  setup();
 }
 
 // Capture state (of-the-VM) information needed to evaluate the policy
-void LRUMaxHeapPolicy::snap() {
+void LRUMaxHeapPolicy::setup() {
   size_t max_heap = MaxHeapSize;
   max_heap -= Universe::get_heap_used_at_last_gc();
   max_heap /= M;
diff --git a/src/share/vm/memory/referencePolicy.hpp b/src/share/vm/memory/referencePolicy.hpp
index 538ab183a0c5a05e40145f6d91c2393ddb80f031..2cf22c825abeb5e9d101b4eb0ed21edbe4f606bc 100644
--- a/src/share/vm/memory/referencePolicy.hpp
+++ b/src/share/vm/memory/referencePolicy.hpp
@@ -30,7 +30,7 @@ class ReferencePolicy : public CHeapObj {
  public:
   virtual bool should_clear_reference(oop p) { ShouldNotReachHere(); return true; }
   // Capture state (of-the-VM) information needed to evaluate the policy
-  virtual void snap() { /* do nothing */ }
+  virtual void setup() { /* do nothing */ }
 };
 
 class NeverClearPolicy : public ReferencePolicy {
@@ -51,7 +51,7 @@ class LRUCurrentHeapPolicy : public ReferencePolicy {
   LRUCurrentHeapPolicy();
 
   // Capture state (of-the-VM) information needed to evaluate the policy
-  void snap();
+  void setup();
   bool should_clear_reference(oop p);
 };
@@ -63,6 +63,6 @@ class LRUMaxHeapPolicy : public ReferencePolicy {
   LRUMaxHeapPolicy();
 
   // Capture state (of-the-VM) information needed to evaluate the policy
-  void snap();
+  void setup();
   bool should_clear_reference(oop p);
 };
diff --git a/src/share/vm/memory/referenceProcessor.cpp b/src/share/vm/memory/referenceProcessor.cpp
index 648fbbd47367d0c79fa5eb71e0f39fcf5fa86df4..073886f2f93dfe0c321c0c8dc33f063a8dfd5ed3 100644
--- a/src/share/vm/memory/referenceProcessor.cpp
+++ b/src/share/vm/memory/referenceProcessor.cpp
@@ -115,7 +115,7 @@ ReferenceProcessor::create_ref_processor(MemRegion span,
     vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
   }
   rp->set_is_alive_non_header(is_alive_non_header);
-  rp->snap_policy(false /* default soft ref policy */);
+  rp->setup_policy(false /* default soft ref policy */);
   return rp;
 }
diff --git a/src/share/vm/memory/referenceProcessor.hpp b/src/share/vm/memory/referenceProcessor.hpp
index d4ebec200fa55cac3c7a5bf61333109e96cdbefa..6d82e524d8afffd577b7a525cd820a0c7ef743d1 100644
--- a/src/share/vm/memory/referenceProcessor.hpp
+++ b/src/share/vm/memory/referenceProcessor.hpp
@@ -98,10 +98,10 @@ class ReferenceProcessor : public CHeapObj {
   DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
   static oop sentinel_ref() { return _sentinelRef; }
   static oop* adr_sentinel_ref() { return &_sentinelRef; }
-  ReferencePolicy* snap_policy(bool always_clear) {
+  ReferencePolicy* setup_policy(bool always_clear) {
     _current_soft_ref_policy = always_clear ?
       _always_clear_soft_ref_policy : _default_soft_ref_policy;
-    _current_soft_ref_policy->snap(); // snapshot the policy threshold
+    _current_soft_ref_policy->setup(); // snapshot the policy threshold
     return _current_soft_ref_policy;
   }
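
Note (not part of the patch): the following is a minimal, standalone C++ sketch of the renamed interface, added only to illustrate how the hunks above fit together. The names RefProcessorModel, AlwaysClearPolicy, LRUStylePolicy and the 1000 ms threshold are hypothetical stand-ins for ReferenceProcessor, the always-clear policy, LRUCurrentHeapPolicy/LRUMaxHeapPolicy and their SoftRefLRUPolicyMSPerMB-derived interval; only the setup_policy()/setup() shape mirrors referenceProcessor.hpp and referencePolicy.hpp as changed above.

// Standalone illustration only -- not HotSpot source.
#include <cstdio>

class ReferencePolicy {
 public:
  virtual ~ReferencePolicy() {}
  // Capture state needed to evaluate the policy (no-op by default),
  // mirroring ReferencePolicy::setup() in referencePolicy.hpp.
  virtual void setup() { /* do nothing */ }
  virtual bool should_clear_reference(long age_ms) { (void)age_ms; return true; }
};

// Stand-in for the policy chosen when clear_all_soft_refs is true.
class AlwaysClearPolicy : public ReferencePolicy {
 public:
  virtual bool should_clear_reference(long age_ms) { (void)age_ms; return true; }
};

// Stand-in for the LRU-style default policies; the 1000 ms threshold is made up.
class LRUStylePolicy : public ReferencePolicy {
  long _max_interval;
 public:
  LRUStylePolicy() : _max_interval(0) { setup(); }
  virtual void setup() { _max_interval = 1000; }  // snapshot the threshold
  virtual bool should_clear_reference(long age_ms) { return age_ms > _max_interval; }
};

// Stand-in for ReferenceProcessor: holds both policies and, once per cycle,
// picks one and lets it snapshot its threshold -- the setup_policy() pattern above.
class RefProcessorModel {
  ReferencePolicy* _always_clear_soft_ref_policy;
  ReferencePolicy* _default_soft_ref_policy;
  ReferencePolicy* _current_soft_ref_policy;
 public:
  RefProcessorModel()
    : _always_clear_soft_ref_policy(new AlwaysClearPolicy()),
      _default_soft_ref_policy(new LRUStylePolicy()),
      _current_soft_ref_policy(NULL) {}
  ~RefProcessorModel() {
    delete _always_clear_soft_ref_policy;
    delete _default_soft_ref_policy;
  }
  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();  // snapshot the policy threshold
    return _current_soft_ref_policy;
  }
};

int main() {
  RefProcessorModel rp;
  // A collector would call this once at the start of each collection cycle,
  // as the GC call sites in the diff do.
  ReferencePolicy* p = rp.setup_policy(false /* default soft ref policy */);
  std::printf("clear a 500 ms-old soft ref? %s\n",
              p->should_clear_reference(500) ? "yes" : "no");
  return 0;
}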