提交 0ab377be 编写于 作者: K kbarrett

8069367: Eagerly reclaimed humongous objects left on mark stack

Summary: Prevent eager reclaim of objects that might be on mark stack.
Reviewed-by: brutisso, tschatzl
上级 27031b88
...@@ -3510,22 +3510,29 @@ void ConcurrentMark::print_finger() { ...@@ -3510,22 +3510,29 @@ void ConcurrentMark::print_finger() {
} }
#endif #endif
void CMTask::scan_object(oop obj) { template<bool scan>
inline void CMTask::process_grey_object(oop obj) {
assert(scan || obj->is_typeArray(), "Skipping scan of grey non-typeArray");
assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant"); assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
if (_cm->verbose_high()) { if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT, gclog_or_tty->print_cr("[%u] processing grey object " PTR_FORMAT,
_worker_id, p2i((void*) obj)); _worker_id, p2i((void*) obj));
} }
size_t obj_size = obj->size(); size_t obj_size = obj->size();
_words_scanned += obj_size; _words_scanned += obj_size;
obj->oop_iterate(_cm_oop_closure); if (scan) {
obj->oop_iterate(_cm_oop_closure);
}
statsOnly( ++_objs_scanned ); statsOnly( ++_objs_scanned );
check_limits(); check_limits();
} }
template void CMTask::process_grey_object<true>(oop);
template void CMTask::process_grey_object<false>(oop);
// Closure for iteration over bitmaps // Closure for iteration over bitmaps
class CMBitMapClosure : public BitMapClosure { class CMBitMapClosure : public BitMapClosure {
private: private:
......
...@@ -1112,6 +1112,8 @@ private: ...@@ -1112,6 +1112,8 @@ private:
// mark bitmap scan, and so needs to be pushed onto the mark stack. // mark bitmap scan, and so needs to be pushed onto the mark stack.
bool is_below_finger(HeapWord* objAddr, HeapWord* global_finger) const; bool is_below_finger(HeapWord* objAddr, HeapWord* global_finger) const;
template<bool scan> void process_grey_object(oop obj);
public: public:
// It resets the task; it should be called right at the beginning of // It resets the task; it should be called right at the beginning of
// a marking phase. // a marking phase.
...@@ -1164,7 +1166,7 @@ public: ...@@ -1164,7 +1166,7 @@ public:
inline void deal_with_reference(oop obj); inline void deal_with_reference(oop obj);
// It scans an object and visits its children. // It scans an object and visits its children.
void scan_object(oop obj); void scan_object(oop obj) { process_grey_object<true>(obj); }
// It pushes an object on the local queue. // It pushes an object on the local queue.
inline void push(oop obj); inline void push(oop obj);
......
...@@ -332,14 +332,28 @@ inline void CMTask::deal_with_reference(oop obj) { ...@@ -332,14 +332,28 @@ inline void CMTask::deal_with_reference(oop obj) {
// be pushed on the stack. So, some duplicate work, but no // be pushed on the stack. So, some duplicate work, but no
// correctness problems. // correctness problems.
if (is_below_finger(objAddr, global_finger)) { if (is_below_finger(objAddr, global_finger)) {
if (_cm->verbose_high()) { if (obj->is_typeArray()) {
gclog_or_tty->print_cr("[%u] below a finger (local: " PTR_FORMAT // Immediately process arrays of primitive types, rather
", global: " PTR_FORMAT ") pushing " // than pushing on the mark stack. This keeps us from
PTR_FORMAT " on mark stack", // adding humongous objects to the mark stack that might
_worker_id, p2i(_finger), // be reclaimed before the entry is processed - see
p2i(global_finger), p2i(objAddr)); // selection of candidates for eager reclaim of humongous
// objects. The cost of the additional type test is
// mitigated by avoiding a trip through the mark stack,
// by only doing a bookkeeping update and avoiding the
// actual scan of the object - a typeArray contains no
// references, and the metadata is built-in.
process_grey_object<false>(obj);
} else {
if (_cm->verbose_high()) {
gclog_or_tty->print_cr("[%u] below a finger (local: " PTR_FORMAT
", global: " PTR_FORMAT ") pushing "
PTR_FORMAT " on mark stack",
_worker_id, p2i(_finger),
p2i(global_finger), p2i(objAddr));
}
push(obj);
} }
push(obj);
} }
} }
} }
......
...@@ -1853,7 +1853,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : ...@@ -1853,7 +1853,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()), _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
_old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()), _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
_humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()), _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
_humongous_is_live(), _humongous_reclaim_candidates(),
_has_humongous_reclaim_candidates(false), _has_humongous_reclaim_candidates(false),
_free_regions_coming(false), _free_regions_coming(false),
_young_list(new YoungList(this)), _young_list(new YoungList(this)),
...@@ -2048,8 +2048,14 @@ jint G1CollectedHeap::initialize() { ...@@ -2048,8 +2048,14 @@ jint G1CollectedHeap::initialize() {
_g1h = this; _g1h = this;
_in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes); {
_humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes); HeapWord* start = _hrm.reserved().start();
HeapWord* end = _hrm.reserved().end();
size_t granularity = HeapRegion::GrainBytes;
_in_cset_fast_test.initialize(start, end, granularity);
_humongous_reclaim_candidates.initialize(start, end, granularity);
}
// Create the ConcurrentMark data structure and thread. // Create the ConcurrentMark data structure and thread.
// (Must do this late, so that "max_regions" is defined.) // (Must do this late, so that "max_regions" is defined.)
...@@ -2141,11 +2147,6 @@ void G1CollectedHeap::stop() { ...@@ -2141,11 +2147,6 @@ void G1CollectedHeap::stop() {
} }
} }
void G1CollectedHeap::clear_humongous_is_live_table() {
guarantee(G1EagerReclaimHumongousObjects, "Should only be called if true");
_humongous_is_live.clear();
}
size_t G1CollectedHeap::conservative_max_heap_alignment() { size_t G1CollectedHeap::conservative_max_heap_alignment() {
return HeapRegion::max_region_size(); return HeapRegion::max_region_size();
} }
...@@ -3666,12 +3667,6 @@ size_t G1CollectedHeap::cards_scanned() { ...@@ -3666,12 +3667,6 @@ size_t G1CollectedHeap::cards_scanned() {
return g1_rem_set()->cardsScanned(); return g1_rem_set()->cardsScanned();
} }
bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
HeapRegion* region = region_at(index);
assert(region->startsHumongous(), "Must start a humongous object");
return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
}
class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure { class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
private: private:
size_t _total_humongous; size_t _total_humongous;
...@@ -3679,14 +3674,59 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure { ...@@ -3679,14 +3674,59 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
DirtyCardQueue _dcq; DirtyCardQueue _dcq;
bool humongous_region_is_candidate(uint index) { // We don't nominate objects with many remembered set entries, on
HeapRegion* region = G1CollectedHeap::heap()->region_at(index); // the assumption that such objects are likely still live.
assert(region->startsHumongous(), "Must start a humongous object"); bool is_remset_small(HeapRegion* region) const {
HeapRegionRemSet* const rset = region->rem_set(); HeapRegionRemSet* const rset = region->rem_set();
bool const allow_stale_refs = G1EagerReclaimHumongousObjectsWithStaleRefs; return G1EagerReclaimHumongousObjectsWithStaleRefs
return !oop(region->bottom())->is_objArray() && ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
((allow_stale_refs && rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)) || : rset->is_empty();
(!allow_stale_refs && rset->is_empty())); }
bool is_typeArray_region(HeapRegion* region) const {
return oop(region->bottom())->is_typeArray();
}
bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
assert(region->startsHumongous(), "Must start a humongous object");
// Candidate selection must satisfy the following constraints
// while concurrent marking is in progress:
//
// * In order to maintain SATB invariants, an object must not be
// reclaimed if it was allocated before the start of marking and
// has not had its references scanned. Such an object must have
// its references (including type metadata) scanned to ensure no
// live objects are missed by the marking process. Objects
// allocated after the start of concurrent marking don't need to
// be scanned.
//
// * An object must not be reclaimed if it is on the concurrent
// mark stack. Objects allocated after the start of concurrent
// marking are never pushed on the mark stack.
//
// Nominating only objects allocated after the start of concurrent
// marking is sufficient to meet both constraints. This may miss
// some objects that satisfy the constraints, but the marking data
// structures don't support efficiently performing the needed
// additional tests or scrubbing of the mark stack.
//
// However, we presently only nominate is_typeArray() objects.
// A humongous object containing references induces remembered
// set entries on other regions. In order to reclaim such an
// object, those remembered sets would need to be cleaned up.
//
// We also treat is_typeArray() objects specially, allowing them
// to be reclaimed even if allocated before the start of
// concurrent mark. For this we rely on mark stack insertion to
// exclude is_typeArray() objects, preventing reclaiming an object
// that is in the mark stack. We also rely on the metadata for
// such objects to be built-in and so ensured to be kept live.
// Frequent allocation and drop of large binary blobs is an
// important use case for eager reclaim, and this special handling
// may reduce needed headroom.
return is_typeArray_region(region) && is_remset_small(region);
} }
public: public:
...@@ -3702,14 +3742,17 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure { ...@@ -3702,14 +3742,17 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
} }
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
uint region_idx = r->hrm_index(); bool is_candidate = humongous_region_is_candidate(g1h, r);
bool is_candidate = humongous_region_is_candidate(region_idx); uint rindex = r->hrm_index();
// Is_candidate already filters out humongous object with large remembered sets. g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
// If we have a humongous object with a few remembered sets, we simply flush these
// remembered set entries into the DCQS. That will result in automatic
// re-evaluation of their remembered set entries during the following evacuation
// phase.
if (is_candidate) { if (is_candidate) {
_candidate_humongous++;
g1h->register_humongous_region_with_in_cset_fast_test(rindex);
// Is_candidate already filters out humongous object with large remembered sets.
// If we have a humongous object with a few remembered sets, we simply flush these
// remembered set entries into the DCQS. That will result in automatic
// re-evaluation of their remembered set entries during the following evacuation
// phase.
if (!r->rem_set()->is_empty()) { if (!r->rem_set()->is_empty()) {
guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries), guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
"Found a not-small remembered set here. This is inconsistent with previous assumptions."); "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
...@@ -3726,8 +3769,6 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure { ...@@ -3726,8 +3769,6 @@ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
r->rem_set()->clear_locked(); r->rem_set()->clear_locked();
} }
assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty."); assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
_candidate_humongous++;
} }
_total_humongous++; _total_humongous++;
...@@ -3747,6 +3788,7 @@ void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() { ...@@ -3747,6 +3788,7 @@ void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
} }
double time = os::elapsed_counter(); double time = os::elapsed_counter();
// Collect reclaim candidate information and register candidates with cset.
RegisterHumongousWithInCSetFastTestClosure cl; RegisterHumongousWithInCSetFastTestClosure cl;
heap_region_iterate(&cl); heap_region_iterate(&cl);
...@@ -3756,10 +3798,6 @@ void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() { ...@@ -3756,10 +3798,6 @@ void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
cl.candidate_humongous()); cl.candidate_humongous());
_has_humongous_reclaim_candidates = cl.candidate_humongous() > 0; _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
if (_has_humongous_reclaim_candidates || G1TraceEagerReclaimHumongousObjects) {
clear_humongous_is_live_table();
}
// Finally flush all remembered set entries to re-check into the global DCQS. // Finally flush all remembered set entries to re-check into the global DCQS.
cl.flush_rem_set_entries(); cl.flush_rem_set_entries();
} }
...@@ -6321,11 +6359,11 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure { ...@@ -6321,11 +6359,11 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
// required because stale remembered sets might reference locations that // required because stale remembered sets might reference locations that
// are currently allocated into. // are currently allocated into.
uint region_idx = r->hrm_index(); uint region_idx = r->hrm_index();
if (g1h->humongous_is_live(region_idx) || if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
g1h->humongous_region_is_always_live(region_idx)) { !r->rem_set()->is_empty()) {
if (G1TraceEagerReclaimHumongousObjects) { if (G1TraceEagerReclaimHumongousObjects) {
gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d", gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
region_idx, region_idx,
obj->size()*HeapWordSize, obj->size()*HeapWordSize,
r->bottom(), r->bottom(),
...@@ -6333,20 +6371,21 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure { ...@@ -6333,20 +6371,21 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
r->rem_set()->occupied(), r->rem_set()->occupied(),
r->rem_set()->strong_code_roots_list_length(), r->rem_set()->strong_code_roots_list_length(),
next_bitmap->isMarked(r->bottom()), next_bitmap->isMarked(r->bottom()),
g1h->humongous_is_live(region_idx), g1h->is_humongous_reclaim_candidate(region_idx),
obj->is_objArray() obj->is_typeArray()
); );
} }
return false; return false;
} }
guarantee(!obj->is_objArray(), guarantee(obj->is_typeArray(),
err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.", err_msg("Only eagerly reclaiming type arrays is supported, but the object "
PTR_FORMAT " is not.",
r->bottom())); r->bottom()));
if (G1TraceEagerReclaimHumongousObjects) { if (G1TraceEagerReclaimHumongousObjects) {
gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d", gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
region_idx, region_idx,
obj->size()*HeapWordSize, obj->size()*HeapWordSize,
r->bottom(), r->bottom(),
...@@ -6354,8 +6393,8 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure { ...@@ -6354,8 +6393,8 @@ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
r->rem_set()->occupied(), r->rem_set()->occupied(),
r->rem_set()->strong_code_roots_list_length(), r->rem_set()->strong_code_roots_list_length(),
next_bitmap->isMarked(r->bottom()), next_bitmap->isMarked(r->bottom()),
g1h->humongous_is_live(region_idx), g1h->is_humongous_reclaim_candidate(region_idx),
obj->is_objArray() obj->is_typeArray()
); );
} }
// Need to clear mark bit of the humongous object if already set. // Need to clear mark bit of the humongous object if already set.
......
...@@ -233,7 +233,6 @@ private: ...@@ -233,7 +233,6 @@ private:
// It keeps track of the humongous regions. // It keeps track of the humongous regions.
HeapRegionSet _humongous_set; HeapRegionSet _humongous_set;
void clear_humongous_is_live_table();
void eagerly_reclaim_humongous_regions(); void eagerly_reclaim_humongous_regions();
// The number of regions we could create by expansion. // The number of regions we could create by expansion.
...@@ -303,22 +302,26 @@ private: ...@@ -303,22 +302,26 @@ private:
// Helper for monitoring and management support. // Helper for monitoring and management support.
G1MonitoringSupport* _g1mm; G1MonitoringSupport* _g1mm;
// Records whether the region at the given index is kept live by roots or // Records whether the region at the given index is (still) a
// references from the young generation. // candidate for eager reclaim. Only valid for humongous start
class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> { // regions; other regions have unspecified values. Humongous start
// regions are initialized at start of collection pause, with
// candidates removed from the set as they are found reachable from
// roots or the young generation.
class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
protected: protected:
bool default_value() const { return false; } bool default_value() const { return false; }
public: public:
void clear() { G1BiasedMappedArray<bool>::clear(); } void clear() { G1BiasedMappedArray<bool>::clear(); }
void set_live(uint region) { void set_candidate(uint region, bool value) {
set_by_index(region, true); set_by_index(region, value);
} }
bool is_live(uint region) { bool is_candidate(uint region) {
return get_by_index(region); return get_by_index(region);
} }
}; };
HumongousIsLiveBiasedMappedArray _humongous_is_live; HumongousReclaimCandidates _humongous_reclaim_candidates;
// Stores whether during humongous object registration we found candidate regions. // Stores whether during humongous object registration we found candidate regions.
// If not, we can skip a few steps. // If not, we can skip a few steps.
bool _has_humongous_reclaim_candidates; bool _has_humongous_reclaim_candidates;
...@@ -655,18 +658,15 @@ public: ...@@ -655,18 +658,15 @@ public:
virtual void gc_prologue(bool full); virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full); virtual void gc_epilogue(bool full);
inline void set_humongous_is_live(oop obj); // Modify the reclaim candidate set and test for presence.
// These are only valid for starts_humongous regions.
inline void set_humongous_reclaim_candidate(uint region, bool value);
inline bool is_humongous_reclaim_candidate(uint region);
bool humongous_is_live(uint region) { // Remove from the reclaim candidate set. Also remove from the
return _humongous_is_live.is_live(region); // collection set so that later encounters avoid the slow path.
} inline void set_humongous_is_live(oop obj);
// Returns whether the given region (which must be a humongous (start) region)
// is to be considered conservatively live regardless of any other conditions.
bool humongous_region_is_always_live(uint index);
// Returns whether the given region (which must be a humongous (start) region)
// is considered a candidate for eager reclamation.
bool humongous_region_is_candidate(uint index);
// Register the given region to be part of the collection set. // Register the given region to be part of the collection set.
inline void register_humongous_region_with_in_cset_fast_test(uint index); inline void register_humongous_region_with_in_cset_fast_test(uint index);
// Register regions with humongous objects (actually on the start region) in // Register regions with humongous objects (actually on the start region) in
......
...@@ -348,20 +348,30 @@ inline bool G1CollectedHeap::is_obj_ill(const oop obj) const { ...@@ -348,20 +348,30 @@ inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
return is_obj_ill(obj, heap_region_containing(obj)); return is_obj_ill(obj, heap_region_containing(obj));
} }
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
assert(_hrm.at(region)->startsHumongous(), "Must start a humongous object");
_humongous_reclaim_candidates.set_candidate(region, value);
}
inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
assert(_hrm.at(region)->startsHumongous(), "Must start a humongous object");
return _humongous_reclaim_candidates.is_candidate(region);
}
inline void G1CollectedHeap::set_humongous_is_live(oop obj) { inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
uint region = addr_to_region((HeapWord*)obj); uint region = addr_to_region((HeapWord*)obj);
// We not only set the "live" flag in the humongous_is_live table, but also // Clear the flag in the humongous_reclaim_candidates table. Also
// reset the entry in the _in_cset_fast_test table so that subsequent references // reset the entry in the _in_cset_fast_test table so that subsequent references
// to the same humongous object do not go into the slow path again. // to the same humongous object do not go into the slow path again.
// This is racy, as multiple threads may at the same time enter here, but this // This is racy, as multiple threads may at the same time enter here, but this
// is benign. // is benign.
// During collection we only ever set the "live" flag, and only ever clear the // During collection we only ever clear the "candidate" flag, and only ever clear the
// entry in the in_cset_fast_table. // entry in the in_cset_fast_table.
// We only ever evaluate the contents of these tables (in the VM thread) after // We only ever evaluate the contents of these tables (in the VM thread) after
// having synchronized the worker threads with the VM thread, or in the same // having synchronized the worker threads with the VM thread, or in the same
// thread (i.e. within the VM thread). // thread (i.e. within the VM thread).
if (!_humongous_is_live.is_live(region)) { if (is_humongous_reclaim_candidate(region)) {
_humongous_is_live.set_live(region); set_humongous_reclaim_candidate(region, false);
_in_cset_fast_test.clear_humongous(region); _in_cset_fast_test.clear_humongous(region);
} }
} }
......
...@@ -132,7 +132,8 @@ hotspot_compiler = \ ...@@ -132,7 +132,8 @@ hotspot_compiler = \
sanity/ExecuteInternalVMTests.java sanity/ExecuteInternalVMTests.java
hotspot_gc = \ hotspot_gc = \
sanity/ExecuteInternalVMTests.java sanity/ExecuteInternalVMTests.java \
-gc/g1/TestGreyReclaimedHumongousObjects.java
hotspot_runtime = \ hotspot_runtime = \
sanity/ExecuteInternalVMTests.java sanity/ExecuteInternalVMTests.java
......
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test TestGreyReclaimedHumongousObjects.java
* @bug 8069367
* @requires vm.gc == "G1" | vm.gc == "null"
* @summary Test handling of marked but unscanned reclaimed humongous objects.
* @key gc
* @run main/othervm -XX:+UseG1GC -Xss32m -Xmx128m -XX:G1HeapRegionSize=1m
* -XX:+UnlockExperimentalVMOptions
* -XX:+G1EagerReclaimHumongousObjects
* -XX:+G1EagerReclaimHumongousObjectsWithStaleRefs
* TestGreyReclaimedHumongousObjects 1048576 90
*/
// This test spawns a bunch of threads, each of them rapidly
// allocating large objects and storing them into a circular buffer
// associated with the thread. The circular buffer results in these
// objects becoming dead in fairly short order.
//
// The situation we're trying to provoke is
//
// (1) A humongous object H is marked and added to the mark stack.
//
// (2) An evacuation pause determines H is no longer live, and
// reclaims it. This occurs before concurrent marking has gotten
// around to processing the mark stack entry for H.
//
// (3) Concurrent marking processes the mark stack entry for H. The
// bug is that it would attempt to scan the now dead object.
//
// Unfortunately, this test is *very* sensitive to configuration.
// Among the parameters that affect whether / how often we'll get into
// the desired situation within a reasonable amount of time are:
//
// - THREAD_COUNT: The number of allocating threads.
//
// - OLD_COUNT: The number of objects each thread keeps.
//
// - MAX_MEMORY: The maximum heap size.
//
// - G1HeapRegionSize
//
// - The size of the objects being allocated.
//
// The parameter values specified here:
//
// - THREAD_COUNT = 12
// - OLD_COUNT == 4
// - MAX_MEMORY == 128m
// - G1HeapRegionSize = 1m
// - Object size = 1048576 (2 regions after header overhead and roundup)
//
// seems to work well at provoking the desired state fairly quickly.
// Even relatively small perturbations may change that. The key
// factors seem to be keeping the heap mostly full of live objects but
// having them become dead fairly quickly.
import java.util.Date;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import sun.management.ManagementFactoryHelper;
import com.sun.management.HotSpotDiagnosticMXBean;
import com.sun.management.VMOption;
public class TestGreyReclaimedHumongousObjects {

    // Thread factory giving pool threads recognizable names, for easier
    // diagnosis in logs and thread dumps.
    static class NamedThreadFactory implements ThreadFactory {
        private int threadNum = 0;

        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, THREAD_NAME + (threadNum++));
        }
    }

    // Worker that continuously allocates humongous-sized byte arrays,
    // keeping only the last OLD_COUNT of them alive via a circular
    // buffer.  The rapid death of older arrays is what makes regions
    // eligible for eager reclaim while concurrent marking may still
    // hold mark-stack entries for them.
    static class Runner extends Thread {
        private final Date startDate = new Date();
        private final int obj_size;          // bytes per allocated array
        private final Object[] old_garbage;  // circular keep-alive buffer
        private int old_index = 0;           // always in [0, OLD_COUNT)

        public Runner(int obj_size) {
            this.obj_size = obj_size;
            old_garbage = new Object[OLD_COUNT];
        }

        private void allocate_garbage() {
            byte[] garbage = new byte[obj_size];
            // Modular update instead of Math.abs(++old_index % OLD_COUNT):
            // the abs form yields a negative index once old_index overflows
            // to Integer.MIN_VALUE. Value-identical before overflow.
            old_index = (old_index + 1) % OLD_COUNT;
            old_garbage[old_index] = garbage;
        }

        @Override
        public void run() {
            try {
                while (!isInterrupted()) {
                    allocate_garbage();
                    Thread.sleep(0); // Yield, to ensure interruptable.
                }
            } catch (InterruptedException e) {
                System.out.println("Aborted after "
                                   + (new Date().getTime() - startDate.getTime())
                                   + " ms");
                interrupt();
            }
        }
    }

    // Entry point. args[0] = object size in bytes, args[1] = seconds to run.
    // Spawns THREAD_COUNT allocator threads, lets them churn for the
    // requested duration, then shuts them down.
    public static void main(String[] args) throws Exception {
        HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();

        System.out.println("Max memory= " + MAX_MEMORY + " bytes");

        int obj_size = 0;
        long seconds_to_run = 0;
        if (args.length != 2) {
            // Both arguments are mandatory; the old message only named the first.
            throw new RuntimeException("Object size and seconds-to-run arguments must be supplied");
        } else {
            obj_size = Integer.parseInt(args[0]);
            seconds_to_run = Integer.parseInt(args[1]);
        }
        System.out.println("Objects size= " + obj_size + " bytes");
        System.out.println("Seconds to run=" + seconds_to_run);

        // Sanity-check that the requested size really is humongous
        // (>= half a region) under the current G1HeapRegionSize.
        int region_size =
            Integer.parseInt(diagnostic.getVMOption("G1HeapRegionSize").getValue());
        if (obj_size < (region_size / 2)) {
            throw new RuntimeException("Object size " + obj_size +
                                       " is not humongous with region size " + region_size);
        }

        ExecutorService executor =
            Executors.newFixedThreadPool(THREAD_COUNT, new NamedThreadFactory());
        System.out.println("Starting " + THREAD_COUNT + " threads");

        for (int i = 0; i < THREAD_COUNT; i++) {
            executor.execute(new Runner(obj_size));
        }

        Thread.sleep(seconds_to_run * 1000);
        executor.shutdownNow();

        if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            System.err.println("Thread pool did not terminate after 10 seconds after shutdown");
        }
    }

    private static final long MAX_MEMORY = Runtime.getRuntime().maxMemory();
    private static final int OLD_COUNT = 4;     // live objects kept per thread
    private static final int THREAD_COUNT = 12; // allocating threads
    private static final String THREAD_NAME = "TestGreyRH-";
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册