提交 0c235037 编写于 作者: A apetrusenko

6543938: G1: remove the concept of popularity

Reviewed-by: iveresov, tonyp
上级 c59f1c5c
......@@ -4234,7 +4234,6 @@ void MacroAssembler::g1_write_barrier_pre(Register obj, Register index, int offs
static jint num_ct_writes = 0;
static jint num_ct_writes_filtered_in_hr = 0;
static jint num_ct_writes_filtered_null = 0;
static jint num_ct_writes_filtered_pop = 0;
static G1CollectedHeap* g1 = NULL;
static Thread* count_ct_writes(void* filter_val, void* new_val) {
......@@ -4247,25 +4246,19 @@ static Thread* count_ct_writes(void* filter_val, void* new_val) {
if (g1 == NULL) {
g1 = G1CollectedHeap::heap();
}
if ((HeapWord*)new_val < g1->popular_object_boundary()) {
Atomic::inc(&num_ct_writes_filtered_pop);
}
}
if ((num_ct_writes % 1000000) == 0) {
jint num_ct_writes_filtered =
num_ct_writes_filtered_in_hr +
num_ct_writes_filtered_null +
num_ct_writes_filtered_pop;
num_ct_writes_filtered_null;
tty->print_cr("%d potential CT writes: %5.2f%% filtered\n"
" (%5.2f%% intra-HR, %5.2f%% null, %5.2f%% popular).",
" (%5.2f%% intra-HR, %5.2f%% null).",
num_ct_writes,
100.0*(float)num_ct_writes_filtered/(float)num_ct_writes,
100.0*(float)num_ct_writes_filtered_in_hr/
(float)num_ct_writes,
100.0*(float)num_ct_writes_filtered_null/
(float)num_ct_writes,
100.0*(float)num_ct_writes_filtered_pop/
(float)num_ct_writes);
}
return Thread::current();
......
......@@ -277,8 +277,6 @@ printHeapRegion(HeapRegion *hr) {
gclog_or_tty->print("H: ");
if (hr->in_collection_set())
gclog_or_tty->print("CS: ");
if (hr->popular())
gclog_or_tty->print("pop: ");
gclog_or_tty->print_cr("Region " PTR_FORMAT " (%s%s) "
"[" PTR_FORMAT ", " PTR_FORMAT"] "
"Used: " SIZE_FORMAT "K, garbage: " SIZE_FORMAT "K.",
......
......@@ -29,7 +29,6 @@
class HeapRegion;
class HeapRegionSeq;
class HeapRegionList;
class PermanentGenerationSpec;
class GenerationSpec;
class OopsInHeapRegionClosure;
......@@ -143,7 +142,6 @@ class G1CollectedHeap : public SharedHeap {
friend class VM_GenCollectForPermanentAllocation;
friend class VM_G1CollectFull;
friend class VM_G1IncCollectionPause;
friend class VM_G1PopRegionCollectionPause;
friend class VMStructs;
// Closures used in implementation.
......@@ -253,10 +251,6 @@ private:
// than the current allocation region.
size_t _summary_bytes_used;
// Summary information about popular objects; method to print it.
NumberSeq _pop_obj_rc_at_copy;
void print_popularity_summary_info() const;
// This is used for a quick test on whether a reference points into
// the collection set or not. Basically, we have an array, with one
// byte per region, and that byte denotes whether the corresponding
......@@ -447,10 +441,8 @@ protected:
virtual void do_collection_pause();
// The guts of the incremental collection pause, executed by the vm
// thread. If "popular_region" is non-NULL, this pause should evacuate
// this single region whose remembered set has gotten large, moving
// any popular objects to one of the popular regions.
virtual void do_collection_pause_at_safepoint(HeapRegion* popular_region);
// thread.
virtual void do_collection_pause_at_safepoint();
// Actually do the work of evacuating the collection set.
virtual void evacuate_collection_set();
......@@ -625,67 +617,10 @@ protected:
SubTasksDone* _process_strong_tasks;
// Allocate space to hold a popular object. Result is guaranteed below
// "popular_object_boundary()". Note: CURRENTLY halts the system if we
// run out of space to hold popular objects.
HeapWord* allocate_popular_object(size_t word_size);
// The boundary between popular and non-popular objects.
HeapWord* _popular_object_boundary;
HeapRegionList* _popular_regions_to_be_evacuated;
// Compute which objects in "single_region" are popular. If any are,
// evacuate them to a popular region, leaving behind forwarding pointers,
// and select "popular_region" as the single collection set region.
// Otherwise, leave the collection set null.
void popularity_pause_preamble(HeapRegion* populer_region);
// Compute which objects in "single_region" are popular, and evacuate
// them to a popular region, leaving behind forwarding pointers.
// Returns "true" if at least one popular object is discovered and
// evacuated. In any case, "*max_rc" is set to the maximum reference
// count of an object in the region.
bool compute_reference_counts_and_evac_popular(HeapRegion* populer_region,
size_t* max_rc);
// Subroutines used in the above.
bool _rc_region_above;
size_t _rc_region_diff;
jint* obj_rc_addr(oop obj) {
uintptr_t obj_addr = (uintptr_t)obj;
if (_rc_region_above) {
jint* res = (jint*)(obj_addr + _rc_region_diff);
assert((uintptr_t)res > obj_addr, "RC region is above.");
return res;
} else {
jint* res = (jint*)(obj_addr - _rc_region_diff);
assert((uintptr_t)res < obj_addr, "RC region is below.");
return res;
}
}
jint obj_rc(oop obj) {
return *obj_rc_addr(obj);
}
void inc_obj_rc(oop obj) {
(*obj_rc_addr(obj))++;
}
void atomic_inc_obj_rc(oop obj);
// Number of popular objects and bytes (latter is cheaper!).
size_t pop_object_used_objs();
size_t pop_object_used_bytes();
// Index of the popular region in which allocation is currently being
// done.
int _cur_pop_hr_index;
// List of regions which require zero filling.
UncleanRegionList _unclean_region_list;
bool _unclean_regions_coming;
bool check_age_cohort_well_formed_work(int a, HeapRegion* hr);
public:
void set_refine_cte_cl_concurrency(bool concurrent);
......@@ -1066,21 +1001,6 @@ public:
// words.
virtual size_t large_typearray_limit();
// All popular objects are guaranteed to have addresses below this
// boundary.
HeapWord* popular_object_boundary() {
return _popular_object_boundary;
}
// Declare the region as one that should be evacuated because its
// remembered set is too large.
void schedule_popular_region_evac(HeapRegion* r);
// If there is a popular region to evacuate it, remove it from the list
// and return it.
HeapRegion* popular_region_to_evac();
// Evacuate the given popular region.
void evac_popular_region(HeapRegion* r);
// Returns "true" iff the given word_size is "very large".
static bool isHumongous(size_t word_size) {
return word_size >= VeryLargeInWords;
......
......@@ -47,7 +47,6 @@ public: \
}
class MainBodySummary;
class PopPreambleSummary;
class PauseSummary: public CHeapObj {
define_num_seq(total)
......@@ -55,7 +54,6 @@ class PauseSummary: public CHeapObj {
public:
virtual MainBodySummary* main_body_summary() { return NULL; }
virtual PopPreambleSummary* pop_preamble_summary() { return NULL; }
};
class MainBodySummary: public CHeapObj {
......@@ -75,36 +73,13 @@ class MainBodySummary: public CHeapObj {
define_num_seq(clear_ct) // parallel only
};
class PopPreambleSummary: public CHeapObj {
define_num_seq(pop_preamble)
define_num_seq(pop_update_rs)
define_num_seq(pop_scan_rs)
define_num_seq(pop_closure_app)
define_num_seq(pop_evacuation)
define_num_seq(pop_other)
};
class NonPopSummary: public PauseSummary,
public MainBodySummary {
public:
virtual MainBodySummary* main_body_summary() { return this; }
};
class PopSummary: public PauseSummary,
public MainBodySummary,
public PopPreambleSummary {
class Summary: public PauseSummary,
public MainBodySummary {
public:
virtual MainBodySummary* main_body_summary() { return this; }
virtual PopPreambleSummary* pop_preamble_summary() { return this; }
};
class NonPopAbandonedSummary: public PauseSummary {
};
class PopAbandonedSummary: public PauseSummary,
public PopPreambleSummary {
public:
virtual PopPreambleSummary* pop_preamble_summary() { return this; }
class AbandonedSummary: public PauseSummary {
};
class G1CollectorPolicy: public CollectorPolicy {
......@@ -146,10 +121,6 @@ protected:
double _cur_satb_drain_time_ms;
double _cur_clear_ct_time_ms;
bool _satb_drain_time_set;
double _cur_popular_preamble_start_ms;
double _cur_popular_preamble_time_ms;
double _cur_popular_compute_rc_time_ms;
double _cur_popular_evac_time_ms;
double _cur_CH_strong_roots_end_sec;
double _cur_CH_strong_roots_dur_ms;
......@@ -173,10 +144,8 @@ protected:
TruncatedSeq* _concurrent_mark_remark_times_ms;
TruncatedSeq* _concurrent_mark_cleanup_times_ms;
NonPopSummary* _non_pop_summary;
PopSummary* _pop_summary;
NonPopAbandonedSummary* _non_pop_abandoned_summary;
PopAbandonedSummary* _pop_abandoned_summary;
Summary* _summary;
AbandonedSummary* _abandoned_summary;
NumberSeq* _all_pause_times_ms;
NumberSeq* _all_full_gc_times_ms;
......@@ -210,18 +179,6 @@ protected:
double* _par_last_obj_copy_times_ms;
double* _par_last_termination_times_ms;
// there are two pases during popular pauses, so we need to store
// somewhere the results of the first pass
double* _pop_par_last_update_rs_start_times_ms;
double* _pop_par_last_update_rs_times_ms;
double* _pop_par_last_update_rs_processed_buffers;
double* _pop_par_last_scan_rs_start_times_ms;
double* _pop_par_last_scan_rs_times_ms;
double* _pop_par_last_closure_app_times_ms;
double _pop_compute_rc_start;
double _pop_evac_start;
// indicates that we are in young GC mode
bool _in_young_gc_mode;
......@@ -634,8 +591,7 @@ protected:
NumberSeq* calc_other_times_ms) const;
void print_summary (PauseSummary* stats) const;
void print_abandoned_summary(PauseSummary* non_pop_summary,
PauseSummary* pop_summary) const;
void print_abandoned_summary(PauseSummary* summary) const;
void print_summary (int level, const char* str, NumberSeq* seq) const;
void print_summary_sd (int level, const char* str, NumberSeq* seq) const;
......@@ -856,9 +812,6 @@ public:
virtual void record_collection_pause_start(double start_time_sec,
size_t start_used);
virtual void record_popular_pause_preamble_start();
virtual void record_popular_pause_preamble_end();
// Must currently be called while the world is stopped.
virtual void record_concurrent_mark_init_start();
virtual void record_concurrent_mark_init_end();
......@@ -881,7 +834,7 @@ public:
virtual void record_collection_pause_end_CH_strong_roots();
virtual void record_collection_pause_end_G1_strong_roots();
virtual void record_collection_pause_end(bool popular, bool abandoned);
virtual void record_collection_pause_end(bool abandoned);
// Record the fact that a full collection occurred.
virtual void record_full_collection_start();
......@@ -990,12 +943,6 @@ public:
_cur_aux_times_ms[i] += ms;
}
void record_pop_compute_rc_start();
void record_pop_compute_rc_end();
void record_pop_evac_start();
void record_pop_evac_end();
// Record the fact that "bytes" bytes allocated in a region.
void record_before_bytes(size_t bytes);
void record_after_bytes(size_t bytes);
......@@ -1008,9 +955,7 @@ public:
// Choose a new collection set. Marks the chosen regions as being
// "in_collection_set", and links them together. The head and number of
// the collection set are available via access methods.
// If "pop_region" is non-NULL, it is a popular region that has already
// been added to the collection set.
virtual void choose_collection_set(HeapRegion* pop_region = NULL) = 0;
virtual void choose_collection_set() = 0;
void clear_collection_set() { _collection_set = NULL; }
......@@ -1018,9 +963,6 @@ public:
// current collection set.
HeapRegion* collection_set() { return _collection_set; }
// Sets the collection set to the given single region.
virtual void set_single_region_collection_set(HeapRegion* hr);
// The number of elements in the current collection set.
size_t collection_set_size() { return _collection_set_size; }
......@@ -1203,7 +1145,7 @@ class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
// If the estimate is less than desirable, resize if possible.
void expand_if_possible(size_t numRegions);
virtual void choose_collection_set(HeapRegion* pop_region = NULL);
virtual void choose_collection_set();
virtual void record_collection_pause_start(double start_time_sec,
size_t start_used);
virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
......@@ -1214,9 +1156,8 @@ public:
G1CollectorPolicy_BestRegionsFirst() {
_collectionSetChooser = new CollectionSetChooser();
}
void record_collection_pause_end(bool popular, bool abandoned);
void record_collection_pause_end(bool abandoned);
bool should_do_collection_pause(size_t word_size);
virtual void set_single_region_collection_set(HeapRegion* hr);
// This is not needed any more, after the CSet choosing code was
// changed to use the pause prediction work. But let's leave the
// hook in just in case.
......
......@@ -157,7 +157,6 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
class G1PrepareCompactClosure: public HeapRegionClosure {
ModRefBarrierSet* _mrbs;
CompactPoint _cp;
bool _popular_only;
void free_humongous_region(HeapRegion* hr) {
HeapWord* bot = hr->bottom();
......@@ -172,17 +171,11 @@ class G1PrepareCompactClosure: public HeapRegionClosure {
}
public:
G1PrepareCompactClosure(CompactibleSpace* cs, bool popular_only) :
G1PrepareCompactClosure(CompactibleSpace* cs) :
_cp(NULL, cs, cs->initialize_threshold()),
_mrbs(G1CollectedHeap::heap()->mr_bs()),
_popular_only(popular_only)
_mrbs(G1CollectedHeap::heap()->mr_bs())
{}
bool doHeapRegion(HeapRegion* hr) {
if (_popular_only && !hr->popular())
return true; // terminate early
else if (!_popular_only && hr->popular())
return false; // skip this one.
if (hr->isHumongous()) {
if (hr->startsHumongous()) {
oop obj = oop(hr->bottom());
......@@ -203,20 +196,15 @@ public:
return false;
}
};
// Stolen verbatim from g1CollectedHeap.cpp
// Finds the first HeapRegion.
class FindFirstRegionClosure: public HeapRegionClosure {
HeapRegion* _a_region;
bool _find_popular;
public:
FindFirstRegionClosure(bool find_popular) :
_a_region(NULL), _find_popular(find_popular) {}
FindFirstRegionClosure() : _a_region(NULL) {}
bool doHeapRegion(HeapRegion* r) {
if (r->popular() == _find_popular) {
_a_region = r;
return true;
} else {
return false;
}
_a_region = r;
return true;
}
HeapRegion* result() { return _a_region; }
};
......@@ -242,30 +230,15 @@ void G1MarkSweep::mark_sweep_phase2() {
TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
GenMarkSweep::trace("2");
// First we compact the popular regions.
if (G1NumPopularRegions > 0) {
CompactibleSpace* sp = g1h->first_compactible_space();
FindFirstRegionClosure cl(true /*find_popular*/);
g1h->heap_region_iterate(&cl);
HeapRegion *r = cl.result();
assert(r->popular(), "should have found a popular region.");
assert(r == sp, "first popular heap region should "
"== first compactible space");
G1PrepareCompactClosure blk(sp, true/*popular_only*/);
g1h->heap_region_iterate(&blk);
}
// Now we do the regular regions.
FindFirstRegionClosure cl(false /*find_popular*/);
FindFirstRegionClosure cl;
g1h->heap_region_iterate(&cl);
HeapRegion *r = cl.result();
assert(!r->popular(), "should have founda non-popular region.");
CompactibleSpace* sp = r;
if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
sp = r->next_compaction_space();
}
G1PrepareCompactClosure blk(sp, false/*popular_only*/);
G1PrepareCompactClosure blk(sp);
g1h->heap_region_iterate(&blk);
CompactPoint perm_cp(pg, NULL, NULL);
......
......@@ -580,9 +580,7 @@ public:
virtual void do_oop(oop* p) {
HeapRegion* to = _g1->heap_region_containing(*p);
if (to->in_collection_set()) {
if (to->rem_set()->add_reference(p, 0)) {
_g1->schedule_popular_region_evac(to);
}
to->rem_set()->add_reference(p, 0);
}
}
};
......@@ -1024,9 +1022,8 @@ void HRInto_G1RemSet::print_summary_info() {
gclog_or_tty->print_cr(" %d occupied cards represented.",
blk.occupied());
gclog_or_tty->print_cr(" Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
" %s, cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
(blk.max_mem_sz_region()->popular() ? "POP" : ""),
(blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
(blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
gclog_or_tty->print_cr(" Did %d coarsenings.",
......
......@@ -65,7 +65,6 @@ inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
HeapRegion* to = _g1->heap_region_containing(obj);
// The test below could be optimized by applying a bit op to "to" and "from".
if (to != NULL && from != NULL && from != to) {
bool update_delayed = false;
// There is a tricky infinite loop if we keep pushing
// self forwarding pointers onto our _new_refs list.
// The _par_traversal_in_progress flag is true during the collection pause,
......@@ -77,10 +76,7 @@ inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
// or processed (if an evacuation failure occurs) at the end
// of the collection.
// See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do().
update_delayed = true;
}
if (!to->popular() && !update_delayed) {
} else {
#if G1_REM_SET_LOGGING
gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
" for region [" PTR_FORMAT ", " PTR_FORMAT ")",
......@@ -88,9 +84,7 @@ inline void HRInto_G1RemSet::par_write_ref(HeapRegion* from, oop* p, int tid) {
to->bottom(), to->end());
#endif
assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
if (to->rem_set()->add_reference(p, tid)) {
_g1->schedule_popular_region_evac(to);
}
to->rem_set()->add_reference(p, tid);
}
}
}
......
......@@ -185,15 +185,9 @@
product(intx, G1InefficientPausePct, 80, \
"Threshold of 'inefficient' pauses (as % of cum efficiency).") \
\
product(intx, G1RSPopLimit, 32768, \
"Limit that defines popularity. Should go away! XXX") \
\
develop(bool, G1RSCountHisto, false, \
"If true, print a histogram of RS occupancies after each pause") \
\
product(intx, G1ObjPopLimit, 256, \
"Limit that defines popularity for an object.") \
\
product(bool, G1TraceFileOverwrite, false, \
"Allow the trace file to be overwritten") \
\
......@@ -201,16 +195,6 @@
"When > 0, print the occupancies of the <n> best and worst " \
"regions.") \
\
develop(bool, G1TracePopularity, false, \
"When true, provide detailed tracing of popularity.") \
\
product(bool, G1SummarizePopularity, false, \
"When true, provide end-of-run-summarization of popularity.") \
\
product(intx, G1NumPopularRegions, 1, \
"Number of regions reserved to hold popular objects. " \
"Should go away later.") \
\
develop(bool, G1PrintParCleanupStats, false, \
"When true, print extra stats about parallel cleanup.") \
\
......
......@@ -104,7 +104,6 @@ public:
HeapRegion* to = _g1h->heap_region_containing(*p);
if (from != NULL && to != NULL &&
from != to &&
!to->popular() &&
!to->isHumongous()) {
jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
jbyte cv_field = *_bs->byte_for_const(p);
......@@ -285,8 +284,6 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
}
zero_marked_bytes();
set_sort_index(-1);
if ((uintptr_t)bottom() >= (uintptr_t)g1h->popular_object_boundary())
set_popular(false);
_offsets.resize(HeapRegion::GrainWords);
init_top_at_mark_start();
......@@ -371,7 +368,6 @@ HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
_next_in_special_set(NULL), _orig_end(NULL),
_claimed(InitialClaimValue), _evacuation_failed(false),
_prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
_popularity(NotPopular),
_young_type(NotYoung), _next_young_region(NULL),
_young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
_rem_set(NULL), _zfs(NotZeroFilled)
......
......@@ -238,15 +238,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// See "sort_index" method. -1 means is not in the array.
int _sort_index;
// Means it has (or at least had) a very large RS, and should not be
// considered for membership in a collection set.
enum PopularityState {
NotPopular,
PopularPending,
Popular
};
PopularityState _popularity;
// <PREDICTION>
double _gc_efficiency;
// </PREDICTION>
......@@ -433,10 +424,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
_next_in_special_set = r;
}
bool is_reserved() {
return popular();
}
bool is_on_free_list() {
return _is_on_free_list;
}
......@@ -609,23 +596,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
init_top_at_mark_start();
}
bool popular() { return _popularity == Popular; }
void set_popular(bool b) {
if (b) {
_popularity = Popular;
} else {
_popularity = NotPopular;
}
}
bool popular_pending() { return _popularity == PopularPending; }
void set_popular_pending(bool b) {
if (b) {
_popularity = PopularPending;
} else {
_popularity = NotPopular;
}
}
// <PREDICTION>
void calc_gc_efficiency(void);
double gc_efficiency() { return _gc_efficiency;}
......
......@@ -188,32 +188,6 @@ private:
// the _outgoing_region_map.
void clear_outgoing_entries();
#if MAYBE
// Audit the given card index.
void audit_card(size_t card_num, HeapRegion* hr, u2* rc_arr,
HeapRegionRemSet* empty_cards, size_t* one_obj_cards);
// Assumes that "audit_stage1" has been called for "hr", to set up
// "shadow" and "new_rs" appropriately. Identifies individual popular
// objects; returns "true" if any are found.
bool audit_find_pop(HeapRegion* hr, u2* rc_arr);
// Assumes that "audit_stage1" has been called for "hr", to set up
// "shadow" and "new_rs" appropriately. Identifies individual popular
// objects, and determines the number of entries in "new_rs" if any such
// popular objects are ignored. If this is sufficiently small, returns
// "false" to indicate that a constraint should not be introduced.
// Otherwise, returns "true" to indicate that we should go ahead with
// adding the constraint.
bool audit_stag(HeapRegion* hr, u2* rc_arr);
u2* alloc_rc_array();
SeqHeapRegionRemSet* audit_post(u2* rc_arr, size_t multi_obj_crds,
SeqHeapRegionRemSet* empty_cards);
#endif
enum ParIterState { Unclaimed, Claimed, Complete };
ParIterState _iter_state;
......@@ -261,16 +235,14 @@ public:
/* Used in the sequential case. Returns "true" iff this addition causes
the size limit to be reached. */
bool add_reference(oop* from) {
void add_reference(oop* from) {
_other_regions.add_reference(from);
return false;
}
/* Used in the parallel case. Returns "true" iff this addition causes
the size limit to be reached. */
bool add_reference(oop* from, int tid) {
void add_reference(oop* from, int tid) {
_other_regions.add_reference(from, tid);
return false;
}
// Records the fact that the current region contains an outgoing
......@@ -338,20 +310,6 @@ public:
}
void print() const;
#if MAYBE
// We are about to introduce a constraint, requiring the collection time
// of the region owning this RS to be <= "hr", and forgetting pointers
// from the owning region to "hr." Before doing so, examines this rem
// set for pointers to "hr", possibly identifying some popular objects.,
// and possibly finding some cards to no longer contain pointers to "hr",
//
// These steps may prevent the the constraint from being necessary; in
// which case returns a set of cards now thought to contain no pointers
// into HR. In the normal (I assume) case, returns NULL, indicating that
// we should go ahead and add the constraint.
virtual SeqHeapRegionRemSet* audit(HeapRegion* hr) = 0;
#endif
// Called during a stop-world phase to perform any deferred cleanups.
// The second version may be called by parallel threads after they finish
// collection work.
......
......@@ -74,7 +74,6 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
// [first, cur)
HeapRegion* curhr = _regions.at(cur);
if (curhr->is_empty()
&& !curhr->is_reserved()
&& (first == cur
|| (_regions.at(cur-1)->end() ==
curhr->bottom()))) {
......@@ -121,35 +120,27 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
}
}
void HeapRegionSeq::print_empty_runs(bool reserved_are_empty) {
void HeapRegionSeq::print_empty_runs() {
int empty_run = 0;
int n_empty = 0;
bool at_least_one_reserved = false;
int empty_run_start;
for (int i = 0; i < _regions.length(); i++) {
HeapRegion* r = _regions.at(i);
if (r->continuesHumongous()) continue;
if (r->is_empty() && (reserved_are_empty || !r->is_reserved())) {
if (r->is_empty()) {
assert(!r->isHumongous(), "H regions should not be empty.");
if (empty_run == 0) empty_run_start = i;
empty_run++;
n_empty++;
if (r->is_reserved()) {
at_least_one_reserved = true;
}
} else {
if (empty_run > 0) {
gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
if (reserved_are_empty && at_least_one_reserved)
gclog_or_tty->print("(R)");
empty_run = 0;
at_least_one_reserved = false;
}
}
}
if (empty_run > 0) {
gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
if (reserved_are_empty && at_least_one_reserved) gclog_or_tty->print("(R)");
}
gclog_or_tty->print_cr(" [tot = %d]", n_empty);
}
......@@ -193,7 +184,6 @@ size_t HeapRegionSeq::free_suffix() {
int cur = first;
while (cur >= 0 &&
(_regions.at(cur)->is_empty()
&& !_regions.at(cur)->is_reserved()
&& (first == cur
|| (_regions.at(cur+1)->bottom() ==
_regions.at(cur)->end())))) {
......
......@@ -104,8 +104,7 @@ class HeapRegionSeq: public CHeapObj {
void print();
// Prints out runs of empty regions. If the arg is "true" reserved
// (popular regions are considered "empty".
void print_empty_runs(bool reserved_are_empty);
// Prints out runs of empty regions.
void print_empty_runs();
};
......@@ -43,16 +43,9 @@ void VM_G1IncCollectionPause::doit() {
JvmtiGCForAllocationMarker jgcm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCCauseSetter x(g1h, GCCause::_g1_inc_collection_pause);
g1h->do_collection_pause_at_safepoint(NULL);
g1h->do_collection_pause_at_safepoint();
}
void VM_G1PopRegionCollectionPause::doit() {
JvmtiGCForAllocationMarker jgcm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->do_collection_pause_at_safepoint(_pop_region);
}
void VM_CGC_Operation::doit() {
gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
......
......@@ -77,20 +77,6 @@ class VM_G1IncCollectionPause: public VM_GC_Operation {
}
};
class VM_G1PopRegionCollectionPause: public VM_GC_Operation {
HeapRegion* _pop_region;
public:
VM_G1PopRegionCollectionPause(int gc_count_before, HeapRegion* pop_region) :
VM_GC_Operation(gc_count_before),
_pop_region(pop_region)
{}
virtual VMOp_Type type() const { return VMOp_G1PopRegionCollectionPause; }
virtual void doit();
virtual const char* name() const {
return "garbage-first popular region collection pause";
}
};
// Concurrent GC stop-the-world operations such as initial and final mark;
// consider sharing these with CMS's counterparts.
class VM_CGC_Operation: public VM_Operation {
......
......@@ -60,7 +60,7 @@ class GCCause : public AllStatic {
_old_generation_too_full_to_scavenge,
_adaptive_size_policy,
_g1_inc_collection_pause, _g1_pop_region_collection_pause,
_g1_inc_collection_pause,
_last_ditch_collection,
_last_gc_cause
......
......@@ -59,7 +59,6 @@
template(G1CollectFull) \
template(G1CollectForAllocation) \
template(G1IncCollectionPause) \
template(G1PopRegionCollectionPause) \
template(EnableBiasedLocking) \
template(RevokeBias) \
template(BulkRevokeBias) \
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册