Commit 5c68cb67 authored by: T tonyp

Merge

......@@ -44,7 +44,7 @@
//
// CMS Bit Map Wrapper
CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter):
CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter) :
_bm((uintptr_t*)NULL,0),
_shifter(shifter) {
_bmStartWord = (HeapWord*)(rs.base());
......@@ -1530,10 +1530,42 @@ public:
FreeRegionList* local_cleanup_list,
OldRegionSet* old_proxy_set,
HumongousRegionSet* humongous_proxy_set,
HRRSCleanupTask* hrrs_cleanup_task);
HRRSCleanupTask* hrrs_cleanup_task) :
_g1(g1), _worker_num(worker_num),
_max_live_bytes(0), _regions_claimed(0),
_freed_bytes(0),
_claimed_region_time(0.0), _max_region_time(0.0),
_local_cleanup_list(local_cleanup_list),
_old_proxy_set(old_proxy_set),
_humongous_proxy_set(humongous_proxy_set),
_hrrs_cleanup_task(hrrs_cleanup_task) { }
size_t freed_bytes() { return _freed_bytes; }
bool doHeapRegion(HeapRegion *r);
bool doHeapRegion(HeapRegion *hr) {
// We use a claim value of zero here because all regions
// were claimed with value 1 in the FinalCount task.
hr->reset_gc_time_stamp();
if (!hr->continuesHumongous()) {
double start = os::elapsedTime();
_regions_claimed++;
hr->note_end_of_marking();
_max_live_bytes += hr->max_live_bytes();
_g1->free_region_if_empty(hr,
&_freed_bytes,
_local_cleanup_list,
_old_proxy_set,
_humongous_proxy_set,
_hrrs_cleanup_task,
true /* par */);
double region_time = (os::elapsedTime() - start);
_claimed_region_time += region_time;
if (region_time > _max_region_time) {
_max_region_time = region_time;
}
}
return false;
}
size_t max_live_bytes() { return _max_live_bytes; }
size_t regions_claimed() { return _regions_claimed; }
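
> Note: the inlined `doHeapRegion()` above follows a simple per-region accounting pattern: time each claimed region, accumulate the total, and track the maximum. A standalone sketch of that pattern (not HotSpot code; `RegionStats` and `elapsed_seconds()` are stand-ins for the closure's fields and `os::elapsedTime()`):

```cpp
#include <algorithm>
#include <chrono>
#include <cstdio>

static double elapsed_seconds() {
  using namespace std::chrono;
  return duration<double>(steady_clock::now().time_since_epoch()).count();
}

struct RegionStats {
  int    regions_claimed     = 0;
  double claimed_region_time = 0.0;
  double max_region_time     = 0.0;

  void note_region() {
    double start = elapsed_seconds();
    regions_claimed++;
    // ... per-region work (note_end_of_marking, free-if-empty) goes here ...
    double region_time = elapsed_seconds() - start;
    claimed_region_time += region_time;
    max_region_time = std::max(max_region_time, region_time);
  }
};

int main() {
  RegionStats stats;
  for (int i = 0; i < 4; i++) stats.note_region();
  std::printf("claimed=%d total=%.6fs max=%.6fs\n",
              stats.regions_claimed, stats.claimed_region_time,
              stats.max_region_time);
}
```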
......@@ -1644,47 +1676,6 @@ public:
};
G1NoteEndOfConcMarkClosure::
G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
int worker_num,
FreeRegionList* local_cleanup_list,
OldRegionSet* old_proxy_set,
HumongousRegionSet* humongous_proxy_set,
HRRSCleanupTask* hrrs_cleanup_task)
: _g1(g1), _worker_num(worker_num),
_max_live_bytes(0), _regions_claimed(0),
_freed_bytes(0),
_claimed_region_time(0.0), _max_region_time(0.0),
_local_cleanup_list(local_cleanup_list),
_old_proxy_set(old_proxy_set),
_humongous_proxy_set(humongous_proxy_set),
_hrrs_cleanup_task(hrrs_cleanup_task) { }
bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *hr) {
// We use a claim value of zero here because all regions
// were claimed with value 1 in the FinalCount task.
hr->reset_gc_time_stamp();
if (!hr->continuesHumongous()) {
double start = os::elapsedTime();
_regions_claimed++;
hr->note_end_of_marking();
_max_live_bytes += hr->max_live_bytes();
_g1->free_region_if_empty(hr,
&_freed_bytes,
_local_cleanup_list,
_old_proxy_set,
_humongous_proxy_set,
_hrrs_cleanup_task,
true /* par */);
double region_time = (os::elapsedTime() - start);
_claimed_region_time += region_time;
if (region_time > _max_region_time) {
_max_region_time = region_time;
}
}
return false;
}
void ConcurrentMark::cleanup() {
// world is stopped at this checkpoint
assert(SafepointSynchronize::is_at_safepoint(),
......@@ -1991,16 +1982,12 @@ class G1CMDrainMarkingStackClosure: public VoidClosure {
class G1CMParKeepAliveAndDrainClosure: public OopClosure {
ConcurrentMark* _cm;
CMTask* _task;
CMBitMap* _bitMap;
int _ref_counter_limit;
int _ref_counter;
public:
G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm,
CMTask* task,
CMBitMap* bitMap) :
_cm(cm), _task(task), _bitMap(bitMap),
_ref_counter_limit(G1RefProcDrainInterval)
{
G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) :
_cm(cm), _task(task),
_ref_counter_limit(G1RefProcDrainInterval) {
assert(_ref_counter_limit > 0, "sanity");
_ref_counter = _ref_counter_limit;
}
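
> Note: the closure keeps a countdown (`_ref_counter`) seeded from `G1RefProcDrainInterval`; each processed reference decrements it, and at zero the task drains queued marking work before continuing, which bounds how much work piles up between drains. A minimal standalone sketch of that drain-interval pattern (`std::deque` stands in for the CM task's local queue):

```cpp
#include <cassert>
#include <cstdio>
#include <deque>

class KeepAliveAndDrain {
  std::deque<int>* _stack;   // stand-in for the CM task's local queue
  const int _limit;          // stand-in for G1RefProcDrainInterval
  int _counter;
public:
  KeepAliveAndDrain(std::deque<int>* stack, int limit)
      : _stack(stack), _limit(limit), _counter(limit) {
    assert(_limit > 0 && "sanity");
  }
  void do_oop(int obj) {
    _stack->push_back(obj);                           // "mark" the reference
    if (--_counter == 0) {                            // hit the drain interval
      while (!_stack->empty()) _stack->pop_front();   // drain queued work
      _counter = _limit;                              // re-arm for next batch
    }
  }
};

int main() {
  std::deque<int> stack;
  KeepAliveAndDrain cl(&stack, 4);
  for (int i = 0; i < 10; i++) cl.do_oop(i);
  std::printf("left on stack after draining: %zu\n", stack.size());
}
```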
......@@ -2091,19 +2078,16 @@ class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
CMBitMap* _bitmap;
WorkGang* _workers;
int _active_workers;
public:
G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
ConcurrentMark* cm,
CMBitMap* bitmap,
WorkGang* workers,
int n_workers) :
_g1h(g1h), _cm(cm), _bitmap(bitmap),
_workers(workers), _active_workers(n_workers)
{ }
_g1h(g1h), _cm(cm),
_workers(workers), _active_workers(n_workers) { }
// Executes the given task using concurrent marking worker threads.
virtual void execute(ProcessTask& task);
......@@ -2115,21 +2099,18 @@ class G1CMRefProcTaskProxy: public AbstractGangTask {
ProcessTask& _proc_task;
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
CMBitMap* _bitmap;
public:
G1CMRefProcTaskProxy(ProcessTask& proc_task,
G1CollectedHeap* g1h,
ConcurrentMark* cm,
CMBitMap* bitmap) :
ConcurrentMark* cm) :
AbstractGangTask("Process reference objects in parallel"),
_proc_task(proc_task), _g1h(g1h), _cm(cm), _bitmap(bitmap)
{}
_proc_task(proc_task), _g1h(g1h), _cm(cm) { }
virtual void work(int i) {
CMTask* marking_task = _cm->task(i);
G1CMIsAliveClosure g1_is_alive(_g1h);
G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task, _bitmap);
G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
_proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
......@@ -2139,7 +2120,7 @@ public:
void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
assert(_workers != NULL, "Need parallel worker threads.");
G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
// We need to reset the phase for each task execution so that
// the termination protocol of CMTask::do_marking_step works.
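
> Note: the comment above refers to the parallel termination protocol: a worker that runs out of work offers termination, and the round ends only once every worker has offered, so the shared state must be re-armed before each `execute()`. A much-simplified sketch of the re-arming idea (the real `CMTask::do_marking_step` protocol also lets workers retract offers; this single-shot counter is only an illustration):

```cpp
#include <atomic>
#include <cstdio>

// One shared terminator, reused across rounds of parallel work.
struct Terminator {
  std::atomic<int> offered{0};
  int n_workers = 0;
  void reset_for_round(int workers) { offered = 0; n_workers = workers; }
  // The round is over only when every worker has offered termination.
  bool offer_termination() { return ++offered == n_workers; }
};

int main() {
  Terminator t;
  for (int round = 0; round < 2; round++) {
    t.reset_for_round(3);            // must re-arm before each execution
    for (int w = 0; w < 3; w++)
      if (t.offer_termination()) std::printf("round %d done\n", round);
  }
}
```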
......@@ -2156,8 +2137,7 @@ class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
public:
G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
AbstractGangTask("Enqueue reference objects in parallel"),
_enq_task(enq_task)
{ }
_enq_task(enq_task) { }
virtual void work(int i) {
_enq_task.work(i);
......@@ -2210,7 +2190,7 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
G1CMRefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
G1CMRefProcTaskExecutor par_task_executor(g1h, this,
g1h->workers(), active_workers);
if (rp->processing_is_mt()) {
......@@ -3064,12 +3044,13 @@ void ConcurrentMark::complete_marking_in_collection_set() {
g1h->collection_set_iterate(&cmplt);
if (cmplt.completed()) break;
}
double end_time = os::elapsedTime();
double elapsed_time_ms = (end_time - start) * 1000.0;
g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms);
ClearMarksInHRClosure clr(nextMarkBitMap());
g1h->collection_set_iterate(&clr);
double end_time = os::elapsedTime();
double elapsed_time_ms = (end_time - start) * 1000.0;
g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms);
}
// The next two methods deal with the following optimisation. Some
......
......@@ -176,8 +176,7 @@ void YoungList::push_region(HeapRegion *hr) {
hr->set_next_young_region(_head);
_head = hr;
hr->set_young();
double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
_g1h->g1_policy()->set_region_eden(hr, (int) _length);
++_length;
}
......@@ -190,7 +189,6 @@ void YoungList::add_survivor_region(HeapRegion* hr) {
_survivor_tail = hr;
}
_survivor_head = hr;
++_survivor_length;
}
......@@ -315,16 +313,20 @@ YoungList::reset_auxilary_lists() {
_g1h->g1_policy()->note_start_adding_survivor_regions();
_g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
int young_index_in_cset = 0;
for (HeapRegion* curr = _survivor_head;
curr != NULL;
curr = curr->get_next_young_region()) {
_g1h->g1_policy()->set_region_survivors(curr);
_g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
// The region is a non-empty survivor so let's add it to
// the incremental collection set for the next evacuation
// pause.
_g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
young_index_in_cset += 1;
}
assert((size_t) young_index_in_cset == _survivor_length,
"post-condition");
_g1h->g1_policy()->note_stop_adding_survivor_regions();
_head = _survivor_head;
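
> Note: the loop above renumbers the survivor regions with consecutive collection-set indexes, then checks the post-condition that the count matches the recorded survivor length. A standalone sketch with a hypothetical `Region` node type:

```cpp
#include <cassert>
#include <cstddef>

struct Region {
  Region* next = nullptr;
  int young_index_in_cset = -1;
};

void reset_survivor_indexes(Region* survivor_head, size_t survivor_length) {
  int young_index_in_cset = 0;
  for (Region* curr = survivor_head; curr != nullptr; curr = curr->next) {
    curr->young_index_in_cset = young_index_in_cset;  // consecutive indexes
    young_index_in_cset += 1;
  }
  assert((size_t)young_index_in_cset == survivor_length && "post-condition");
}

int main() {
  Region a, b, c;
  a.next = &b; b.next = &c;
  reset_survivor_indexes(&a, 3);
}
```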
......@@ -3210,8 +3212,6 @@ G1CollectedHeap::doConcurrentMark() {
}
}
// <NEW PREDICTION>
double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
bool young) {
return _g1_policy->predict_region_elapsed_time_ms(hr, young);
......@@ -3251,7 +3251,7 @@ size_t G1CollectedHeap::cards_scanned() {
void
G1CollectedHeap::setup_surviving_young_words() {
guarantee( _surviving_young_words == NULL, "pre-condition" );
size_t array_length = g1_policy()->young_cset_length();
size_t array_length = g1_policy()->young_cset_region_length();
_surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
if (_surviving_young_words == NULL) {
vm_exit_out_of_memory(sizeof(size_t) * array_length,
......@@ -3268,7 +3268,7 @@ G1CollectedHeap::setup_surviving_young_words() {
void
G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
size_t array_length = g1_policy()->young_cset_length();
size_t array_length = g1_policy()->young_cset_region_length();
for (size_t i = 0; i < array_length; ++i)
_surviving_young_words[i] += surv_young_words[i];
}
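
> Note: `update_surviving_young_words()` holds `ParGCRareEvent_lock` for the whole accumulation, so concurrent callers cannot interleave partial updates of the shared array. A standalone equivalent using `std::mutex` and a scoped guard (all names are stand-ins):

```cpp
#include <cstddef>
#include <mutex>
#include <vector>

std::mutex surv_lock;                       // stand-in for ParGCRareEvent_lock
std::vector<size_t> surviving_young_words;  // one slot per young cset region

void update_surviving_young_words(const size_t* surv, size_t len) {
  std::lock_guard<std::mutex> x(surv_lock);  // held until scope exit
  for (size_t i = 0; i < len; ++i)
    surviving_young_words[i] += surv[i];
}

int main() {
  surviving_young_words.assign(3, 0);
  size_t surv[3] = {1, 2, 3};
  update_surviving_young_words(surv, 3);
}
```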
......@@ -3280,8 +3280,6 @@ G1CollectedHeap::cleanup_surviving_young_words() {
_surviving_young_words = NULL;
}
// </NEW PREDICTION>
#ifdef ASSERT
class VerifyCSetClosure: public HeapRegionClosure {
public:
......@@ -4158,7 +4156,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num)
// non-young regions (where the age is -1)
// We also add a few elements at the beginning and at the end in
// an attempt to eliminate cache contention
size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
size_t real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
size_t array_length = PADDING_ELEM_NUM +
real_length +
PADDING_ELEM_NUM;
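
> Note: the `PADDING_ELEM_NUM` elements on either side of the real array exist to reduce false sharing: without them, hot slots owned by different threads could land on the same cache line as neighbouring data. A standalone sketch of the over-allocate-and-offset trick (the 64-byte line size and the helper name are assumptions):

```cpp
#include <cstddef>
#include <cstdlib>

const size_t CACHE_LINE_BYTES = 64;  // assumed cache-line size
const size_t PADDING_ELEM_NUM = CACHE_LINE_BYTES / sizeof(size_t);

// Returns a zeroed array of real_length words with a pad on both sides.
// Callers index 0 .. real_length-1; free via (ptr - PADDING_ELEM_NUM).
size_t* alloc_padded_words(size_t real_length) {
  size_t array_length = PADDING_ELEM_NUM + real_length + PADDING_ELEM_NUM;
  size_t* base = (size_t*)calloc(array_length, sizeof(size_t));
  return base + PADDING_ELEM_NUM;
}

int main() {
  size_t* words = alloc_padded_words(8);
  words[0] = 42;                       // hot slot, padded from neighbours
  free(words - PADDING_ELEM_NUM);
}
```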
......@@ -5595,8 +5593,8 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
if (cur->is_young()) {
int index = cur->young_index_in_cset();
guarantee( index != -1, "invariant" );
guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
assert(index != -1, "invariant");
assert((size_t) index < policy->young_cset_region_length(), "invariant");
size_t words_survived = _surviving_young_words[index];
cur->record_surv_words_in_group(words_survived);
......@@ -5607,7 +5605,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
cur->set_next_young_region(NULL);
} else {
int index = cur->young_index_in_cset();
guarantee( index == -1, "invariant" );
assert(index == -1, "invariant");
}
assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
......@@ -5620,8 +5618,9 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
free_region(cur, &pre_used, &local_free_list, false /* par */);
} else {
cur->uninstall_surv_rate_group();
if (cur->is_young())
if (cur->is_young()) {
cur->set_young_index_in_cset(-1);
}
cur->set_not_young();
cur->set_evacuation_failed(false);
// The region is now considered to be old.
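
> Note: several hunks in this commit downgrade `guarantee()` to `assert()`. In HotSpot, `assert()` is compiled out of product builds while `guarantee()` is checked everywhere, so the change removes these checks from product code paths. A simplified sketch of the two macros' semantics (`my_assert`/`my_guarantee` are illustrative stand-ins, not the real macros):

```cpp
#include <cstdio>
#include <cstdlib>

static void report_vm_error(const char* file, int line, const char* msg) {
  std::fprintf(stderr, "%s:%d: %s\n", file, line, msg);
  std::abort();
}

// Debug-only check: compiled away when ASSERT is not defined.
#ifdef ASSERT
  #define my_assert(p, msg) \
    do { if (!(p)) report_vm_error(__FILE__, __LINE__, msg); } while (0)
#else
  #define my_assert(p, msg)
#endif

// Always-on check, product builds included.
#define my_guarantee(p, msg) \
  do { if (!(p)) report_vm_error(__FILE__, __LINE__, msg); } while (0)

int main() {
  my_assert(1 + 1 == 2, "invariant");     // no-op unless ASSERT is defined
  my_guarantee(1 + 1 == 2, "invariant");  // checked in every build
}
```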
......@@ -5722,7 +5721,6 @@ void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
assert(heap_lock_held_for_gc(),
"the heap lock should already be held by or for this thread");
_young_list->push_region(hr);
g1_policy()->set_region_short_lived(hr);
}
class NoYoungRegionsClosure: public HeapRegionClosure {
......@@ -5880,7 +5878,6 @@ HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
HeapRegion* new_alloc_region = new_region(word_size,
false /* do_expand */);
if (new_alloc_region != NULL) {
g1_policy()->update_region_num(true /* next_is_young */);
set_region_short_lived_locked(new_alloc_region);
_hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
return new_alloc_region;
......
......@@ -1610,16 +1610,12 @@ public:
public:
void stop_conc_gc_threads();
// <NEW PREDICTION>
double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
void check_if_region_is_too_expensive(double predicted_time_ms);
size_t pending_card_num();
size_t max_pending_card_num();
size_t cards_scanned();
// </NEW PREDICTION>
protected:
size_t _max_heap_capacity;
};
......
......@@ -85,9 +85,6 @@ public:
class G1CollectorPolicy: public CollectorPolicy {
private:
// The number of pauses during the execution.
long _n_pauses;
// either equal to the number of parallel threads, if ParallelGCThreads
// has been set, or 1 otherwise
int _parallel_gc_threads;
......@@ -127,18 +124,9 @@ private:
jlong _num_cc_clears; // number of times the card count cache has been cleared
#endif
// Statistics for recent GC pauses. See below for how indexed.
TruncatedSeq* _recent_rs_scan_times_ms;
// These exclude marking times.
TruncatedSeq* _recent_pause_times_ms;
TruncatedSeq* _recent_gc_times_ms;
TruncatedSeq* _recent_CS_bytes_used_before;
TruncatedSeq* _recent_CS_bytes_surviving;
TruncatedSeq* _recent_rs_sizes;
TruncatedSeq* _concurrent_mark_remark_times_ms;
TruncatedSeq* _concurrent_mark_cleanup_times_ms;
......@@ -150,13 +138,6 @@ private:
NumberSeq* _all_stop_world_times_ms;
NumberSeq* _all_yield_times_ms;
size_t _region_num_young;
size_t _region_num_tenured;
size_t _prev_region_num_young;
size_t _prev_region_num_tenured;
NumberSeq* _all_mod_union_times_ms;
int _aux_num;
NumberSeq* _all_aux_times_ms;
double* _cur_aux_start_times_ms;
......@@ -194,7 +175,6 @@ private:
// locker is active. This should be >= _young_list_target_length;
size_t _young_list_max_length;
size_t _young_cset_length;
bool _last_young_gc_full;
unsigned _full_young_pause_num;
......@@ -217,8 +197,6 @@ private:
return _during_marking;
}
// <NEW PREDICTION>
private:
enum PredictionConstants {
TruncatedSeqLength = 10
......@@ -240,47 +218,32 @@ private:
TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
TruncatedSeq* _pending_cards_seq;
TruncatedSeq* _scanned_cards_seq;
TruncatedSeq* _rs_lengths_seq;
TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
TruncatedSeq* _young_gc_eff_seq;
TruncatedSeq* _max_conc_overhead_seq;
bool _using_new_ratio_calculations;
size_t _min_desired_young_length; // as set on the command line or default calculations
size_t _max_desired_young_length; // as set on the command line or default calculations
size_t _recorded_young_regions;
size_t _recorded_non_young_regions;
size_t _recorded_region_num;
size_t _eden_cset_region_length;
size_t _survivor_cset_region_length;
size_t _old_cset_region_length;
void init_cset_region_lengths(size_t eden_cset_region_length,
size_t survivor_cset_region_length);
size_t eden_cset_region_length() { return _eden_cset_region_length; }
size_t survivor_cset_region_length() { return _survivor_cset_region_length; }
size_t old_cset_region_length() { return _old_cset_region_length; }
size_t _free_regions_at_end_of_collection;
size_t _recorded_rs_lengths;
size_t _max_rs_lengths;
size_t _recorded_marked_bytes;
size_t _recorded_young_bytes;
size_t _predicted_pending_cards;
size_t _predicted_cards_scanned;
size_t _predicted_rs_lengths;
size_t _predicted_bytes_to_copy;
double _predicted_survival_ratio;
double _predicted_rs_update_time_ms;
double _predicted_rs_scan_time_ms;
double _predicted_object_copy_time_ms;
double _predicted_constant_other_time_ms;
double _predicted_young_other_time_ms;
double _predicted_non_young_other_time_ms;
double _predicted_pause_time_ms;
double _vtime_diff_ms;
double _recorded_young_free_cset_time_ms;
double _recorded_non_young_free_cset_time_ms;
......@@ -320,18 +283,21 @@ private:
double _pause_time_target_ms;
double _recorded_young_cset_choice_time_ms;
double _recorded_non_young_cset_choice_time_ms;
bool _within_target;
size_t _pending_cards;
size_t _max_pending_cards;
public:
void set_region_short_lived(HeapRegion* hr) {
void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
hr->set_young();
hr->install_surv_rate_group(_short_lived_surv_rate_group);
hr->set_young_index_in_cset(young_index_in_cset);
}
void set_region_survivors(HeapRegion* hr) {
void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
assert(hr->is_young() && hr->is_survivor(), "pre-condition");
hr->install_surv_rate_group(_survivor_surv_rate_group);
hr->set_young_index_in_cset(young_index_in_cset);
}
#ifndef PRODUCT
......@@ -343,10 +309,6 @@ public:
seq->davg() * confidence_factor(seq->num()));
}
size_t young_cset_length() {
return _young_cset_length;
}
void record_max_rs_lengths(size_t rs_lengths) {
_max_rs_lengths = rs_lengths;
}
......@@ -465,20 +427,12 @@ public:
size_t predict_bytes_to_copy(HeapRegion* hr);
double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
void start_recording_regions();
void record_cset_region_info(HeapRegion* hr, bool young);
void record_non_young_cset_region(HeapRegion* hr);
void set_recorded_young_regions(size_t n_regions);
void set_recorded_young_bytes(size_t bytes);
void set_recorded_rs_lengths(size_t rs_lengths);
void set_predicted_bytes_to_copy(size_t bytes);
void end_recording_regions();
void record_vtime_diff_ms(double vtime_diff_ms) {
_vtime_diff_ms = vtime_diff_ms;
}
size_t cset_region_length() { return young_cset_region_length() +
old_cset_region_length(); }
size_t young_cset_region_length() { return eden_cset_region_length() +
survivor_cset_region_length(); }
void record_young_free_cset_time_ms(double time_ms) {
_recorded_young_free_cset_time_ms = time_ms;
......@@ -494,8 +448,6 @@ public:
double predict_survivor_regions_evac_time();
// </NEW PREDICTION>
void cset_regions_freed() {
bool propagate = _last_young_gc_full && !_in_marking_window;
_short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
......@@ -576,7 +528,6 @@ private:
double max_sum (double* data1, double* data2);
int _last_satb_drain_processed_buffers;
int _last_update_rs_processed_buffers;
double _last_pause_time_ms;
size_t _bytes_in_collection_set_before_gc;
......@@ -596,10 +547,6 @@ private:
// set at the start of the pause.
HeapRegion* _collection_set;
// The number of regions in the collection set. Set from the incrementally
// built collection set at the start of an evacuation pause.
size_t _collection_set_size;
// The number of bytes in the collection set before the pause. Set from
// the incrementally built collection set at the start of an evacuation
// pause.
......@@ -622,16 +569,6 @@ private:
// The tail of the incrementally built collection set.
HeapRegion* _inc_cset_tail;
// The number of regions in the incrementally built collection set.
// Used to set _collection_set_size at the start of an evacuation
// pause.
size_t _inc_cset_size;
// Used as the index in the surving young words structure
// which tracks the amount of space, for each young region,
// that survives the pause.
size_t _inc_cset_young_index;
// The number of bytes in the incrementally built collection set.
// Used to set _collection_set_bytes_used_before at the start of
// an evacuation pause.
......@@ -640,11 +577,6 @@ private:
// Used to record the highest end of heap region in collection set
HeapWord* _inc_cset_max_finger;
// The number of recorded used bytes in the young regions
// of the collection set. This is the sum of the used() bytes
// of retired young regions in the collection set.
size_t _inc_cset_recorded_young_bytes;
// The RSet lengths recorded for regions in the collection set
// (updated by the periodic sampling of the regions in the
// young list/collection set).
......@@ -655,68 +587,9 @@ private:
// regions in the young list/collection set).
double _inc_cset_predicted_elapsed_time_ms;
// The predicted bytes to copy for the regions in the collection
// set (updated by the periodic sampling of the regions in the
// young list/collection set).
size_t _inc_cset_predicted_bytes_to_copy;
// Stash a pointer to the g1 heap.
G1CollectedHeap* _g1;
// The average time in ms per collection pause, averaged over recent pauses.
double recent_avg_time_for_pauses_ms();
// The average time in ms for RS scanning, per pause, averaged
// over recent pauses. (Note the RS scanning time for a pause
// is itself an average of the RS scanning time for each worker
// thread.)
double recent_avg_time_for_rs_scan_ms();
// The number of "recent" GCs recorded in the number sequences
int number_of_recent_gcs();
// The average survival ratio, computed by the total number of bytes
// surviving / total number of bytes before collection over the last
// several recent pauses.
double recent_avg_survival_fraction();
// The survival fraction of the most recent pause; if there have been no
// pauses, returns 1.0.
double last_survival_fraction();
// Returns a "conservative" estimate of the recent survival rate, i.e.,
// one that may be higher than "recent_avg_survival_fraction".
// This is conservative in several ways:
// If there have been few pauses, it will assume a potential high
// variance, and err on the side of caution.
// It puts a lower bound (currently 0.1) on the value it will return.
// To try to detect phase changes, if the most recent pause ("latest") has a
// higher-than average ("avg") survival rate, it returns that rate.
// "work" version is a utility function; young is restricted to young regions.
double conservative_avg_survival_fraction_work(double avg,
double latest);
// The arguments are the two sequences that keep track of the number of bytes
// surviving and the total number of bytes before collection, resp.,
// over the last several recent pauses
// Returns the survival rate for the category in the most recent pause.
// If there have been no pauses, returns 1.0.
double last_survival_fraction_work(TruncatedSeq* surviving,
TruncatedSeq* before);
// The arguments are the two sequences that keep track of the number of bytes
// surviving and the total number of bytes before collection, resp.,
// over the last several recent pauses
// Returns the average survival ratio over the last several recent pauses.
// If there have been no pauses, returns 1.0.
double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
TruncatedSeq* before);
double conservative_avg_survival_fraction() {
double avg = recent_avg_survival_fraction();
double latest = last_survival_fraction();
return conservative_avg_survival_fraction_work(avg, latest);
}
// The ratio of gc time to elapsed time, computed over recent pauses.
double _recent_avg_pause_time_ratio;
......@@ -724,9 +597,6 @@ private:
return _recent_avg_pause_time_ratio;
}
// Number of pauses between concurrent marking.
size_t _pauses_btwn_concurrent_mark;
// At the end of a pause we check the heap occupancy and we decide
// whether we will start a marking cycle during the next pause. If
// we decide that we want to do that, we will set this parameter to
......@@ -849,9 +719,6 @@ public:
GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
// The number of collection pauses so far.
long n_pauses() const { return _n_pauses; }
// Update the heuristic info to record a collection pause of the given
// start time, where the given number of bytes were used at the start.
// This may involve changing the desired size of a collection set.
......@@ -905,10 +772,6 @@ public:
_last_satb_drain_processed_buffers = processed_buffers;
}
void record_mod_union_time(double ms) {
_all_mod_union_times_ms->add(ms);
}
void record_update_rs_time(int thread, double ms) {
_par_last_update_rs_times_ms[thread] = ms;
}
......@@ -1009,11 +872,8 @@ public:
void clear_collection_set() { _collection_set = NULL; }
// The number of elements in the current collection set.
size_t collection_set_size() { return _collection_set_size; }
// Add "hr" to the CS.
void add_to_collection_set(HeapRegion* hr);
// Add old region "hr" to the CSet.
void add_old_region_to_cset(HeapRegion* hr);
// Incremental CSet Support
......@@ -1023,9 +883,6 @@ public:
// The tail of the incrementally built collection set.
HeapRegion* inc_set_tail() { return _inc_cset_tail; }
// The number of elements in the incrementally built collection set.
size_t inc_cset_size() { return _inc_cset_size; }
// Initialize incremental collection set info.
void start_incremental_cset_building();
......@@ -1125,8 +982,6 @@ public:
return _young_list_max_length;
}
void update_region_num(bool young);
bool full_young_gcs() {
return _full_young_gcs;
}
......
......@@ -219,7 +219,7 @@ public:
HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
HeapRegion* result = _g1p->collection_set();
if (ParallelGCThreads > 0) {
size_t cs_size = _g1p->collection_set_size();
size_t cs_size = _g1p->cset_region_length();
int n_workers = _g1->workers()->total_workers();
size_t cs_spans = cs_size / n_workers;
size_t ind = cs_spans * worker_i;
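
> Note: `calculateStartRegion()` hands each worker a starting offset by dividing the collection set into equal spans, so workers begin iterating at disjoint points. The arithmetic in isolation (`cs_spans` is floor division, so the final spans absorb any remainder):

```cpp
#include <cstdio>

int start_index_for_worker(size_t cs_size, int n_workers, int worker_i) {
  size_t cs_spans = cs_size / n_workers;  // regions per worker (floor)
  size_t ind = cs_spans * worker_i;       // this worker's starting offset
  return (int)ind;
}

int main() {
  for (int w = 0; w < 4; w++)
    std::printf("worker %d starts at region %d\n",
                w, start_index_for_worker(10, 4, w));
}
```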
......
......@@ -39,10 +39,6 @@
develop(intx, G1MarkingOverheadPercent, 0, \
"Overhead of concurrent marking") \
\
\
develop(intx, G1PolicyVerbose, 0, \
"The verbosity level on G1 policy decisions") \
\
develop(intx, G1MarkingVerboseLevel, 0, \
"Level (0-4) of verboseness of the marking code") \
\
......@@ -58,9 +54,6 @@
develop(bool, G1TraceMarkStackOverflow, false, \
"If true, extra debugging code for CM restart for ovflw.") \
\
develop(intx, G1PausesBtwnConcMark, -1, \
"If positive, fixed number of pauses between conc markings") \
\
diagnostic(bool, G1SummarizeConcMark, false, \
"Summarize concurrent mark info") \
\
......
......@@ -416,7 +416,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
void add_to_marked_bytes(size_t incr_bytes) {
_next_marked_bytes = _next_marked_bytes + incr_bytes;
guarantee( _next_marked_bytes <= used(), "invariant" );
assert(_next_marked_bytes <= used(), "invariant" );
}
void zero_marked_bytes() {
......
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -96,7 +96,8 @@ void PSMarkSweepDecorator::precompact() {
* by the MarkSweepAlwaysCompactCount parameter. This is a significant
* performance improvement!
*/
bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);
bool skip_dead = (MarkSweepAlwaysCompactCount < 1)
|| ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);
size_t allowed_deadspace = 0;
if (skip_dead) {
......
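
> Note: the guard added above matters because `total_invocations() % MarkSweepAlwaysCompactCount` divides by the flag's value; with the flag set to zero, the old expression was a division by zero. Short-circuit evaluation makes the modulo unreachable when the flag is out of range, and in that case every collection skips the full compaction. In isolation:

```cpp
#include <cstdio>

bool should_skip_dead(int always_compact_count, int total_invocations) {
  return (always_compact_count < 1)
      || ((total_invocations % always_compact_count) != 0);
}

int main() {
  std::printf("%d\n", should_skip_dead(0, 7));  // true, no division by zero
  std::printf("%d\n", should_skip_dead(4, 8));  // false: full-compaction run
}
```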
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -533,7 +533,8 @@ protected:
* by the MarkSweepAlwaysCompactCount parameter. \
*/ \
int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations;\
bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
bool skip_dead = (MarkSweepAlwaysCompactCount < 1) \
||((invocations % MarkSweepAlwaysCompactCount) != 0); \
\
size_t allowed_deadspace = 0; \
if (skip_dead) { \
......
......@@ -168,10 +168,8 @@ GCStatInfo::GCStatInfo(int num_pools) {
// initialize the arrays for memory usage
_before_gc_usage_array = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools);
_after_gc_usage_array = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools);
size_t len = num_pools * sizeof(MemoryUsage);
memset(_before_gc_usage_array, 0, len);
memset(_after_gc_usage_array, 0, len);
_usage_array_size = num_pools;
clear();
}
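
> Note: the constructor now delegates zeroing of the usage arrays to `clear()` instead of repeating the `memset()`s inline, so first-time initialization and later re-initialization share one code path. A standalone sketch of the delegation (`StatInfo` is a stand-in type):

```cpp
#include <cstddef>
#include <cstring>

struct StatInfo {
  double* usage;
  size_t  n;
  explicit StatInfo(size_t n_) : usage(new double[n_]), n(n_) {
    clear();  // one code path zeroes the arrays
  }
  ~StatInfo() { delete[] usage; }
  void clear() { std::memset(usage, 0, n * sizeof(double)); }
};

int main() { StatInfo s(4); }
```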
GCStatInfo::~GCStatInfo() {
......@@ -304,12 +302,8 @@ void GCMemoryManager::gc_end(bool recordPostGCUsage,
pool->set_last_collection_usage(usage);
LowMemoryDetector::detect_after_gc_memory(pool);
}
if(is_notification_enabled()) {
bool isMajorGC = this == MemoryService::get_major_gc_manager();
GCNotifier::pushNotification(this, isMajorGC ? "end of major GC" : "end of minor GC",
GCCause::to_string(cause));
}
}
if (countCollection) {
_num_collections++;
// alternately update two objects making one public when complete
......@@ -321,6 +315,12 @@ void GCMemoryManager::gc_end(bool recordPostGCUsage,
// reset the current stat for diagnosability purposes
_current_gc_stat->clear();
}
if (is_notification_enabled()) {
bool isMajorGC = this == MemoryService::get_major_gc_manager();
GCNotifier::pushNotification(this, isMajorGC ? "end of major GC" : "end of minor GC",
GCCause::to_string(cause));
}
}
}
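
> Note: the hunk moves the notification block after the collection-count and stat-record update, so a listener that reads the manager's statistics from its handler observes a completed record rather than a half-updated one. A standalone sketch of the publish-then-notify ordering (all names are stand-ins):

```cpp
#include <cstdio>

struct GcStats { int collections = 0; bool complete = false; };
static GcStats public_stats;

static void notify_listeners() {
  // Listeners may read public_stats here; it is already complete.
  std::printf("GC end: %d collections\n", public_stats.collections);
}

void gc_end(bool count_collection, bool notification_enabled) {
  if (count_collection) {
    public_stats.collections++;   // update and publish the record first...
    public_stats.complete = true;
  }
  if (notification_enabled) {
    notify_listeners();           // ...then tell the world
  }
}

int main() { gc_end(true, true); }
```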
......