Commit 347da4fb authored by tonyp

Merge

......@@ -39,7 +39,6 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
_next(next),
_cg1r(cg1r),
_vtime_accum(0.0),
_co_tracker(G1CRGroup),
_interval_ms(5.0)
{
create_and_start();
......@@ -76,9 +75,6 @@ void ConcurrentG1RefineThread::run() {
_vtime_start = os::elapsedVTime();
wait_for_universe_init();
_co_tracker.enable();
_co_tracker.start();
while (!_should_terminate) {
DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
// Wait for completed log buffers to exist.
......@@ -147,7 +143,6 @@ void ConcurrentG1RefineThread::run() {
}
break;
}
_co_tracker.update(false);
// Check if we need to activate the next thread.
if (curr_buffer_num > next_threshold && _next != NULL && !_next->is_active()) {
......@@ -168,7 +163,6 @@ void ConcurrentG1RefineThread::run() {
}
n_logs++;
}
_co_tracker.update(false);
_sts.leave();
if (os::supports_vtime()) {
......@@ -177,9 +171,6 @@ void ConcurrentG1RefineThread::run() {
_vtime_accum = 0.0;
}
}
_sts.join();
_co_tracker.update(true);
_sts.leave();
assert(_should_terminate, "just checking");
terminate();
......
......@@ -51,7 +51,6 @@ class ConcurrentG1RefineThread: public ConcurrentGCThread {
private:
ConcurrentG1Refine* _cg1r;
COTracker _co_tracker;
double _interval_ms;
void decreaseInterval(int processing_time_ms) {
......
......@@ -433,8 +433,7 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
_total_counting_time(0.0),
_total_rs_scrub_time(0.0),
_parallel_workers(NULL),
_cleanup_co_tracker(G1CLGroup)
_parallel_workers(NULL)
{
CMVerboseLevel verbose_level =
(CMVerboseLevel) G1MarkingVerboseLevel;
......@@ -823,18 +822,6 @@ void ConcurrentMark::checkpointRootsInitialPost() {
// when marking is on. So, it's also called at the end of the
// initial-mark pause to update the heap end, if the heap expands
// during it. No need to call it here.
guarantee( !_cleanup_co_tracker.enabled(), "invariant" );
size_t max_marking_threads =
MAX2((size_t) 1, parallel_marking_threads());
for (int i = 0; i < (int)_max_task_num; ++i) {
_tasks[i]->enable_co_tracker();
if (i < (int) max_marking_threads)
_tasks[i]->reset_co_tracker(marking_task_overhead());
else
_tasks[i]->reset_co_tracker(0.0);
}
}
// Checkpoint the roots into this generation from outside
......@@ -845,7 +832,6 @@ void ConcurrentMark::checkpointRootsInitial() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
double start = os::elapsedTime();
GCOverheadReporter::recordSTWStart(start);
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
g1p->record_concurrent_mark_init_start();
......@@ -876,7 +862,6 @@ void ConcurrentMark::checkpointRootsInitial() {
// Statistics.
double end = os::elapsedTime();
_init_times.add((end - start) * 1000.0);
GCOverheadReporter::recordSTWEnd(end);
g1p->record_concurrent_mark_init_end();
}
......@@ -1035,7 +1020,6 @@ public:
guarantee( (size_t)worker_i < _cm->active_tasks(), "invariant" );
CMTask* the_task = _cm->task(worker_i);
the_task->start_co_tracker();
the_task->record_start_time();
if (!_cm->has_aborted()) {
do {
......@@ -1061,8 +1045,6 @@ public:
double end_time2_sec = os::elapsedTime();
double elapsed_time2_sec = end_time2_sec - start_time_sec;
the_task->update_co_tracker();
#if 0
gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
"overhead %1.4lf",
......@@ -1079,7 +1061,6 @@ public:
ConcurrentGCThread::stsLeave();
double end_vtime = os::elapsedVTime();
the_task->update_co_tracker(true);
_cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime);
}
......@@ -1133,7 +1114,6 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
g1p->record_concurrent_mark_remark_start();
double start = os::elapsedTime();
GCOverheadReporter::recordSTWStart(start);
checkpointRootsFinalWork();
......@@ -1173,11 +1153,6 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
_remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
_remark_times.add((now - start) * 1000.0);
GCOverheadReporter::recordSTWEnd(now);
for (int i = 0; i < (int)_max_task_num; ++i)
_tasks[i]->disable_co_tracker();
_cleanup_co_tracker.enable();
_cleanup_co_tracker.reset(cleanup_task_overhead());
g1p->record_concurrent_mark_remark_end();
}
......@@ -1188,7 +1163,6 @@ class CalcLiveObjectsClosure: public HeapRegionClosure {
CMBitMapRO* _bm;
ConcurrentMark* _cm;
COTracker* _co_tracker;
bool _changed;
bool _yield;
size_t _words_done;
......@@ -1216,12 +1190,10 @@ class CalcLiveObjectsClosure: public HeapRegionClosure {
public:
CalcLiveObjectsClosure(bool final,
CMBitMapRO *bm, ConcurrentMark *cm,
BitMap* region_bm, BitMap* card_bm,
COTracker* co_tracker) :
BitMap* region_bm, BitMap* card_bm) :
_bm(bm), _cm(cm), _changed(false), _yield(true),
_words_done(0), _tot_live(0), _tot_used(0),
_region_bm(region_bm), _card_bm(card_bm),
_final(final), _co_tracker(co_tracker),
_region_bm(region_bm), _card_bm(card_bm),_final(final),
_regions_done(0), _start_vtime_sec(0.0)
{
_bottom_card_num =
......@@ -1265,9 +1237,6 @@ public:
}
bool doHeapRegion(HeapRegion* hr) {
if (_co_tracker != NULL)
_co_tracker->update();
if (!_final && _regions_done == 0)
_start_vtime_sec = os::elapsedVTime();
......@@ -1396,12 +1365,6 @@ public:
if (elapsed_vtime_sec > (10.0 / 1000.0)) {
jlong sleep_time_ms =
(jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0);
#if 0
gclog_or_tty->print_cr("CL: elapsed %1.4lf ms, sleep %1.4lf ms, "
"overhead %1.4lf",
elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
_co_tracker->concOverhead(os::elapsedTime()));
#endif
os::sleep(Thread::current(), sleep_time_ms, false);
_start_vtime_sec = end_vtime_sec;
}
......@@ -1421,15 +1384,11 @@ public:
void ConcurrentMark::calcDesiredRegions() {
guarantee( _cleanup_co_tracker.enabled(), "invariant" );
_cleanup_co_tracker.start();
_region_bm.clear();
_card_bm.clear();
CalcLiveObjectsClosure calccl(false /*final*/,
nextMarkBitMap(), this,
&_region_bm, &_card_bm,
&_cleanup_co_tracker);
&_region_bm, &_card_bm);
G1CollectedHeap *g1h = G1CollectedHeap::heap();
g1h->heap_region_iterate(&calccl);
......@@ -1437,8 +1396,6 @@ void ConcurrentMark::calcDesiredRegions() {
calccl.reset();
g1h->heap_region_iterate(&calccl);
} while (calccl.changed());
_cleanup_co_tracker.update(true);
}
class G1ParFinalCountTask: public AbstractGangTask {
......@@ -1472,8 +1429,7 @@ public:
void work(int i) {
CalcLiveObjectsClosure calccl(true /*final*/,
_bm, _g1h->concurrent_mark(),
_region_bm, _card_bm,
NULL /* CO tracker */);
_region_bm, _card_bm);
calccl.no_yield();
if (ParallelGCThreads > 0) {
_g1h->heap_region_par_iterate_chunked(&calccl, i,
......@@ -1663,13 +1619,10 @@ void ConcurrentMark::cleanup() {
/* prev marking */ true);
}
_cleanup_co_tracker.disable();
G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
g1p->record_concurrent_mark_cleanup_start();
double start = os::elapsedTime();
GCOverheadReporter::recordSTWStart(start);
// Do counting once more with the world stopped for good measure.
G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
......@@ -1774,7 +1727,6 @@ void ConcurrentMark::cleanup() {
// Statistics.
double end = os::elapsedTime();
_cleanup_times.add((end - start) * 1000.0);
GCOverheadReporter::recordSTWEnd(end);
// G1CollectedHeap::heap()->print();
// gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d",
......@@ -2625,24 +2577,6 @@ void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
_should_gray_objects = true;
}
void ConcurrentMark::disable_co_trackers() {
if (has_aborted()) {
if (_cleanup_co_tracker.enabled())
_cleanup_co_tracker.disable();
for (int i = 0; i < (int)_max_task_num; ++i) {
CMTask* task = _tasks[i];
if (task->co_tracker_enabled())
task->disable_co_tracker();
}
} else {
guarantee( !_cleanup_co_tracker.enabled(), "invariant" );
for (int i = 0; i < (int)_max_task_num; ++i) {
CMTask* task = _tasks[i];
guarantee( !task->co_tracker_enabled(), "invariant" );
}
}
}
// abandon current marking iteration due to a Full GC
void ConcurrentMark::abort() {
// Clear all marks to force marking thread to do nothing
......@@ -4018,7 +3952,6 @@ CMTask::CMTask(int task_id,
CMTaskQueue* task_queue,
CMTaskQueueSet* task_queues)
: _g1h(G1CollectedHeap::heap()),
_co_tracker(G1CMGroup),
_task_id(task_id), _cm(cm),
_claimed(false),
_nextMarkBitMap(NULL), _hash_seed(17),
......
......@@ -407,8 +407,6 @@ protected:
// verbose level
CMVerboseLevel _verbose_level;
COTracker _cleanup_co_tracker;
// These two fields are used to implement the optimisation that
// avoids pushing objects on the global/region stack if there are
// no collection set regions above the lowest finger.
......@@ -720,8 +718,6 @@ public:
// Called to abort the marking cycle after a Full GC takes place.
void abort();
void disable_co_trackers();
// This prints the global/local fingers. It is used for debugging.
NOT_PRODUCT(void print_finger();)
......@@ -773,9 +769,6 @@ private:
// number of calls to this task
int _calls;
// concurrent overhead over a single CPU for this task
COTracker _co_tracker;
// when the virtual timer reaches this time, the marking step should
// exit
double _time_target_ms;
......@@ -928,27 +921,6 @@ public:
void set_concurrent(bool concurrent) { _concurrent = concurrent; }
void enable_co_tracker() {
guarantee( !_co_tracker.enabled(), "invariant" );
_co_tracker.enable();
}
void disable_co_tracker() {
guarantee( _co_tracker.enabled(), "invariant" );
_co_tracker.disable();
}
bool co_tracker_enabled() {
return _co_tracker.enabled();
}
void reset_co_tracker(double starting_conc_overhead = 0.0) {
_co_tracker.reset(starting_conc_overhead);
}
void start_co_tracker() {
_co_tracker.start();
}
void update_co_tracker(bool force_end = false) {
_co_tracker.update(force_end);
}
// The main method of this class which performs a marking step
// trying not to exceed the given duration. However, it might exit
// prematurely, according to some conditions (i.e. SATB buffers are
......
......@@ -260,10 +260,6 @@ void ConcurrentMarkThread::run() {
}
}
_sts.join();
_cm->disable_co_trackers();
_sts.leave();
// we now want to allow clearing of the marking bitmap to be
// suspended by a collection pause.
_sts.join();
......
......@@ -35,8 +35,7 @@ int ConcurrentZFThread::_zf_waits = 0;
int ConcurrentZFThread::_regions_filled = 0;
ConcurrentZFThread::ConcurrentZFThread() :
ConcurrentGCThread(),
_co_tracker(G1ZFGroup)
ConcurrentGCThread()
{
create_and_start();
}
......@@ -71,8 +70,6 @@ void ConcurrentZFThread::run() {
Thread* thr_self = Thread::current();
_vtime_start = os::elapsedVTime();
wait_for_universe_init();
_co_tracker.enable();
_co_tracker.start();
G1CollectedHeap* g1 = G1CollectedHeap::heap();
_sts.join();
......@@ -135,10 +132,7 @@ void ConcurrentZFThread::run() {
}
_vtime_accum = (os::elapsedVTime() - _vtime_start);
_sts.join();
_co_tracker.update();
}
_co_tracker.update(false);
_sts.leave();
assert(_should_terminate, "just checking");
......
......@@ -42,8 +42,6 @@ class ConcurrentZFThread: public ConcurrentGCThread {
// Number of regions CFZ thread fills.
static int _regions_filled;
COTracker _co_tracker;
double _vtime_start; // Initial virtual time.
// These are static because the "print_summary_info" method is, and
......
......@@ -927,7 +927,6 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
double start = os::elapsedTime();
GCOverheadReporter::recordSTWStart(start);
g1_policy()->record_full_collection_start();
gc_prologue(true);
......@@ -1049,7 +1048,6 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
}
double end = os::elapsedTime();
GCOverheadReporter::recordSTWEnd(end);
g1_policy()->record_full_collection_end();
#ifdef TRACESPINNING
......@@ -1610,9 +1608,6 @@ jint G1CollectedHeap::initialize() {
// Do later initialization work for concurrent refinement.
_cg1r->init();
const char* group_names[] = { "CR", "ZF", "CM", "CL" };
GCOverheadReporter::initGCOverheadReporter(4, group_names);
return JNI_OK;
}
......@@ -2431,8 +2426,6 @@ void G1CollectedHeap::print_tracing_info() const {
}
g1_policy()->print_yg_surv_rate_info();
GCOverheadReporter::printGCOverhead();
SpecializationStats::print();
}
......@@ -2669,7 +2662,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
// The elapsed time induced by the start time below deliberately elides
// the possible verification above.
double start_time_sec = os::elapsedTime();
GCOverheadReporter::recordSTWStart(start_time_sec);
size_t start_used_bytes = used();
g1_policy()->record_collection_pause_start(start_time_sec,
......@@ -2798,7 +2790,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
double end_time_sec = os::elapsedTime();
double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
g1_policy()->record_pause_time_ms(pause_time_ms);
GCOverheadReporter::recordSTWEnd(end_time_sec);
g1_policy()->record_collection_pause_end(abandoned);
assert(regions_accounted_for(), "Region leakage.");
......
......@@ -993,8 +993,6 @@ void G1CollectorPolicy::record_full_collection_end() {
double full_gc_time_sec = end_sec - _cur_collection_start_sec;
double full_gc_time_ms = full_gc_time_sec * 1000.0;
checkpoint_conc_overhead();
_all_full_gc_times_ms->add(full_gc_time_ms);
update_recent_gc_times(end_sec, full_gc_time_ms);
......@@ -1164,7 +1162,6 @@ void G1CollectorPolicy::record_concurrent_mark_init_end() {
double end_time_sec = os::elapsedTime();
double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
_concurrent_mark_init_times_ms->add(elapsed_time_ms);
checkpoint_conc_overhead();
record_concurrent_mark_init_end_pre(elapsed_time_ms);
_mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
......@@ -1178,7 +1175,6 @@ void G1CollectorPolicy::record_concurrent_mark_remark_start() {
void G1CollectorPolicy::record_concurrent_mark_remark_end() {
double end_time_sec = os::elapsedTime();
double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
checkpoint_conc_overhead();
_concurrent_mark_remark_times_ms->add(elapsed_time_ms);
_cur_mark_stop_world_time_ms += elapsed_time_ms;
_prev_collection_pause_end_ms += elapsed_time_ms;
......@@ -1210,7 +1206,6 @@ record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
// The important thing about this is that it includes "os::elapsedTime".
void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
checkpoint_conc_overhead();
double end_time_sec = os::elapsedTime();
double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
_concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
......@@ -1425,8 +1420,6 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
}
#endif // PRODUCT
checkpoint_conc_overhead();
if (in_young_gc_mode()) {
last_pause_included_initial_mark = _should_initiate_conc_mark;
if (last_pause_included_initial_mark)
......@@ -2525,19 +2518,6 @@ region_num_to_mbs(int length) {
}
#endif // PRODUCT
void
G1CollectorPolicy::checkpoint_conc_overhead() {
double conc_overhead = 0.0;
if (G1AccountConcurrentOverhead)
conc_overhead = COTracker::totalPredConcOverhead();
_mmu_tracker->update_conc_overhead(conc_overhead);
#if 0
gclog_or_tty->print(" CO %1.4lf TARGET %1.4lf",
conc_overhead, _mmu_tracker->max_gc_time());
#endif
}
size_t G1CollectorPolicy::max_regions(int purpose) {
switch (purpose) {
case GCAllocForSurvived:
......
......@@ -981,8 +981,6 @@ public:
void set_should_initiate_conc_mark() { _should_initiate_conc_mark = true; }
void unset_should_initiate_conc_mark(){ _should_initiate_conc_mark = false; }
void checkpoint_conc_overhead();
// If an expansion would be appropriate, because recent GC overhead had
// exceeded the desired limit, return an amount to expand by.
virtual size_t expansion_amount();
......
......@@ -37,21 +37,7 @@
G1MMUTracker::G1MMUTracker(double time_slice, double max_gc_time) :
_time_slice(time_slice),
_max_gc_time(max_gc_time),
_conc_overhead_time_sec(0.0) { }
void
G1MMUTracker::update_conc_overhead(double conc_overhead) {
double conc_overhead_time_sec = _time_slice * conc_overhead;
if (conc_overhead_time_sec > 0.9 * _max_gc_time) {
// We are screwed, as we only seem to have <10% of the soft
// real-time goal available for pauses. Let's admit defeat and
// allow something more generous as a pause target.
conc_overhead_time_sec = 0.75 * _max_gc_time;
}
_conc_overhead_time_sec = conc_overhead_time_sec;
}
_max_gc_time(max_gc_time) { }
G1MMUTrackerQueue::G1MMUTrackerQueue(double time_slice, double max_gc_time) :
G1MMUTracker(time_slice, max_gc_time),
......@@ -128,7 +114,7 @@ double G1MMUTrackerQueue::longest_pause_internal(double current_time) {
while( 1 ) {
double gc_time =
calculate_gc_time(current_time + target_time) + _conc_overhead_time_sec;
calculate_gc_time(current_time + target_time);
double diff = target_time + gc_time - _max_gc_time;
if (!is_double_leq_0(diff)) {
target_time -= diff;
......
......@@ -33,19 +33,15 @@ protected:
double _time_slice;
double _max_gc_time; // this is per time slice
double _conc_overhead_time_sec;
public:
G1MMUTracker(double time_slice, double max_gc_time);
void update_conc_overhead(double conc_overhead);
virtual void add_pause(double start, double end, bool gc_thread) = 0;
virtual double longest_pause(double current_time) = 0;
virtual double when_sec(double current_time, double pause_time) = 0;
double max_gc_time() {
return _max_gc_time - _conc_overhead_time_sec;
return _max_gc_time;
}
inline bool now_max_gc(double current_time) {
......
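The two hunks above are where the tracked overhead fed back into pause scheduling: G1CollectorPolicy::checkpoint_conc_overhead() summed COTracker::totalPredConcOverhead() (when G1AccountConcurrentOverhead was set) and passed it to G1MMUTracker::update_conc_overhead(), which converted that fraction into seconds of the time slice and clamped it whenever it would eat more than 90% of the soft real-time goal; max_gc_time() then handed out the reduced pause budget. A small standalone sketch of that removed clamping rule, with an invented 200 ms / 50 ms MMU goal:
#include <cstdio>
// Sketch of the removed G1MMUTracker::update_conc_overhead() logic: convert a
// machine-wide concurrent overhead fraction into seconds of the time slice,
// and refuse to let it consume more than 90% of the pause budget.
static double conc_overhead_time_sec(double time_slice_sec,
                                     double max_gc_time_sec,
                                     double conc_overhead) {
  double t = time_slice_sec * conc_overhead;
  if (t > 0.9 * max_gc_time_sec) {
    // Less than 10% of the soft real-time goal would be left for pauses;
    // fall back to a more generous pause target instead.
    t = 0.75 * max_gc_time_sec;
  }
  return t;
}
int main() {
  // Hypothetical MMU goal: at most 50 ms of GC per 200 ms time slice.
  double time_slice = 0.200, max_gc_time = 0.050;
  double modest = conc_overhead_time_sec(time_slice, max_gc_time, 0.10); // 20 ms
  double heavy  = conc_overhead_time_sec(time_slice, max_gc_time, 0.40); // clamped to 37.5 ms
  std::printf("effective pause target, modest overhead: %.4f s\n",
              max_gc_time - modest);
  std::printf("effective pause target, heavy overhead:  %.4f s\n",
              max_gc_time - heavy);
  return 0;
}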
......@@ -37,10 +37,6 @@
develop(intx, G1MarkingOverheadPercent, 0, \
"Overhead of concurrent marking") \
\
develop(bool, G1AccountConcurrentOverhead, false, \
"Whether soft real-time compliance in G1 will take into account" \
"concurrent overhead") \
\
product(intx, G1YoungGenSize, 0, \
"Size of the G1 young generation, 0 is the adaptive policy") \
\
......
......@@ -64,14 +64,12 @@ concurrentG1RefineThread.cpp mutexLocker.hpp
concurrentG1RefineThread.cpp resourceArea.hpp
concurrentG1RefineThread.hpp concurrentGCThread.hpp
concurrentG1RefineThread.hpp coTracker.hpp
concurrentMark.cpp concurrentMark.hpp
concurrentMark.cpp concurrentMarkThread.inline.hpp
concurrentMark.cpp g1CollectedHeap.inline.hpp
concurrentMark.cpp g1CollectorPolicy.hpp
concurrentMark.cpp g1RemSet.hpp
concurrentMark.cpp gcOverheadReporter.hpp
concurrentMark.cpp genOopClosures.inline.hpp
concurrentMark.cpp heapRegionRemSet.hpp
concurrentMark.cpp heapRegionSeq.inline.hpp
......@@ -82,7 +80,6 @@ concurrentMark.cpp referencePolicy.hpp
concurrentMark.cpp resourceArea.hpp
concurrentMark.cpp symbolTable.hpp
concurrentMark.hpp coTracker.hpp
concurrentMark.hpp heapRegion.hpp
concurrentMark.hpp taskqueue.hpp
......@@ -107,7 +104,6 @@ concurrentZFThread.cpp mutexLocker.hpp
concurrentZFThread.cpp space.inline.hpp
concurrentZFThread.hpp concurrentGCThread.hpp
concurrentZFThread.hpp coTracker.hpp
dirtyCardQueue.cpp atomic.hpp
dirtyCardQueue.cpp dirtyCardQueue.hpp
......@@ -147,7 +143,6 @@ g1CollectedHeap.cpp g1RemSet.inline.hpp
g1CollectedHeap.cpp g1OopClosures.inline.hpp
g1CollectedHeap.cpp genOopClosures.inline.hpp
g1CollectedHeap.cpp gcLocker.inline.hpp
g1CollectedHeap.cpp gcOverheadReporter.hpp
g1CollectedHeap.cpp generationSpec.hpp
g1CollectedHeap.cpp heapRegionRemSet.hpp
g1CollectedHeap.cpp heapRegionSeq.inline.hpp
......
......@@ -35,12 +35,6 @@ concurrentGCThread.cpp systemDictionary.hpp
concurrentGCThread.hpp thread.hpp
coTracker.hpp globalDefinitions.hpp
coTracker.hpp numberSeq.hpp
coTracker.cpp coTracker.hpp
coTracker.cpp os.hpp
allocationStats.cpp allocationStats.hpp
allocationStats.cpp ostream.hpp
......@@ -54,13 +48,6 @@ gcAdaptivePolicyCounters.hpp gcPolicyCounters.hpp
gcAdaptivePolicyCounters.cpp resourceArea.hpp
gcAdaptivePolicyCounters.cpp gcAdaptivePolicyCounters.hpp
gcOverheadReporter.cpp allocation.inline.hpp
gcOverheadReporter.cpp concurrentGCThread.hpp
gcOverheadReporter.cpp coTracker.hpp
gcOverheadReporter.cpp gcOverheadReporter.hpp
gcOverheadReporter.cpp ostream.hpp
gcOverheadReporter.cpp thread_<os_family>.inline.hpp
gSpaceCounters.cpp generation.hpp
gSpaceCounters.cpp resourceArea.hpp
gSpaceCounters.cpp gSpaceCounters.hpp
......
/*
* Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
# include "incls/_precompiled.incl"
# include "incls/_coTracker.cpp.incl"
COTracker* COTracker::_head = NULL;
double COTracker::_cpu_number = -1.0;
void
COTracker::resetPeriod(double now_sec, double vnow_sec) {
guarantee( _enabled, "invariant" );
_period_start_time_sec = now_sec;
_period_start_vtime_sec = vnow_sec;
}
void
COTracker::setConcOverhead(double time_stamp_sec,
double conc_overhead) {
guarantee( _enabled, "invariant" );
_conc_overhead = conc_overhead;
_time_stamp_sec = time_stamp_sec;
if (conc_overhead > 0.001)
_conc_overhead_seq.add(conc_overhead);
}
void
COTracker::reset(double starting_conc_overhead) {
guarantee( _enabled, "invariant" );
double now_sec = os::elapsedTime();
setConcOverhead(now_sec, starting_conc_overhead);
}
void
COTracker::start() {
guarantee( _enabled, "invariant" );
resetPeriod(os::elapsedTime(), os::elapsedVTime());
}
void
COTracker::update(bool force_end) {
assert( _enabled, "invariant" );
double end_time_sec = os::elapsedTime();
double elapsed_time_sec = end_time_sec - _period_start_time_sec;
if (force_end || elapsed_time_sec > _update_period_sec) {
// reached the end of the period
double end_vtime_sec = os::elapsedVTime();
double elapsed_vtime_sec = end_vtime_sec - _period_start_vtime_sec;
double conc_overhead = elapsed_vtime_sec / elapsed_time_sec;
setConcOverhead(end_time_sec, conc_overhead);
resetPeriod(end_time_sec, end_vtime_sec);
}
}
void
COTracker::updateForSTW(double start_sec, double end_sec) {
if (!_enabled)
return;
// During a STW pause, no concurrent GC thread has done any
// work. So, we can safely adjust the start of the current period by
// adding the duration of the STW pause to it, so that the STW pause
// doesn't affect the reading of the concurrent overhead (it's
// basically like excluding the time of the STW pause from the
// concurrent overhead calculation).
double stw_duration_sec = end_sec - start_sec;
guarantee( stw_duration_sec > 0.0, "invariant" );
if (outOfDate(start_sec))
_conc_overhead = 0.0;
else
_time_stamp_sec = end_sec;
_period_start_time_sec += stw_duration_sec;
_conc_overhead_seq = NumberSeq();
guarantee( os::elapsedTime() > _period_start_time_sec, "invariant" );
}
double
COTracker::predConcOverhead() {
if (_enabled) {
// tty->print(" %1.2lf", _conc_overhead_seq.maximum());
return _conc_overhead_seq.maximum();
} else {
// tty->print(" DD");
return 0.0;
}
}
void
COTracker::resetPred() {
_conc_overhead_seq = NumberSeq();
}
COTracker::COTracker(int group)
: _enabled(false),
_group(group),
_period_start_time_sec(-1.0),
_period_start_vtime_sec(-1.0),
_conc_overhead(-1.0),
_time_stamp_sec(-1.0),
_next(NULL) {
// GCOverheadReportingPeriodMS indicates how frequently the
// concurrent overhead will be recorded by the GC Overhead
// Reporter. We want to take readings less often than that. If we
// took readings more often than that, some of them might be lost.
_update_period_sec = ((double) GCOverheadReportingPeriodMS) / 1000.0 * 1.25;
_next = _head;
_head = this;
if (_cpu_number < 0.0)
_cpu_number = (double) os::processor_count();
}
// statics
void
COTracker::updateAllForSTW(double start_sec, double end_sec) {
for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
curr->updateForSTW(start_sec, end_sec);
}
}
double
COTracker::totalConcOverhead(double now_sec) {
double total_conc_overhead = 0.0;
for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
double conc_overhead = curr->concOverhead(now_sec);
total_conc_overhead += conc_overhead;
}
return total_conc_overhead;
}
double
COTracker::totalConcOverhead(double now_sec,
size_t group_num,
double* co_per_group) {
double total_conc_overhead = 0.0;
for (size_t i = 0; i < group_num; ++i)
co_per_group[i] = 0.0;
for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
size_t group = curr->_group;
assert( 0 <= group && group < group_num, "invariant" );
double conc_overhead = curr->concOverhead(now_sec);
co_per_group[group] += conc_overhead;
total_conc_overhead += conc_overhead;
}
return total_conc_overhead;
}
double
COTracker::totalPredConcOverhead() {
double total_pred_conc_overhead = 0.0;
for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
total_pred_conc_overhead += curr->predConcOverhead();
curr->resetPred();
}
return total_pred_conc_overhead / _cpu_number;
}
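COTracker::update() above reduces to a single ratio: the virtual (CPU) time the tracked thread consumed during the sampling period divided by the period's wall-clock length, which concOverhead() then divides by the processor count for a machine-wide figure. A standalone sketch of that arithmetic with invented numbers (the helper name is not from the commit):
#include <cstdio>
// Illustrative helper: the per-CPU concurrent overhead is the CPU time the
// thread consumed during the period divided by the period's wall-clock length.
static double conc_overhead_per_cpu(double elapsed_time_sec,
                                    double elapsed_vtime_sec) {
  return elapsed_vtime_sec / elapsed_time_sec;
}
int main() {
  // Hypothetical reading: a refinement thread used 12 ms of CPU time
  // during a 100 ms sampling period on a 4-CPU machine.
  double elapsed_time_sec  = 0.100;
  double elapsed_vtime_sec = 0.012;
  int    cpu_number        = 4;
  double per_cpu = conc_overhead_per_cpu(elapsed_time_sec, elapsed_vtime_sec);
  double machine = per_cpu / cpu_number;   // what concOverhead() would report
  std::printf("per-CPU overhead: %.3f, machine-wide overhead: %.3f\n",
              per_cpu, machine);
  return 0;
}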
/*
* Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
// COTracker keeps track of the concurrent overhead of a GC thread.
// A thread that needs to be tracked must, itself, start up its
// tracker with the start() method and then call the update() method
// at regular intervals. What the tracker does is to calculate the
// concurrent overhead of a process at a given update period. The
// tracker starts and, when it detects that it has exceeded the given
// period, it calculates the duration of the period in wall-clock time
// and the duration of the period in vtime (i.e. how much time the
// concurrent processes really took up during this period). The ratio
// of the latter over the former is the concurrent overhead of that
// process for that period over a single CPU. This overhead is stored
// on the tracker, "timestamped" with the wall-clock time of the end
// of the period. When the concurrent overhead of this process needs
// to be queried, this last "reading" provides a good approximation
// (we assume that the concurrent overhead of a particular thread
// stays largely constant over time). The timestamp is necessary to
// detect when the process has stopped working and the recorded
// reading hasn't been updated for some time.
// Each concurrent GC thread is considered to be part of a "group"
// (i.e. any available concurrent marking threads are part of the
// "concurrent marking thread group"). A COTracker is associated with
// a single group at construction-time. It's up to each collector to
// decide how groups will be mapped to such an id (ids should start
// from 0 and be consecutive; there's a hardcoded max group num
// defined in gcOverheadReporter.hpp). The notion of a group has
// been introduced to be able to identify how much overhead was
// imposed by each group, instead of getting a single value that
// covers all concurrent overhead.
class COTracker {
private:
// It indicates whether this tracker is enabled or not. When the
// tracker is disabled, then it returns 0.0 as the latest concurrent
// overhead and several methods (reset, start, and update) are not
// supposed to be called on it. This enabling / disabling facility
// is really provided to make a bit more explicit in the code when a
// particular tracker of a process that doesn't run all the time
// (e.g. concurrent marking) is supposed to be used and when it's not.
bool _enabled;
// The ID of the group associated with this tracker.
int _group;
// The update period of the tracker. A new value for the concurrent
// overhead of the associated process will be made at intervals no
// smaller than this.
double _update_period_sec;
// The start times (both wall-clock time and vtime) of the current
// interval.
double _period_start_time_sec;
double _period_start_vtime_sec;
// Number seq of the concurrent overhead readings within a period
NumberSeq _conc_overhead_seq;
// The latest reading of the concurrent overhead (over a single CPU)
// imposed by the associated concurrent thread, made available at
// the indicated wall-clock time.
double _conc_overhead;
double _time_stamp_sec;
// The number of CPUs that the host machine has (for convenience
// really, so we don't have to keep converting it to a double)
static double _cpu_number;
// Fields that keep a list of all trackers created. This is useful,
// since it allows us to sum up the concurrent overhead without
// having to write code for a specific collector to broadcast a
// request to all its concurrent processes.
COTracker* _next;
static COTracker* _head;
// It indicates that a new period is starting by updating the
// _period_start_time_sec and _period_start_vtime_sec fields.
void resetPeriod(double now_sec, double vnow_sec);
// It updates the latest concurrent overhead reading, taken at a
// given wall-clock time.
void setConcOverhead(double time_stamp_sec, double conc_overhead);
// It determines whether the time stamp of the latest concurrent
// overhead reading is out of date or not.
bool outOfDate(double now_sec) {
// The latest reading is considered out of date if it was taken
// more than 1.2x the update period ago.
return (now_sec - _time_stamp_sec) > 1.2 * _update_period_sec;
}
public:
// The constructor which associates the tracker with a group ID.
COTracker(int group);
// Methods to enable / disable the tracker and query whether it is enabled.
void enable() { _enabled = true; }
void disable() { _enabled = false; }
bool enabled() { return _enabled; }
// It resets the tracker and sets concurrent overhead reading to be
// the given parameter and the associated time stamp to be now.
void reset(double starting_conc_overhead = 0.0);
// The tracker starts tracking. It should only be called from the
// concurrent thread that is tracked by this tracker.
void start();
// It updates the tracker and, if the current period is longer than
// the update period, the concurrent overhead reading will be
// updated. force_end being true indicates that it's the last call
// to update() by this process before the tracker is disabled (the
// tracker can be re-enabled later if necessary). It should only be
// called from the concurrent thread that is tracked by this tracker
// and while the thread has joined the STS.
void update(bool force_end = false);
// It adjusts the contents of the tracker to take into account a STW
// pause.
void updateForSTW(double start_sec, double end_sec);
// It returns the last concurrent overhead reading over a single
// CPU. If the reading is out of date, or the tracker is disabled,
// it returns 0.0.
double concCPUOverhead(double now_sec) {
if (!_enabled || outOfDate(now_sec))
return 0.0;
else
return _conc_overhead;
}
// It returns the last concurrent overhead reading over all CPUs
// that the host machine has. If the reading is out of date, or the
// tracker is disabled, it returns 0.0.
double concOverhead(double now_sec) {
return concCPUOverhead(now_sec) / _cpu_number;
}
double predConcOverhead();
void resetPred();
// statics
// It notifies all trackers about a STW pause.
static void updateAllForSTW(double start_sec, double end_sec);
// It returns the sum of the concurrent overhead readings of all
// available (and enabled) trackers for the given time stamp. The
// overhead is over all the CPUs of the host machine.
static double totalConcOverhead(double now_sec);
// Like the previous method, but it also sums up the overheads per
// group number. The length of the co_per_group array must be at
// least as large as group_num.
static double totalConcOverhead(double now_sec,
size_t group_num,
double* co_per_group);
static double totalPredConcOverhead();
};
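The enable/start/update protocol documented above is the pattern the hunks at the top of this commit delete from ConcurrentG1RefineThread::run() and ConcurrentZFThread::run(): enable() and start() once after wait_for_universe_init(), update(false) on each pass through the work loop, and a final update(true) before the thread terminates. Below is a self-contained toy version of that driver loop; MiniTracker is an invented stand-in for COTracker, and std::clock() (process CPU time) stands in for os::elapsedVTime():
#include <chrono>
#include <cstdio>
#include <ctime>
#include <thread>
// Simplified, illustrative stand-in for COTracker with the same lifecycle:
// enable()/start() once, update() at regular intervals, update(true) to force
// a final reading.
class MiniTracker {
  bool   _enabled = false;
  double _period_start_time_sec  = 0.0;
  double _period_start_vtime_sec = 0.0;
  double _conc_overhead          = 0.0;
  double _update_period_sec      = 0.05;   // take a reading at most every 50 ms
  static double now_sec() {
    using namespace std::chrono;
    return duration<double>(steady_clock::now().time_since_epoch()).count();
  }
  static double vnow_sec() { return double(std::clock()) / CLOCKS_PER_SEC; }
public:
  void enable() { _enabled = true; }
  void start() {
    _period_start_time_sec  = now_sec();
    _period_start_vtime_sec = vnow_sec();
  }
  void update(bool force_end = false) {
    double end_time_sec = now_sec();
    double elapsed = end_time_sec - _period_start_time_sec;
    if (force_end || elapsed > _update_period_sec) {
      // Reached the end of the period: record CPU time / wall-clock time.
      double end_vtime_sec = vnow_sec();
      _conc_overhead = (end_vtime_sec - _period_start_vtime_sec) / elapsed;
      _period_start_time_sec  = end_time_sec;
      _period_start_vtime_sec = end_vtime_sec;
    }
  }
  double overhead() const { return _conc_overhead; }
};
int main() {
  MiniTracker tracker;
  tracker.enable();
  tracker.start();
  // Pretend to be a concurrent worker: do a little work, sleep, and take a
  // reading on each pass, as the removed refinement-thread code did.
  for (int i = 0; i < 10; ++i) {
    volatile double x = 0.0;
    for (int j = 0; j < 2000000; ++j) x = x + j * 0.5;   // "work"
    std::this_thread::sleep_for(std::chrono::milliseconds(20));
    tracker.update(false);
  }
  tracker.update(true);   // final reading before the thread would terminate
  std::printf("last per-CPU overhead reading: %.3f\n", tracker.overhead());
  return 0;
}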
/*
* Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
# include "incls/_precompiled.incl"
# include "incls/_gcOverheadReporter.cpp.incl"
class COReportingThread : public ConcurrentGCThread {
private:
GCOverheadReporter* _reporter;
public:
COReportingThread(GCOverheadReporter* reporter) : _reporter(reporter) {
guarantee( _reporter != NULL, "precondition" );
create_and_start();
}
virtual void run() {
initialize_in_thread();
wait_for_universe_init();
int period_ms = GCOverheadReportingPeriodMS;
while ( true ) {
os::sleep(Thread::current(), period_ms, false);
_sts.join();
double now_sec = os::elapsedTime();
_reporter->collect_and_record_conc_overhead(now_sec);
_sts.leave();
}
terminate();
}
};
GCOverheadReporter* GCOverheadReporter::_reporter = NULL;
GCOverheadReporter::GCOverheadReporter(size_t group_num,
const char* group_names[],
size_t length)
: _group_num(group_num), _prev_end_sec(0.0) {
guarantee( 0 <= group_num && group_num <= MaxGCOverheadGroupNum,
"precondition" );
_base = NEW_C_HEAP_ARRAY(GCOverheadReporterEntry, length);
_top = _base + length;
_curr = _base;
for (size_t i = 0; i < group_num; ++i) {
guarantee( group_names[i] != NULL, "precondition" );
_group_names[i] = group_names[i];
}
}
void
GCOverheadReporter::add(double start_sec, double end_sec,
double* conc_overhead,
double stw_overhead) {
assert( _curr <= _top, "invariant" );
if (_curr == _top) {
guarantee( false, "trace full" );
return;
}
_curr->_start_sec = start_sec;
_curr->_end_sec = end_sec;
for (size_t i = 0; i < _group_num; ++i) {
_curr->_conc_overhead[i] =
(conc_overhead != NULL) ? conc_overhead[i] : 0.0;
}
_curr->_stw_overhead = stw_overhead;
++_curr;
}
void
GCOverheadReporter::collect_and_record_conc_overhead(double end_sec) {
double start_sec = _prev_end_sec;
guarantee( end_sec > start_sec, "invariant" );
double conc_overhead[MaxGCOverheadGroupNum];
COTracker::totalConcOverhead(end_sec, _group_num, conc_overhead);
add_conc_overhead(start_sec, end_sec, conc_overhead);
_prev_end_sec = end_sec;
}
void
GCOverheadReporter::record_stw_start(double start_sec) {
guarantee( start_sec > _prev_end_sec, "invariant" );
collect_and_record_conc_overhead(start_sec);
}
void
GCOverheadReporter::record_stw_end(double end_sec) {
double start_sec = _prev_end_sec;
COTracker::updateAllForSTW(start_sec, end_sec);
add_stw_overhead(start_sec, end_sec, 1.0);
_prev_end_sec = end_sec;
}
void
GCOverheadReporter::print() const {
tty->print_cr("");
tty->print_cr("GC Overhead (%d entries)", _curr - _base);
tty->print_cr("");
GCOverheadReporterEntry* curr = _base;
while (curr < _curr) {
double total = curr->_stw_overhead;
for (size_t i = 0; i < _group_num; ++i)
total += curr->_conc_overhead[i];
tty->print("OVERHEAD %12.8lf %12.8lf ",
curr->_start_sec, curr->_end_sec);
for (size_t i = 0; i < _group_num; ++i)
tty->print("%s %12.8lf ", _group_names[i], curr->_conc_overhead[i]);
tty->print_cr("STW %12.8lf TOT %12.8lf", curr->_stw_overhead, total);
++curr;
}
tty->print_cr("");
}
// statics
void
GCOverheadReporter::initGCOverheadReporter(size_t group_num,
const char* group_names[]) {
guarantee( _reporter == NULL, "should only be called once" );
guarantee( 0 <= group_num && group_num <= MaxGCOverheadGroupNum,
"precondition" );
guarantee( group_names != NULL, "pre-condition" );
if (GCOverheadReporting) {
_reporter = new GCOverheadReporter(group_num, group_names);
new COReportingThread(_reporter);
}
}
void
GCOverheadReporter::recordSTWStart(double start_sec) {
if (_reporter != NULL)
_reporter->record_stw_start(start_sec);
}
void
GCOverheadReporter::recordSTWEnd(double end_sec) {
if (_reporter != NULL)
_reporter->record_stw_end(end_sec);
}
void
GCOverheadReporter::printGCOverhead() {
if (_reporter != NULL)
_reporter->print();
}
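Earlier hunks in this commit remove the G1 wiring for this reporter: G1CollectedHeap::initialize() registered the four groups { "CR", "ZF", "CM", "CL" } via initGCOverheadReporter(), the full and incremental pause paths bracketed themselves with recordSTWStart()/recordSTWEnd(), and print_tracing_info() called printGCOverhead(). Given the format strings in print() above, each recorded period comes out as one OVERHEAD line; the standalone sketch below reproduces such a line for a single invented entry:
#include <cstdio>
int main() {
  // Hypothetical entry mirroring GCOverheadReporterEntry: one period from
  // 10.0 s to 10.5 s with per-group concurrent overheads and no STW overhead.
  const char* group_names[] = { "CR", "ZF", "CM", "CL" };  // refinement, zero-fill, marking, cleanup
  double start_sec = 10.0, end_sec = 10.5;
  double conc_overhead[] = { 0.0150, 0.0020, 0.0300, 0.0000 };
  double stw_overhead = 0.0;
  double total = stw_overhead;
  for (int i = 0; i < 4; ++i) total += conc_overhead[i];
  // Same layout as GCOverheadReporter::print() above.
  std::printf("OVERHEAD %12.8lf %12.8lf ", start_sec, end_sec);
  for (int i = 0; i < 4; ++i)
    std::printf("%s %12.8lf ", group_names[i], conc_overhead[i]);
  std::printf("STW %12.8lf TOT %12.8lf\n", stw_overhead, total);
  return 0;
}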
/*
* Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
// Keeps track of the GC overhead (both concurrent and STW). It stores
// it in a large array and then prints it to tty at the end of the
// execution.
// See coTracker.hpp for the explanation on what groups are.
// Let's set a maximum number of concurrent overhead groups, to
// statically allocate any arrays we need and not to have to
// malloc/free them. This is just a bit more convenient.
enum {
MaxGCOverheadGroupNum = 4
};
typedef struct {
double _start_sec;
double _end_sec;
double _conc_overhead[MaxGCOverheadGroupNum];
double _stw_overhead;
} GCOverheadReporterEntry;
class GCOverheadReporter {
friend class COReportingThread;
private:
enum PrivateConstants {
DefaultReporterLength = 128 * 1024
};
// Reference to the single instance of this class.
static GCOverheadReporter* _reporter;
// These three references point to the array that contains the GC
// overhead entries (_base is the base of the array, _top is the
// address past the last entry of the array, _curr is the next
// entry to be used).
GCOverheadReporterEntry* _base;
GCOverheadReporterEntry* _top;
GCOverheadReporterEntry* _curr;
// The number of concurrent overhead groups.
size_t _group_num;
// The wall-clock time of the end of the last recorded period of GC
// overhead.
double _prev_end_sec;
// Names for the concurrent overhead groups.
const char* _group_names[MaxGCOverheadGroupNum];
// Add a new entry to the large array. conc_overhead being NULL is
// equivalent to an array full of 0.0s. conc_overhead should have a
// length of at least _group_num.
void add(double start_sec, double end_sec,
double* conc_overhead,
double stw_overhead);
// Add an entry that represents concurrent GC overhead.
// conc_overhead must be at least of length _group_num.
// conc_overhead being NULL is equivalent to an array full of 0.0s.
void add_conc_overhead(double start_sec, double end_sec,
double* conc_overhead) {
add(start_sec, end_sec, conc_overhead, 0.0);
}
// Add an entry that represents STW GC overhead.
void add_stw_overhead(double start_sec, double end_sec,
double stw_overhead) {
add(start_sec, end_sec, NULL, stw_overhead);
}
// It records the start of a STW pause (i.e. it records the
// concurrent overhead up to that point)
void record_stw_start(double start_sec);
// It records the end of a STW pause (i.e. it records the overhead
// associated with the pause and adjusts all the trackers to reflect
// the pause)
void record_stw_end(double end_sec);
// It queries all the trackers of their concurrent overhead and
// records it.
void collect_and_record_conc_overhead(double end_sec);
// It prints the contents of the GC overhead array
void print() const;
// Constructor. The same preconditions for group_num and group_names
// from initGCOverheadReporter apply here too.
GCOverheadReporter(size_t group_num,
const char* group_names[],
size_t length = DefaultReporterLength);
public:
// statics
// It initialises the GCOverheadReporter and launches the concurrent
// overhead reporting thread. Both actions happen only if the
// GCOverheadReporting parameter is set. The length of the
// group_names array should be >= group_num and group_num should be
// <= MaxGCOverheadGroupNum. Entries group_names[0..group_num-1]
// should not be NULL.
static void initGCOverheadReporter(size_t group_num,
const char* group_names[]);
// The following three are provided for convenience and they are
// wrappers around record_stw_start(start_sec), record_stw_end(end_sec),
// and print(). Each of these checks whether GC overhead reporting
// is on (i.e. _reporter != NULL) and, if it is, calls the
// corresponding method. Saves from repeating this pattern again and
// again from the places where they need to be called.
static void recordSTWStart(double start_sec);
static void recordSTWEnd(double end_sec);
static void printGCOverhead();
};