Commit cbabf512 authored by J johnc

8010780: G1: Eden occupancy/capacity output wrong after a full GC

Summary: Move the calculation and recording of eden capacity to the start of a GC and print a detailed heap transition for full GCs.
Reviewed-by: tschatzl, jmasa
Parent 5ae8940b
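In short, the patch makes both the young-pause and the full-GC paths snapshot the heap sizes once, at the start of the collection, and print the before/after transition from that snapshot when the collection ends. Below is a minimal, self-contained sketch of that pattern; the struct, function names, and log format are illustrative stand-ins, not HotSpot's internal API.

```cpp
#include <cstddef>
#include <cstdio>

// Illustrative stand-ins for the sizes G1 tracks; not HotSpot's real types.
struct HeapSizes {
  std::size_t eden_used;
  std::size_t survivor_used;
  std::size_t heap_used;
  std::size_t heap_capacity;
};

static HeapSizes before_gc;  // the "before" snapshot, captured once per collection

// Mirrors the role of record_heap_size_info_at_start(): capture everything needed
// for the "before" side of the transition at the start of a pause or full GC.
void record_heap_size_info_at_start(const HeapSizes& current) {
  before_gc = current;
}

// Mirrors the role of print_heap_transition(): report before -> after at the end.
void print_heap_transition(const HeapSizes& after) {
  std::printf("[Eden: %zuK->%zuK Survivors: %zuK->%zuK Heap: %zuK(%zuK)->%zuK(%zuK)]\n",
              before_gc.eden_used / 1024, after.eden_used / 1024,
              before_gc.survivor_used / 1024, after.survivor_used / 1024,
              before_gc.heap_used / 1024, before_gc.heap_capacity / 1024,
              after.heap_used / 1024, after.heap_capacity / 1024);
}
```

In the actual change below, the snapshot is taken by G1CollectorPolicy::record_heap_size_info_at_start(), which is now called from both record_collection_pause_start() and record_full_collection_start().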
g1CollectedHeap.cpp
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1322,234 +1322,240 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
+
+    {
       TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
       TraceCollectorStats tcs(g1mm()->full_collection_counters());
       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
 
       double start = os::elapsedTime();
       g1_policy()->record_full_collection_start();
 
       // Note: When we have a more flexible GC logging framework that
       // allows us to add optional attributes to a GC log record we
       // could consider timing and reporting how long we wait in the
       // following two methods.
       wait_while_free_regions_coming();
       // If we start the compaction before the CM threads finish
       // scanning the root regions we might trip them over as we'll
       // be moving objects / updating references. So let's wait until
       // they are done. By telling them to abort, they should complete
       // early.
       _cm->root_regions()->abort();
       _cm->root_regions()->wait_until_scan_finished();
       append_secondary_free_list_if_not_empty_with_lock();
 
       gc_prologue(true);
       increment_total_collections(true /* full gc */);
       increment_old_marking_cycles_started();
 
-      size_t g1h_prev_used = used();
       assert(used() == recalculate_used(), "Should be equal");
 
       verify_before_gc();
 
       pre_full_gc_dump();
 
       COMPILER2_PRESENT(DerivedPointerTable::clear());
 
       // Disable discovery and empty the discovered lists
       // for the CM ref processor.
       ref_processor_cm()->disable_discovery();
       ref_processor_cm()->abandon_partial_discovery();
       ref_processor_cm()->verify_no_references_recorded();
 
       // Abandon current iterations of concurrent marking and concurrent
       // refinement, if any are in progress. We have to do this before
       // wait_until_scan_finished() below.
       concurrent_mark()->abort();
 
       // Make sure we'll choose a new allocation region afterwards.
       release_mutator_alloc_region();
       abandon_gc_alloc_regions();
       g1_rem_set()->cleanupHRRS();
 
       // We should call this after we retire any currently active alloc
       // regions so that all the ALLOC / RETIRE events are generated
       // before the start GC event.
       _hr_printer.start_gc(true /* full */, (size_t) total_collections());
 
       // We may have added regions to the current incremental collection
       // set between the last GC or pause and now. We need to clear the
       // incremental collection set and then start rebuilding it afresh
       // after this full GC.
       abandon_collection_set(g1_policy()->inc_cset_head());
       g1_policy()->clear_incremental_cset();
       g1_policy()->stop_incremental_cset_building();
 
       tear_down_region_sets(false /* free_list_only */);
       g1_policy()->set_gcs_are_young(true);
 
       // See the comments in g1CollectedHeap.hpp and
       // G1CollectedHeap::ref_processing_init() about
       // how reference processing currently works in G1.
 
       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
 
       // Temporarily clear the STW ref processor's _is_alive_non_header field.
       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
 
       ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
 
       // Do collection work
       {
         HandleMark hm;  // Discard invalid handles created during gc
         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
       }
 
       assert(free_regions() == 0, "we should not have added any free regions");
       rebuild_region_sets(false /* free_list_only */);
 
       // Enqueue any discovered reference objects that have
       // not been removed from the discovered lists.
       ref_processor_stw()->enqueue_discovered_references();
 
       COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 
       MemoryService::track_memory_usage();
 
       verify_after_gc();
 
       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
       ref_processor_stw()->verify_no_references_recorded();
 
       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
       ClassLoaderDataGraph::purge();
 
       // Note: since we've just done a full GC, concurrent
       // marking is no longer active. Therefore we need not
       // re-enable reference discovery for the CM ref processor.
       // That will be done at the start of the next marking cycle.
       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
       ref_processor_cm()->verify_no_references_recorded();
 
       reset_gc_time_stamp();
       // Since everything potentially moved, we will clear all remembered
       // sets, and clear all cards. Later we will rebuild remebered
       // sets. We will also reset the GC time stamps of the regions.
       clear_rsets_post_compaction();
       check_gc_time_stamps();
 
       // Resize the heap if necessary.
       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
 
       if (_hr_printer.is_active()) {
         // We should do this after we potentially resize the heap so
         // that all the COMMIT / UNCOMMIT events are generated before
         // the end GC event.
         print_hrs_post_compaction();
         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
       }
 
       if (_cg1r->use_cache()) {
         _cg1r->clear_and_record_card_counts();
         _cg1r->clear_hot_cache();
       }
 
       // Rebuild remembered sets of all regions.
       if (G1CollectedHeap::use_parallel_gc_threads()) {
         uint n_workers =
           AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                                   workers()->active_workers(),
                                                   Threads::number_of_non_daemon_threads());
         assert(UseDynamicNumberOfGCThreads ||
                n_workers == workers()->total_workers(),
                "If not dynamic should be using all the workers");
         workers()->set_active_workers(n_workers);
         // Set parallel threads in the heap (_n_par_threads) only
         // before a parallel phase and always reset it to 0 after
         // the phase so that the number of parallel threads does
         // no get carried forward to a serial phase where there
         // may be code that is "possibly_parallel".
         set_par_threads(n_workers);
 
         ParRebuildRSTask rebuild_rs_task(this);
         assert(check_heap_region_claim_values(
                HeapRegion::InitialClaimValue), "sanity check");
         assert(UseDynamicNumberOfGCThreads ||
                workers()->active_workers() == workers()->total_workers(),
                "Unless dynamic should use total workers");
         // Use the most recent number of active workers
         assert(workers()->active_workers() > 0,
                "Active workers not properly set");
         set_par_threads(workers()->active_workers());
         workers()->run_task(&rebuild_rs_task);
         set_par_threads(0);
         assert(check_heap_region_claim_values(
                HeapRegion::RebuildRSClaimValue), "sanity check");
         reset_heap_region_claim_values();
       } else {
         RebuildRSOutOfRegionClosure rebuild_rs(this);
         heap_region_iterate(&rebuild_rs);
       }
 
-      if (G1Log::fine()) {
-        print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
-      }
-
       if (true) { // FIXME
         MetaspaceGC::compute_new_size();
       }
 
-      // Start a new incremental collection set for the next pause
-      assert(g1_policy()->collection_set() == NULL, "must be");
-      g1_policy()->start_incremental_cset_building();
-
-      // Clear the _cset_fast_test bitmap in anticipation of adding
-      // regions to the incremental collection set for the next
-      // evacuation pause.
-      clear_cset_fast_test();
-
-      init_mutator_alloc_region();
-
-      double end = os::elapsedTime();
-      g1_policy()->record_full_collection_end();
-
 #ifdef TRACESPINNING
       ParallelTaskTerminator::print_termination_counts();
 #endif
 
-      gc_epilogue(true);
-
       // Discard all rset updates
       JavaThread::dirty_card_queue_set().abandon_logs();
       assert(!G1DeferredRSUpdate
              || (G1DeferredRSUpdate &&
                  (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
 
       _young_list->reset_sampled_info();
       // At this point there should be no regions in the
       // entire heap tagged as young.
       assert(check_young_list_empty(true /* check_heap */),
              "young list should be empty at this point");
 
       // Update the number of full collections that have been completed.
       increment_old_marking_cycles_completed(false /* concurrent */);
 
       _hrs.verify_optional();
       verify_region_sets_optional();
 
-      print_heap_after_gc();
+      // Start a new incremental collection set for the next pause
+      assert(g1_policy()->collection_set() == NULL, "must be");
+      g1_policy()->start_incremental_cset_building();
+
+      // Clear the _cset_fast_test bitmap in anticipation of adding
+      // regions to the incremental collection set for the next
+      // evacuation pause.
+      clear_cset_fast_test();
+
+      init_mutator_alloc_region();
+
+      double end = os::elapsedTime();
+      g1_policy()->record_full_collection_end();
+
+      if (G1Log::fine()) {
+        g1_policy()->print_heap_transition();
+      }
 
       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
       // before any GC notifications are raised.
       g1mm()->update_sizes();
+
+      gc_epilogue(true);
+    }
+
+    if (G1Log::finer()) {
+      g1_policy()->print_detailed_heap_transition();
+    }
+
+    print_heap_after_gc();
+
+    post_full_gc_dump();
   }
 
-  post_full_gc_dump();
-
   return true;
 }
@@ -3829,7 +3835,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         // The elapsed time induced by the start time below deliberately elides
         // the possible verification above.
         double sample_start_time_sec = os::elapsedTime();
-        size_t start_used_bytes = used();
 
 #if YOUNG_LIST_VERBOSE
         gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
@@ -3837,8 +3842,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-        g1_policy()->record_collection_pause_start(sample_start_time_sec,
-                                                   start_used_bytes);
+        g1_policy()->record_collection_pause_start(sample_start_time_sec);
 
         double scan_wait_start = os::elapsedTime();
         // We have to wait until the CM threads finish scanning the
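The full-GC body above is now wrapped in its own block, so the TraceTime, TraceCollectorStats, and TraceMemoryManagerStats objects go out of scope (and close out their log records and notifications) before the detailed heap transition and print_heap_after_gc() run. A rough illustration of that RAII-scoping idea with a generic timer; this is not HotSpot code, just the shape of the pattern:

```cpp
#include <chrono>
#include <cstdio>

// A generic stand-in for HotSpot's TraceTime-style RAII loggers: the log
// record is opened in the constructor and closed in the destructor.
class ScopedTraceTime {
 public:
  explicit ScopedTraceTime(const char* tag)
      : tag_(tag), start_(std::chrono::steady_clock::now()) {
    std::printf("[%s start]\n", tag_);
  }
  ~ScopedTraceTime() {
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                  std::chrono::steady_clock::now() - start_).count();
    std::printf("[%s end, %lld ms]\n", tag_, static_cast<long long>(ms));
  }
 private:
  const char* tag_;
  std::chrono::steady_clock::time_point start_;
};

void full_gc_sketch() {
  {
    ScopedTraceTime trace("Full GC");  // plays the role of TraceTime / TraceMemoryManagerStats
    // ... do the collection, update monitoring sizes, run the GC epilogue ...
  }                                    // destructor fires here, closing the log record
  // Only after the traced scope has ended is the detailed heap transition printed,
  // mirroring where print_detailed_heap_transition() now sits in the patch.
  std::printf("   [Eden: ... Survivors: ... Heap: ...]\n");
}
```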
g1CollectorPolicy.cpp
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -406,7 +406,6 @@ void G1CollectorPolicy::init() {
   }
   _free_regions_at_end_of_collection = _g1->free_regions();
   update_young_list_target_length();
-  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;
 
   // We may immediately start allocating regions and placing them on the
   // collection set list. Initialize the per-collection set info
@@ -746,6 +745,7 @@ G1CollectorPolicy::verify_young_ages(HeapRegion* head,
 
 void G1CollectorPolicy::record_full_collection_start() {
   _full_collection_start_sec = os::elapsedTime();
+  record_heap_size_info_at_start();
   // Release the future to-space so that it is available for compaction into.
   _g1->set_full_collection();
 }
@@ -788,8 +788,7 @@ void G1CollectorPolicy::record_stop_world_start() {
   _stop_world_start = os::elapsedTime();
 }
 
-void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
-                                                      size_t start_used) {
+void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
   // We only need to do this here as the policy will only be applied
   // to the GC we're about to start. so, no point is calculating this
   // every time we calculate / recalculate the target young length.
@@ -803,19 +802,14 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
   _trace_gen0_time_data.record_start_collection(s_w_t_ms);
   _stop_world_start = 0.0;
 
+  record_heap_size_info_at_start();
+
   phase_times()->record_cur_collection_start_sec(start_time_sec);
-  _cur_collection_pause_used_at_start_bytes = start_used;
-  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
   _pending_cards = _g1->pending_card_num();
   _collection_set_bytes_used_before = 0;
   _bytes_copied_during_gc = 0;
 
-  YoungList* young_list = _g1->young_list();
-  _eden_bytes_before_gc = young_list->eden_used_bytes();
-  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
-  _capacity_before_gc = _g1->capacity();
-
   _last_gc_was_young = false;
 
   // do that for any other surv rate groups
@@ -1153,6 +1147,21 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
   byte_size_in_proper_unit((double)(bytes)), \
   proper_unit_for_byte_size((bytes))
 
+void G1CollectorPolicy::record_heap_size_info_at_start() {
+  YoungList* young_list = _g1->young_list();
+  _eden_bytes_before_gc = young_list->eden_used_bytes();
+  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
+  _capacity_before_gc = _g1->capacity();
+
+  _cur_collection_pause_used_at_start_bytes = _g1->used();
+  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
+
+  size_t eden_capacity_before_gc =
+    (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_bytes_before_gc;
+
+  _prev_eden_capacity = eden_capacity_before_gc;
+}
+
 void G1CollectorPolicy::print_heap_transition() {
   _g1->print_size_transition(gclog_or_tty,
     _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity());
@@ -1183,8 +1192,6 @@ void G1CollectorPolicy::print_detailed_heap_transition() {
     EXT_SIZE_PARAMS(_capacity_before_gc),
     EXT_SIZE_PARAMS(used),
     EXT_SIZE_PARAMS(capacity));
-
-  _prev_eden_capacity = eden_capacity;
 }
 
 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
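record_heap_size_info_at_start() derives the eden capacity it will later report from the young list target length and the survivor usage sampled at the start of the GC, rather than from post-GC state. A standalone arithmetic check of that formula follows; the numbers are made up purely for illustration and do not come from any real G1 configuration.

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical values, chosen only to exercise the formula.
  const std::size_t region_size          = 1 * 1024 * 1024;  // stands in for HeapRegion::GrainBytes
  const std::size_t young_target_regions = 100;              // stands in for _young_list_target_length
  const std::size_t survivor_bytes       = 8 * 1024 * 1024;  // stands in for _survivor_bytes_before_gc

  // eden capacity before GC = (target young size) - (survivor space already in use)
  const std::size_t eden_capacity_before_gc =
      (young_target_regions * region_size) - survivor_bytes;

  std::printf("eden capacity before GC: %zu MB\n",
              eden_capacity_before_gc / (1024 * 1024));  // prints 92 MB
  return 0;
}
```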
g1CollectorPolicy.hpp
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -671,34 +671,36 @@ public:
 
   bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
 
-  // Update the heuristic info to record a collection pause of the given
-  // start time, where the given number of bytes were used at the start.
-  // This may involve changing the desired size of a collection set.
-
-  void record_stop_world_start();
-
-  void record_collection_pause_start(double start_time_sec, size_t start_used);
+  // Record the start and end of an evacuation pause.
+  void record_collection_pause_start(double start_time_sec);
+  void record_collection_pause_end(double pause_time_ms);
+
+  // Record the start and end of a full collection.
+  void record_full_collection_start();
+  void record_full_collection_end();
 
   // Must currently be called while the world is stopped.
-  void record_concurrent_mark_init_end(double
-                                           mark_init_elapsed_time_ms);
+  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
 
+  // Record start and end of remark.
   void record_concurrent_mark_remark_start();
   void record_concurrent_mark_remark_end();
 
+  // Record start, end, and completion of cleanup.
   void record_concurrent_mark_cleanup_start();
   void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
   void record_concurrent_mark_cleanup_completed();
 
-  void record_concurrent_pause();
-
-  void record_collection_pause_end(double pause_time);
+  // Records the information about the heap size for reporting in
+  // print_detailed_heap_transition
+  void record_heap_size_info_at_start();
 
+  // Print heap sizing transition (with less and more detail).
   void print_heap_transition();
   void print_detailed_heap_transition();
 
-  // Record the fact that a full collection occurred.
-  void record_full_collection_start();
-  void record_full_collection_end();
+  void record_stop_world_start();
+  void record_concurrent_pause();
 
   // Record how much space we copied during a GC. This is typically
   // called when a GC alloc region is being retired.