Commit a5e75e63 authored by brutisso

6976060: G1: humongous object allocations should initiate marking cycles when necessary

Reviewed-by: tonyp, johnc
Parent a30e9e27
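In outline, the patch makes every humongous allocation run the same initiating-heap-occupancy check that evacuation pauses use, and requests a concurrent marking cycle when the check fires. A minimal standalone sketch of that decision rule, with hypothetical parameter names standing in for the G1 fields (the real version is need_to_start_conc_mark, added below):

    #include <cstddef>

    // Standalone model of the check this patch adds: request a concurrent
    // marking cycle when non-young occupancy crosses the
    // InitiatingHeapOccupancyPercent threshold and no cycle is running yet.
    bool needs_marking_cycle(size_t capacity_bytes,       // _g1->capacity()
                             size_t non_young_used_bytes, // old + humongous regions
                             unsigned ihop_percent,       // InitiatingHeapOccupancyPercent
                             bool mark_in_progress) {
      if (mark_in_progress) {
        return false;  // a cycle is already running; do not request another
      }
      size_t threshold = (capacity_bytes / 100) * ihop_percent;
      return non_young_used_bytes > threshold;
    }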
@@ -1045,10 +1045,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
     // regions, we'll first try to do the allocation without doing a
     // collection hoping that there's enough space in the heap.
     result = humongous_obj_allocate(word_size);
-    if (result != NULL) {
-      return result;
-    }
 
+    if (result == NULL) {
       if (GC_locker::is_active_and_needs_gc()) {
         should_try_gc = false;
       } else {
@@ -1057,6 +1055,15 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
         should_try_gc = true;
       }
     }
+  }
 
+  if (result != NULL) {
+    if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation")) {
+      // We need to release the Heap_lock before we try to call collect
+      collect(GCCause::_g1_humongous_allocation);
+    }
+    return result;
+  }
+
   if (should_try_gc) {
     // If we failed to allocate the humongous object, we should try to
@@ -1111,7 +1118,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
     return _mutator_alloc_region.attempt_allocation_locked(word_size,
                                                            false /* bot_updates */);
   } else {
-    return humongous_obj_allocate(word_size);
+    HeapWord* result = humongous_obj_allocate(word_size);
+    if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
+      g1_policy()->set_initiate_conc_mark_if_possible();
+    }
+    return result;
   }
 
   ShouldNotReachHere();
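The two call sites arm marking differently. attempt_allocation_humongous runs in a mutator thread, so after a successful allocation it can enqueue a whole collection via collect(GCCause::_g1_humongous_allocation). attempt_allocation_at_safepoint already runs inside a pause, so it cannot schedule another one; it only sets the policy flag, and the next evacuation pause then doubles as the initial-mark pause. Schematically, as a condensed paraphrase of the two hunks above (not a verbatim excerpt):

    // Mutator slow path: a new pause may be scheduled immediately.
    if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation")) {
      collect(GCCause::_g1_humongous_allocation);
    }

    // Safepoint path: already inside a pause, so just arm the policy;
    // the next evacuation pause will start the concurrent cycle.
    if (g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
      g1_policy()->set_initiate_conc_mark_if_possible();
    }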
@@ -2295,7 +2306,8 @@ size_t G1CollectedHeap::unsafe_max_alloc() {
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   return
     ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
-     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
+     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
+      cause == GCCause::_g1_humongous_allocation);
 }
 
 #ifndef PRODUCT
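Adding _g1_humongous_allocation here is what routes the collect() call above to a concurrent cycle rather than a full STW collection: G1CollectedHeap::collect consults this predicate and, when it returns true for the cause, schedules an initial-mark evacuation pause. Roughly, condensed from the collect() code of this era (treat the exact argument list as approximate):

    if (should_do_concurrent_full_gc(cause)) {
      // Schedule an evacuation pause that also starts concurrent marking.
      VM_G1IncCollectionPause op(gc_count_before, word_size,
                                 true /* should_initiate_conc_mark */,
                                 g1_policy()->max_pause_time_ms(),
                                 cause);
      VMThread::execute(&op);
    } else {
      // ... otherwise fall back to a full collection ...
    }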
...
@@ -355,6 +355,7 @@ private:
   // explicitly started if:
   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
+  // (c) cause == _g1_humongous_allocation
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
   // Keeps track of how many "full collections" (i.e., Full GCs or
@@ -1172,6 +1173,10 @@ public:
     _old_set.remove(hr);
   }
 
+  size_t non_young_capacity_bytes() {
+    return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
+  }
+
   void set_free_regions_coming();
   void reset_free_regions_coming();
   bool free_regions_coming() { return _free_regions_coming; }
...
@@ -213,8 +213,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _survivor_bytes_before_gc(0),
   _capacity_before_gc(0),
 
-  _prev_collection_pause_used_at_end_bytes(0),
-
   _eden_cset_region_length(0),
   _survivor_cset_region_length(0),
   _old_cset_region_length(0),
@@ -1140,6 +1138,45 @@ double G1CollectorPolicy::max_sum(double* data1, double* data2) {
   return ret;
 }
 
+bool G1CollectorPolicy::need_to_start_conc_mark(const char* source) {
+  if (_g1->mark_in_progress()) {
+    return false;
+  }
+
+  size_t marking_initiating_used_threshold =
+    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
+  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
+
+  if (cur_used_bytes > marking_initiating_used_threshold) {
+    if (gcs_are_young()) {
+      ergo_verbose4(ErgoConcCycles,
+                    "request concurrent cycle initiation",
+                    ergo_format_reason("occupancy higher than threshold")
+                    ergo_format_byte("occupancy")
+                    ergo_format_byte_perc("threshold")
+                    ergo_format_str("source"),
+                    cur_used_bytes,
+                    marking_initiating_used_threshold,
+                    (double) InitiatingHeapOccupancyPercent,
+                    source);
+      return true;
+    } else {
+      ergo_verbose4(ErgoConcCycles,
+                    "do not request concurrent cycle initiation",
+                    ergo_format_reason("still doing mixed collections")
+                    ergo_format_byte("occupancy")
+                    ergo_format_byte_perc("threshold")
+                    ergo_format_str("source"),
+                    cur_used_bytes,
+                    marking_initiating_used_threshold,
+                    (double) InitiatingHeapOccupancyPercent,
+                    source);
+    }
+  }
+
+  return false;
+}
+
 // Anything below that is considered to be zero
 #define MIN_TIMER_GRANULARITY 0.0000001
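As a worked example of the new threshold: with a 2 GB heap and the default InitiatingHeapOccupancyPercent of 45, marking_initiating_used_threshold = (2048 MB / 100) * 45, which is about 921 MB, so an allocation that pushes old-plus-humongous occupancy past that point now requests a concurrent cycle instead of waiting for the next evacuation pause to notice. The occupancy measured is non_young_capacity_bytes(), i.e. old and humongous regions only; eden and survivor regions are excluded because they are evacuated at every young pause anyway.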
@@ -1166,44 +1203,16 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
 #endif // PRODUCT
 
   last_pause_included_initial_mark = during_initial_mark_pause();
-  if (last_pause_included_initial_mark)
+  if (last_pause_included_initial_mark) {
     record_concurrent_mark_init_end(0.0);
+  }
 
-  size_t marking_initiating_used_threshold =
-    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
-
-  if (!_g1->mark_in_progress() && !_last_young_gc) {
-    assert(!last_pause_included_initial_mark, "invariant");
-    if (cur_used_bytes > marking_initiating_used_threshold) {
-      if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
-        assert(!during_initial_mark_pause(), "we should not see this here");
-
-        ergo_verbose3(ErgoConcCycles,
-                      "request concurrent cycle initiation",
-                      ergo_format_reason("occupancy higher than threshold")
-                      ergo_format_byte("occupancy")
-                      ergo_format_byte_perc("threshold"),
-                      cur_used_bytes,
-                      marking_initiating_used_threshold,
-                      (double) InitiatingHeapOccupancyPercent);
-
-        // Note: this might have already been set, if during the last
-        // pause we decided to start a cycle but at the beginning of
-        // this pause we decided to postpone it. That's OK.
-        set_initiate_conc_mark_if_possible();
-      } else {
-        ergo_verbose2(ErgoConcCycles,
-                      "do not request concurrent cycle initiation",
-                      ergo_format_reason("occupancy lower than previous occupancy")
-                      ergo_format_byte("occupancy")
-                      ergo_format_byte("previous occupancy"),
-                      cur_used_bytes,
-                      _prev_collection_pause_used_at_end_bytes);
-      }
-    }
-  }
-
-  _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
+  if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
+    // Note: this might have already been set, if during the last
+    // pause we decided to start a cycle but at the beginning of
+    // this pause we decided to postpone it. That's OK.
+    set_initiate_conc_mark_if_possible();
+  }
 
   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
                           end_time_sec, false);
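Behaviorally, record_collection_pause_end loses the ratchet that compared occupancy against _prev_collection_pause_used_at_end_bytes: the old code only requested a cycle when occupancy had grown since the previous pause, which could suppress marking after a large humongous allocation. The new check is stateless, so the same predicate can serve the end-of-pause path and both humongous allocation paths. In condensed form, paraphrasing the diff above:

    // old trigger: !_g1->mark_in_progress() && !_last_young_gc
    //              && cur_used_bytes > marking_initiating_used_threshold
    //              && cur_used_bytes > _prev_collection_pause_used_at_end_bytes
    // new trigger: !_last_young_gc && need_to_start_conc_mark("end of GC")
    //              (mark_in_progress and the threshold test live inside
    //               need_to_start_conc_mark; the previous-occupancy test is gone)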
...
@@ -177,7 +177,6 @@ private:
   double _cur_collection_start_sec;
   size_t _cur_collection_pause_used_at_start_bytes;
   size_t _cur_collection_pause_used_regions_at_start;
-  size_t _prev_collection_pause_used_at_end_bytes;
   double _cur_collection_par_time_ms;
   double _cur_satb_drain_time_ms;
   double _cur_clear_ct_time_ms;
@@ -800,6 +799,8 @@ public:
   GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
 
+  bool need_to_start_conc_mark(const char* source);
+
   // Update the heuristic info to record a collection pause of the given
   // start time, where the given number of bytes were used at the start.
   // This may involve changing the desired size of a collection set.
...
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,8 +74,9 @@ void VM_G1IncCollectionPause::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||
      ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
-      (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
-         "only a GC locker or a System.gc() induced GC should start a cycle");
+      (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
+       _gc_cause == GCCause::_g1_humongous_allocation),
+         "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
...
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,6 +84,9 @@ const char* GCCause::to_string(GCCause::Cause cause) {
     case _g1_inc_collection_pause:
       return "G1 Evacuation Pause";
 
+    case _g1_humongous_allocation:
+      return "G1 Humongous Allocation";
+
     case _last_ditch_collection:
       return "Last ditch collection";
...
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,6 +66,7 @@ class GCCause : public AllStatic {
     _adaptive_size_policy,
 
     _g1_inc_collection_pause,
+    _g1_humongous_allocation,
 
     _last_ditch_collection,
     _last_gc_cause
...