Commit 7864cfc8 authored by brutisso

7132311: G1: assert((s == klass->oop_size(this)) || (Universe::heap()->is_gc_active() && ((is_typeArray()...
Summary: Move the check for when to call collect() to before we do a humongous object allocation
Reviewed-by: stefank, tonyp
Parent 9b3f1ad9
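
For orientation before the diff: the sketch below is a minimal, self-contained C++ model (not HotSpot code; every helper here is a hypothetical stand-in) of the allocation path as restructured by this commit. The point is the ordering: the concurrent-marking check runs before the humongous allocation and already accounts for the request size, so a successful allocation can simply be returned, and the old workaround of filling the fresh block with a dummy object and holding a Handle to it across collect() goes away.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-ins for the G1 internals touched by the patch.
static bool need_to_start_conc_mark(const char* /*source*/, size_t /*alloc_words*/) {
  // Stub: the real heuristic compares old-generation occupancy plus the
  // pending allocation against the InitiatingHeapOccupancyPercent threshold.
  return false;
}

static void request_concurrent_cycle() {
  // Models collect(GCCause::_g1_humongous_allocation).
  std::puts("concurrent cycle requested");
}

static void* humongous_obj_allocate(size_t word_size) {
  // Stub allocator standing in for region-based humongous allocation.
  return std::malloc(word_size * sizeof(void*));
}

void* attempt_allocation_humongous_sketch(size_t word_size) {
  // New order: decide about starting a marking cycle *before* allocating,
  // passing the request size so the occupancy check accounts for it.
  if (need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
    request_concurrent_cycle();
  }

  // Then allocate; a successful result is returned immediately, so there is
  // no live-but-unreferenced block to protect across a GC.
  void* result = humongous_obj_allocate(word_size);
  if (result != NULL) {
    return result;
  }

  // Failure path (GC locker handling, retry loop) elided in this sketch.
  return NULL;
}

int main() {
  void* p = attempt_allocation_humongous_sketch(1024);
  std::printf("allocated: %p\n", p);
  std::free(p);
  return 0;
}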
@@ -1029,6 +1029,15 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
   assert(isHumongous(word_size), "attempt_allocation_humongous() "
          "should only be called for humongous allocations");
 
+  // Humongous objects can exhaust the heap quickly, so we should check if we
+  // need to start a marking cycle at each humongous object allocation. We do
+  // the check before we do the actual allocation. The reason for doing it
+  // before the allocation is that we avoid having to keep track of the newly
+  // allocated memory while we do a GC.
+  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
+    collect(GCCause::_g1_humongous_allocation);
+  }
+
   // We will loop until a) we manage to successfully perform the
   // allocation or b) we successfully schedule a collection which
   // fails to perform the allocation. b) is the only case when we'll
@@ -1045,8 +1054,10 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
     // regions, we'll first try to do the allocation without doing a
     // collection hoping that there's enough space in the heap.
     result = humongous_obj_allocate(word_size);
+    if (result != NULL) {
+      return result;
+    }
 
-    if (result == NULL) {
-      if (GC_locker::is_active_and_needs_gc()) {
-        should_try_gc = false;
-      } else {
+    if (GC_locker::is_active_and_needs_gc()) {
+      should_try_gc = false;
+    } else {
@@ -1055,21 +1066,6 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
-        should_try_gc = true;
-      }
-    }
-
-    if (result != NULL) {
-      if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation")) {
-        // We need to release the Heap_lock before we try to call collect().
-        // The result will not be stored in any object before this method
-        // returns, so the GC might miss it. Thus, we create a handle to the result
-        // and fake an object at that place.
-        CollectedHeap::fill_with_object(result, word_size, false);
-        Handle h((oop)result);
-        collect(GCCause::_g1_humongous_allocation);
-        assert(result == (HeapWord*)h(), "Humongous objects should not be moved by collections");
-      }
-      return result;
-    }
+      should_try_gc = true;
+    }
 
     if (should_try_gc) {
       // If we failed to allocate the humongous object, we should try to
...
@@ -1138,36 +1138,41 @@ double G1CollectorPolicy::max_sum(double* data1, double* data2) {
   return ret;
 }
 
-bool G1CollectorPolicy::need_to_start_conc_mark(const char* source) {
-  if (_g1->mark_in_progress()) {
+bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
+  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
     return false;
   }
 
   size_t marking_initiating_used_threshold =
     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
+  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 
-  if (cur_used_bytes > marking_initiating_used_threshold) {
+  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
     if (gcs_are_young()) {
-      ergo_verbose4(ErgoConcCycles,
+      ergo_verbose5(ErgoConcCycles,
                     "request concurrent cycle initiation",
                     ergo_format_reason("occupancy higher than threshold")
                     ergo_format_byte("occupancy")
+                    ergo_format_byte("allocation request")
                     ergo_format_byte_perc("threshold")
                     ergo_format_str("source"),
                     cur_used_bytes,
+                    alloc_byte_size,
                     marking_initiating_used_threshold,
                     (double) InitiatingHeapOccupancyPercent,
                     source);
       return true;
     } else {
-      ergo_verbose4(ErgoConcCycles,
+      ergo_verbose5(ErgoConcCycles,
                     "do not request concurrent cycle initiation",
                     ergo_format_reason("still doing mixed collections")
                     ergo_format_byte("occupancy")
+                    ergo_format_byte("allocation request")
                     ergo_format_byte_perc("threshold")
                     ergo_format_str("source"),
                     cur_used_bytes,
+                    alloc_byte_size,
                     marking_initiating_used_threshold,
                     (double) InitiatingHeapOccupancyPercent,
                     source);
...
@@ -799,7 +799,7 @@ public:
   GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
 
-  bool need_to_start_conc_mark(const char* source);
+  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
 
   // Update the heuristic info to record a collection pause of the given
   // start time, where the given number of bytes were used at the start.
...
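
The heuristic changed in G1CollectorPolicy::need_to_start_conc_mark() above is plain arithmetic; the following self-contained sketch (not the HotSpot implementation — the heap size, occupancy, and the IHOP value of 45 are made-up example inputs) shows how adding the pending allocation to the current non-young occupancy can push the check over the InitiatingHeapOccupancyPercent threshold even when occupancy alone would not, which is why the new alloc_word_size parameter (defaulting to 0 for all other callers) matters for humongous requests.

#include <cstddef>
#include <cstdio>

// 8-byte heap words, as on 64-bit HotSpot.
static const size_t kHeapWordSize = 8;

// Mirrors the shape of the updated check:
//   threshold = (capacity / 100) * InitiatingHeapOccupancyPercent
//   trigger if (non_young_used + alloc_bytes) > threshold
static bool should_start_marking(size_t capacity_bytes,
                                 size_t non_young_used_bytes,
                                 size_t alloc_word_size,
                                 unsigned ihop_percent) {
  size_t threshold = (capacity_bytes / 100) * ihop_percent;
  size_t alloc_byte_size = alloc_word_size * kHeapWordSize;
  return (non_young_used_bytes + alloc_byte_size) > threshold;
}

int main() {
  const size_t MB = 1024 * 1024;
  size_t capacity = 1024 * MB;                    // 1 GB heap (example)
  size_t used     = 400 * MB;                     // non-young occupancy (example)
  size_t request  = (100 * MB) / kHeapWordSize;   // 100 MB humongous request, in words

  // Occupancy alone (400 MB) is below the ~460 MB threshold at IHOP=45,
  // but occupancy plus the pending 100 MB request is above it.
  std::printf("without request: %s\n",
              should_start_marking(capacity, used, 0, 45) ? "start" : "skip");
  std::printf("with request:    %s\n",
              should_start_marking(capacity, used, request, 45) ? "start" : "skip");
  return 0;
}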