Commit d1361411 authored by S sla

Merge

@@ -86,7 +86,7 @@ ifeq ($(INCLUDE_ALL_GCS), false)
concurrentMark.cpp concurrentMarkThread.cpp dirtyCardQueue.cpp g1AllocRegion.cpp \
g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
-g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
+g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp g1OopClosures.cpp \
g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
......
@@ -4539,7 +4539,7 @@ HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
ParGCAllocBuffer(gclab_word_size), _retired(false) { }
-G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
+G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
: _g1h(g1h),
_refs(g1h->task_queue(queue_num)),
_dcq(&g1h->dirty_card_queue_set()),
@@ -4549,7 +4549,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
_term_attempts(0),
_surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
_tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
-_age_table(false),
+_age_table(false), _scanner(g1h, this, rp),
_strong_roots_time(0), _term_time(0),
_alloc_buffer_waste(0), _undo_waste(0) {
// we allocate G1YoungSurvRateNumRegions plus one entries, since
@@ -4658,14 +4658,10 @@ void G1ParScanThreadState::trim_queue() {
G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
G1ParScanThreadState* par_scan_state) :
-_g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
-_par_scan_state(par_scan_state),
-_worker_id(par_scan_state->queue_num()),
-_during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
-_mark_in_progress(_g1->mark_in_progress()) { }
+_g1(g1), _par_scan_state(par_scan_state),
+_worker_id(par_scan_state->queue_num()) { }
-template <G1Barrier barrier, bool do_mark_object>
-void G1ParCopyClosure<barrier, do_mark_object>::mark_object(oop obj) {
+void G1ParCopyHelper::mark_object(oop obj) {
#ifdef ASSERT
HeapRegion* hr = _g1->heap_region_containing(obj);
assert(hr != NULL, "sanity");
@@ -4676,9 +4672,7 @@ void G1ParCopyClosure<barrier, do_mark_object>
_cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
}
-template <G1Barrier barrier, bool do_mark_object>
-void G1ParCopyClosure<barrier, do_mark_object>
-::mark_forwarded_object(oop from_obj, oop to_obj) {
+void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
#ifdef ASSERT
assert(from_obj->is_forwarded(), "from obj should be forwarded");
assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
@@ -4700,27 +4694,25 @@ void G1ParCopyClosure<barrier, do_mark_object>
_cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
}
-template <G1Barrier barrier, bool do_mark_object>
-oop G1ParCopyClosure<barrier, do_mark_object>
-::copy_to_survivor_space(oop old) {
+oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
size_t word_sz = old->size();
-HeapRegion* from_region = _g1->heap_region_containing_raw(old);
+HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
// +1 to make the -1 indexes valid...
int young_index = from_region->young_index_in_cset()+1;
assert( (from_region->is_young() && young_index > 0) ||
(!from_region->is_young() && young_index == 0), "invariant" );
-G1CollectorPolicy* g1p = _g1->g1_policy();
+G1CollectorPolicy* g1p = _g1h->g1_policy();
markOop m = old->mark();
int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
: m->age();
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
word_sz);
-HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
+HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
#ifndef PRODUCT
// Should this evacuation fail?
-if (_g1->evacuation_should_fail()) {
+if (_g1h->evacuation_should_fail()) {
if (obj_ptr != NULL) {
-_par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
+undo_allocation(alloc_purpose, obj_ptr, word_sz);
obj_ptr = NULL;
}
}
@@ -4729,7 +4721,7 @@ oop G1ParCopyClosure<barrier, do_mark_object>
if (obj_ptr == NULL) {
// This will either forward-to-self, or detect that someone else has
// installed a forwarding pointer.
-return _g1->handle_evacuation_failure_par(_par_scan_state, old);
+return _g1h->handle_evacuation_failure_par(this, old);
}
oop obj = oop(obj_ptr);
@@ -4762,12 +4754,12 @@ oop G1ParCopyClosure<barrier, do_mark_object>
m = m->incr_age();
obj->set_mark(m);
}
-_par_scan_state->age_table()->add(obj, word_sz);
+age_table()->add(obj, word_sz);
} else {
obj->set_mark(m);
}
-size_t* surv_young_words = _par_scan_state->surviving_young_words();
+size_t* surv_young_words = surviving_young_words();
surv_young_words[young_index] += word_sz;
if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
@@ -4776,15 +4768,15 @@ oop G1ParCopyClosure<barrier, do_mark_object>
// length field of the from-space object.
arrayOop(obj)->set_length(0);
oop* old_p = set_partial_array_mask(old);
-_par_scan_state->push_on_queue(old_p);
+push_on_queue(old_p);
} else {
// No point in using the slower heap_region_containing() method,
// given that we know obj is in the heap.
-_scanner.set_region(_g1->heap_region_containing_raw(obj));
+_scanner.set_region(_g1h->heap_region_containing_raw(obj));
obj->oop_iterate_backwards(&_scanner);
}
} else {
-_par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
+undo_allocation(alloc_purpose, obj_ptr, word_sz);
obj = forward_ptr;
}
return obj;
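
Note: the hunks above move copy_to_survivor_space out of the templated G1ParCopyClosure and into G1ParScanThreadState. The body now uses the thread state's own `_g1h` field and calls allocate(), undo_allocation(), age_table() and push_on_queue() directly instead of going through `_par_scan_state->`. A minimal sketch of the pattern with hypothetical names (not the real G1 types): the heavy copy routine is compiled once in a non-template class, while each closure instantiation keeps only thin forwarding code.

```cpp
struct ObjSketch { bool forwarded; ObjSketch* forwardee; };

// One non-template copy of the expensive logic (allocation, aging, queues).
class ThreadStateSketch {
public:
  ObjSketch* copy_to_survivor_space(ObjSketch* old_obj) {
    // ... allocate(), age_table()->add(), push_on_queue() would live here ...
    return old_obj;
  }
};

// Only this thin wrapper is stamped out per <barrier, do_mark_object> pair.
template <int barrier, bool do_mark_object>
class CopyClosureSketch {
  ThreadStateSketch* _par_scan_state;
public:
  explicit CopyClosureSketch(ThreadStateSketch* s) : _par_scan_state(s) {}
  void do_oop_work(ObjSketch** p) {
    ObjSketch* obj = *p;
    *p = obj->forwarded ? obj->forwardee
                        : _par_scan_state->copy_to_survivor_space(obj);
  }
};
```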
@@ -4799,19 +4791,23 @@ void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
template <G1Barrier barrier, bool do_mark_object>
template <class T>
-void G1ParCopyClosure<barrier, do_mark_object>
-::do_oop_work(T* p) {
-oop obj = oopDesc::load_decode_heap_oop(p);
+void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
+T heap_oop = oopDesc::load_heap_oop(p);
+if (oopDesc::is_null(heap_oop)) {
+return;
+}
+oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
assert(_worker_id == _par_scan_state->queue_num(), "sanity");
-// here the null check is implicit in the cset_fast_test() test
if (_g1->in_cset_fast_test(obj)) {
oop forwardee;
if (obj->is_forwarded()) {
forwardee = obj->forwardee();
} else {
-forwardee = copy_to_survivor_space(obj);
+forwardee = _par_scan_state->copy_to_survivor_space(obj);
}
assert(forwardee != NULL, "forwardee should not be NULL");
oopDesc::encode_store_heap_oop(p, forwardee);
@@ -4828,12 +4824,12 @@ void G1ParCopyClosure<barrier, do_mark_object>
// The object is not in collection set. If we're a root scanning
// closure during an initial mark pause (i.e. do_mark_object will
// be true) then attempt to mark the object.
-if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
+if (do_mark_object) {
mark_object(obj);
}
}
-if (barrier == G1BarrierEvac && obj != NULL) {
+if (barrier == G1BarrierEvac) {
_par_scan_state->update_rs(_from, p, _worker_id);
}
}
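
Note: the rewritten do_oop_work splits the old fused load_decode_heap_oop into a load, an explicit null test, and decode_heap_oop_not_null. That explicit filter is what lets in_cset_fast_test (below) turn its out-of-heap branch into an assert, and lets the `obj != NULL` test on the G1BarrierEvac path go away. A self-contained sketch of the load/test/decode shape with stand-in types (the real oopDesc helpers are HotSpot-internal):

```cpp
#include <cassert>
#include <cstdint>

typedef uint32_t narrow_ref;                     // stand-in for narrowOop
static const uintptr_t heap_base = 0x10000000u;  // hypothetical heap base

static bool is_null(narrow_ref v) { return v == 0; }
static void* decode_not_null(narrow_ref v) {
  assert(v != 0);                                // caller already filtered null
  return (void*)(heap_base + ((uintptr_t)v << 3));
}

void do_oop_sketch(narrow_ref* p) {
  narrow_ref heap_oop = *p;                      // raw load, no decode yet
  if (is_null(heap_oop)) {
    return;                                      // null filtered exactly once
  }
  void* obj = decode_not_null(heap_oop);         // cheaper not-null decode
  (void)obj;  // from here on, obj is known non-null and inside the heap
}
```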
@@ -5030,7 +5026,7 @@ public:
ReferenceProcessor* rp = _g1h->ref_processor_stw();
-G1ParScanThreadState pss(_g1h, worker_id);
+G1ParScanThreadState pss(_g1h, worker_id, rp);
G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, rp);
G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, rp);
@@ -5473,7 +5469,7 @@ public:
G1STWIsAliveClosure is_alive(_g1h);
-G1ParScanThreadState pss(_g1h, worker_id);
+G1ParScanThreadState pss(_g1h, worker_id, NULL);
G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
@@ -5585,7 +5581,7 @@ public:
ResourceMark rm;
HandleMark hm;
-G1ParScanThreadState pss(_g1h, worker_id);
+G1ParScanThreadState pss(_g1h, worker_id, NULL);
G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
@@ -5711,7 +5707,7 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
// JNI refs.
// Use only a single queue for this PSS.
-G1ParScanThreadState pss(this, 0);
+G1ParScanThreadState pss(this, 0, NULL);
// We do not embed a reference processor in the copying/scanning
// closures while we're actually processing the discovered
......
@@ -606,6 +606,11 @@ protected:
// may not be a humongous - it must fit into a single heap region.
HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
+HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
+HeapRegion* alloc_region,
+bool par,
+size_t word_size);
// Ensure that no further allocations can happen in "r", bearing in mind
// that parallel threads might be attempting allocations.
void par_allocate_remaining_space(HeapRegion* r);
@@ -698,23 +703,20 @@ public:
}
// This is a fast test on whether a reference points into the
-// collection set or not. It does not assume that the reference
-// points into the heap; if it doesn't, it will return false.
+// collection set or not. Assume that the reference
+// points into the heap.
bool in_cset_fast_test(oop obj) {
assert(_in_cset_fast_test != NULL, "sanity");
-if (_g1_committed.contains((HeapWord*) obj)) {
-// no need to subtract the bottom of the heap from obj,
-// _in_cset_fast_test is biased
-uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
-bool ret = _in_cset_fast_test[index];
-// let's make sure the result is consistent with what the slower
-// test returns
-assert( ret || !obj_in_cs(obj), "sanity");
-assert(!ret || obj_in_cs(obj), "sanity");
-return ret;
-} else {
-return false;
-}
+assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
+// no need to subtract the bottom of the heap from obj,
+// _in_cset_fast_test is biased
+uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
+bool ret = _in_cset_fast_test[index];
+// let's make sure the result is consistent with what the slower
+// test returns
+assert( ret || !obj_in_cs(obj), "sanity");
+assert(!ret || obj_in_cs(obj), "sanity");
+return ret;
}
void clear_cset_fast_test() {
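
Note: the rewritten in_cset_fast_test tightens the contract: callers must now pass a heap reference (the explicit null check added to do_oop_work guarantees this), so the `_g1_committed.contains` branch becomes an assert and the hot path is just a shift plus an indexed load into the biased table. A self-contained worked example of that biasing trick, with hypothetical numbers:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const int       log_region  = 20;                        // 1 MB regions
  const uintptr_t heap_bottom = 0x80000000u;
  const uintptr_t bias        = heap_bottom >> log_region; // applied once
  bool table[64] = { false };                              // one entry per region

  table[5] = true;                                         // region 5 is in the cset
  uintptr_t obj = heap_bottom + 5u * (1u << log_region) + 0x40; // oop in region 5

  // G1 stores a pre-biased pointer so the lookup is just table'[obj >> shift];
  // written here without the out-of-bounds pointer arithmetic:
  assert(table[(obj >> log_region) - bias]);
  return 0;
}
```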
@@ -1786,95 +1788,6 @@ public:
ParGCAllocBuffer::retire(end_of_gc, retain);
_retired = true;
}
-bool is_retired() {
-return _retired;
-}
-};
-class G1ParGCAllocBufferContainer {
-protected:
-static int const _priority_max = 2;
-G1ParGCAllocBuffer* _priority_buffer[_priority_max];
-public:
-G1ParGCAllocBufferContainer(size_t gclab_word_size) {
-for (int pr = 0; pr < _priority_max; ++pr) {
-_priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size);
-}
-}
-~G1ParGCAllocBufferContainer() {
-for (int pr = 0; pr < _priority_max; ++pr) {
-assert(_priority_buffer[pr]->is_retired(), "alloc buffers should all retire at this point.");
-delete _priority_buffer[pr];
-}
-}
-HeapWord* allocate(size_t word_sz) {
-HeapWord* obj;
-for (int pr = 0; pr < _priority_max; ++pr) {
-obj = _priority_buffer[pr]->allocate(word_sz);
-if (obj != NULL) return obj;
-}
-return obj;
-}
-bool contains(void* addr) {
-for (int pr = 0; pr < _priority_max; ++pr) {
-if (_priority_buffer[pr]->contains(addr)) return true;
-}
-return false;
-}
-void undo_allocation(HeapWord* obj, size_t word_sz) {
-bool finish_undo;
-for (int pr = 0; pr < _priority_max; ++pr) {
-if (_priority_buffer[pr]->contains(obj)) {
-_priority_buffer[pr]->undo_allocation(obj, word_sz);
-finish_undo = true;
-}
-}
-if (!finish_undo) ShouldNotReachHere();
-}
-size_t words_remaining() {
-size_t result = 0;
-for (int pr = 0; pr < _priority_max; ++pr) {
-result += _priority_buffer[pr]->words_remaining();
-}
-return result;
-}
-size_t words_remaining_in_retired_buffer() {
-G1ParGCAllocBuffer* retired = _priority_buffer[0];
-return retired->words_remaining();
-}
-void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
-for (int pr = 0; pr < _priority_max; ++pr) {
-_priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain);
-}
-}
-void update(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) {
-G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
-retired_and_set->retire(end_of_gc, retain);
-retired_and_set->set_buf(buf);
-retired_and_set->set_word_size(word_sz);
-adjust_priority_order();
-}
-private:
-void adjust_priority_order() {
-G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
-int last = _priority_max - 1;
-for (int pr = 0; pr < last; ++pr) {
-_priority_buffer[pr] = _priority_buffer[pr + 1];
-}
-_priority_buffer[last] = retired_and_set;
-}
-};
class G1ParScanThreadState : public StackObj {
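
Note: this hunk deletes the whole two-slot G1ParGCAllocBufferContainer (which kept the just-retired buffer around at priority 0 to absorb small allocations) and, in the hunks below, returns G1ParScanThreadState to one G1ParGCAllocBuffer per purpose with a plain retire-then-replace refill. A sketch of that restored protocol with hypothetical stand-ins (see the allocate_slow hunk further down for the real sequence):

```cpp
#include <cstddef>
#include <cstdint>

class BufSketch {
  size_t _top, _size;   // word offsets into the current PLAB
public:
  BufSketch() : _top(0), _size(0) {}
  size_t words_remaining() const { return _size - _top; }
  void   retire()              { _top = _size = 0; }  // fill tail, detach
  void   set_buf(size_t words) { _top = 0; _size = words; }
  // Returns the word offset of the allocation, or SIZE_MAX on failure.
  size_t allocate(size_t words) {
    if (words > words_remaining()) return SIZE_MAX;
    size_t offset = _top;
    _top += words;
    return offset;
  }
};

size_t allocate_slow_sketch(BufSketch* buf, size_t word_sz, size_t plab_words) {
  size_t wasted = buf->words_remaining();  // tail counts as buffer waste
  (void)wasted;                            // (the real code records this)
  buf->retire();                           // old buffer is done
  buf->set_buf(plab_words);                // stands in for par_allocate_during_gc()
  return buf->allocate(word_sz);           // retry in the fresh buffer
}
```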
@@ -1885,11 +1798,13 @@ protected:
G1SATBCardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;
-G1ParGCAllocBufferContainer _surviving_alloc_buffer;
-G1ParGCAllocBufferContainer _tenured_alloc_buffer;
-G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount];
+G1ParGCAllocBuffer _surviving_alloc_buffer;
+G1ParGCAllocBuffer _tenured_alloc_buffer;
+G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
ageTable _age_table;
+G1ParScanClosure _scanner;
size_t _alloc_buffer_waste;
size_t _undo_waste;
@@ -1942,7 +1857,7 @@ protected:
}
public:
-G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
+G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
~G1ParScanThreadState() {
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
@@ -1951,7 +1866,7 @@ public:
RefToScanQueue* refs() { return _refs; }
ageTable* age_table() { return &_age_table; }
-G1ParGCAllocBufferContainer* alloc_buffer(GCAllocPurpose purpose) {
+G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
return _alloc_buffers[purpose];
}
@@ -1981,13 +1896,15 @@ public:
HeapWord* obj = NULL;
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-G1ParGCAllocBufferContainer* alloc_buf = alloc_buffer(purpose);
+G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
+add_to_alloc_buffer_waste(alloc_buf->words_remaining());
+alloc_buf->retire(false /* end_of_gc */, false /* retain */);
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
if (buf == NULL) return NULL; // Let caller handle allocation failure.
-add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_retired_buffer());
-alloc_buf->update(false /* end_of_gc */, false /* retain */, buf, gclab_word_size);
+// Otherwise.
+alloc_buf->set_word_size(gclab_word_size);
+alloc_buf->set_buf(buf);
obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
@@ -2077,6 +1994,8 @@ public:
}
}
+oop copy_to_survivor_space(oop const obj);
template <class T> void deal_with_reference(T* ref_to_scan) {
if (has_partial_array_mask(ref_to_scan)) {
_partial_scan_cl->do_oop_nv(ref_to_scan);
@@ -2099,6 +2018,7 @@ public:
}
}
+public:
void trim_queue();
};
......
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
_cm(_g1->concurrent_mark()) {}
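
Note: the new g1OopClosures.cpp contains only this out-of-line constructor. A plausible reading, though the commit does not say so explicitly: keeping the body out of the header lets G1ParCopyHelper initialize _cm via _g1->concurrent_mark() without pulling g1CollectedHeap.inline.hpp into every file that includes the closure header. A minimal single-file sketch of that pattern with hypothetical names:

```cpp
// --- what would sit in the header: a forward declaration is enough ---
class HeavyHeapSketch;                  // no #include of the heavy header
class HelperCtorSketch {
  HeavyHeapSketch* _heap;
  int              _mark_state;
public:
  explicit HelperCtorSketch(HeavyHeapSketch* h);  // body lives in the .cpp
};

// --- what would sit in the .cpp: the only place the full type is needed ---
class HeavyHeapSketch {                 // stands in for G1CollectedHeap
public:
  int concurrent_mark_state() const { return 1; }
};

HelperCtorSketch::HelperCtorSketch(HeavyHeapSketch* h)
  : _heap(h), _mark_state(h->concurrent_mark_state()) {}
```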
@@ -48,12 +48,8 @@ public:
class G1ParClosureSuper : public OopsInHeapRegionClosure {
protected:
G1CollectedHeap* _g1;
-G1RemSet* _g1_rem;
-ConcurrentMark* _cm;
G1ParScanThreadState* _par_scan_state;
uint _worker_id;
-bool _during_initial_mark;
-bool _mark_in_progress;
public:
G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
bool apply_to_weak_ref_discovered_field() { return true; }
@@ -133,23 +129,10 @@ public:
// Add back base class for metadata
class G1ParCopyHelper : public G1ParClosureSuper {
+protected:
Klass* _scanned_klass;
+ConcurrentMark* _cm;
-public:
-G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-_scanned_klass(NULL),
-G1ParClosureSuper(g1, par_scan_state) {}
-void set_scanned_klass(Klass* k) { _scanned_klass = k; }
-template <class T> void do_klass_barrier(T* p, oop new_obj);
-};
-template <G1Barrier barrier, bool do_mark_object>
-class G1ParCopyClosure : public G1ParCopyHelper {
-G1ParScanClosure _scanner;
-template <class T> void do_oop_work(T* p);
protected:
// Mark the object if it's not already marked. This is used to mark
// objects pointed to by roots that are guaranteed not to move
// during the GC (i.e., non-CSet objects). It is MT-safe.
@@ -159,22 +142,26 @@ protected:
// objects pointed to by roots that have been forwarded during a
// GC. It is MT-safe.
void mark_forwarded_object(oop from_obj, oop to_obj);
public:
+G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
-oop copy_to_survivor_space(oop obj);
+void set_scanned_klass(Klass* k) { _scanned_klass = k; }
+template <class T> void do_klass_barrier(T* p, oop new_obj);
+};
+template <G1Barrier barrier, bool do_mark_object>
+class G1ParCopyClosure : public G1ParCopyHelper {
+private:
+template <class T> void do_oop_work(T* p);
+public:
G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
ReferenceProcessor* rp) :
-_scanner(g1, par_scan_state, rp),
G1ParCopyHelper(g1, par_scan_state) {
assert(_ref_processor == NULL, "sanity");
}
-G1ParScanClosure* scanner() { return &_scanner; }
-template <class T> void do_oop_nv(T* p) {
-do_oop_work(p);
-}
+template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
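
Note: summing up the header reshuffle above, everything that does not depend on the template parameters (_scanned_klass, _cm, mark_object, mark_forwarded_object, do_klass_barrier) now lives in the non-template G1ParCopyHelper, and G1ParCopyClosure keeps only the templated do_oop_work. A small sketch of why this hoisting pays off, with hypothetical names:

```cpp
// Non-template base: this method is compiled exactly once.
class HelperSketch {
protected:
  void mark_object_sketch() { /* gray the object in the mark bitmap */ }
};

// Each <barrier, do_mark_object> combination only duplicates the thin part.
template <int barrier, bool do_mark_object>
class ClosureSketch : public HelperSketch {
public:
  void work() {
    if (do_mark_object) {
      mark_object_sketch();   // shared, single implementation in the base
    }
  }
};

// Instantiating several variants no longer replicates the marking code:
template class ClosureSketch<0, true>;
template class ClosureSketch<1, false>;
```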
......
@@ -82,7 +82,7 @@ inline void G1ParScanClosure::do_oop_nv(T* p) {
_par_scan_state->push_on_queue(p);
} else {
-_par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
+_par_scan_state->update_rs(_from, p, _worker_id);
}
}
}
......
@@ -158,7 +158,7 @@ public:
// Fills in the unallocated portion of the buffer with a garbage object.
// If "end_of_gc" is TRUE, is after the last use in the GC. IF "retain"
// is true, attempt to re-use the unused portion in the next GC.
-virtual void retire(bool end_of_gc, bool retain);
+void retire(bool end_of_gc, bool retain);
void print() PRODUCT_RETURN;
};
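
Note: retire() loses its `virtual` qualifier here, backing out what the two-slot container scheme appears to have needed; without dynamic dispatch, retire() calls from the hot allocate_slow path can bind statically and be inlined. A tiny sketch of the difference, with a hypothetical stand-in:

```cpp
class PlabSketch {
  char* _top;
  char* _end;
public:
  PlabSketch() : _top(0), _end(0) {}
  // Non-virtual: the compiler can inline this straight into callers,
  // instead of dispatching through a vtable as a virtual version would.
  void retire(bool end_of_gc, bool retain) {
    (void)end_of_gc; (void)retain;
    // fill the unused [_top, _end) tail with a dummy object, then detach
    _top = _end = 0;
  }
};
```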
......