Commit fbe7fbc8 authored by B brutisso

Merge

/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
@@ -183,12 +184,11 @@ CMMarkStack::CMMarkStack(ConcurrentMark* cm) :
void CMMarkStack::allocate(size_t size) {
_base = NEW_C_HEAP_ARRAY(oop, size);
if (_base == NULL) {
vm_exit_during_initialization("Failed to allocate "
"CM region mark stack");
vm_exit_during_initialization("Failed to allocate CM region mark stack");
}
_index = 0;
_capacity = (jint) size;
_oops_do_bound = -1;
_saved_index = -1;
NOT_PRODUCT(_max_depth = 0);
}
@@ -283,7 +283,6 @@ bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
}
}
CMRegionStack::CMRegionStack() : _base(NULL) {}
void CMRegionStack::allocate(size_t size) {
@@ -302,6 +301,8 @@ CMRegionStack::~CMRegionStack() {
}
void CMRegionStack::push_lock_free(MemRegion mr) {
guarantee(false, "push_lock_free(): don't call this any more");
assert(mr.word_size() > 0, "Precondition");
while (true) {
jint index = _index;
@@ -325,6 +326,8 @@ void CMRegionStack::push_lock_free(MemRegion mr) {
// marking / remark phases. Should only be called in tandem with
// other lock-free pops.
MemRegion CMRegionStack::pop_lock_free() {
guarantee(false, "pop_lock_free(): don't call this any more");
while (true) {
jint index = _index;
@@ -390,6 +393,8 @@ MemRegion CMRegionStack::pop_with_lock() {
#endif
bool CMRegionStack::invalidate_entries_into_cset() {
guarantee(false, "invalidate_entries_into_cset(): don't call this any more");
bool result = false;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
for (int i = 0; i < _oops_do_bound; ++i) {
@@ -438,14 +443,29 @@ bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) {
return res;
}
void CMMarkStack::note_start_of_gc() {
assert(_saved_index == -1,
"note_start_of_gc()/end_of_gc() bracketed incorrectly");
_saved_index = _index;
}
void CMMarkStack::note_end_of_gc() {
// This is intentionally a guarantee, instead of an assert. If we
// accidentally add something to the mark stack during GC, it
// will be a correctness issue so it's better if we crash. We'll
// only check this once per GC anyway, so it won't be a performance
// issue in any way.
guarantee(_saved_index == _index,
err_msg("saved index: %d index: %d", _saved_index, _index));
_saved_index = -1;
}
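A minimal usage sketch of how this pair is meant to bracket an evacuation pause; the helper name below is hypothetical and stands in for the real call sites in G1CollectedHeap's pause code, which are not shown in this hunk:

// Hypothetical illustration of the note_start_of_gc()/note_end_of_gc()
// bracketing protocol for the global mark stack.
void example_evacuation_pause(CMMarkStack* stack) {
  stack->note_start_of_gc();   // snapshots _index into _saved_index
  // ... evacuate objects; nothing may push onto the mark stack ...
  stack->note_end_of_gc();     // guarantees _index was left unchanged
}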
void CMMarkStack::oops_do(OopClosure* f) {
if (_index == 0) return;
assert(_oops_do_bound != -1 && _oops_do_bound <= _index,
"Bound must be set.");
for (int i = 0; i < _oops_do_bound; i++) {
assert(_saved_index == _index,
err_msg("saved index: %d index: %d", _saved_index, _index));
for (int i = 0; i < _index; i += 1) {
f->do_oop(&_base[i]);
}
_oops_do_bound = -1;
}
bool ConcurrentMark::not_yet_marked(oop obj) const {
@@ -783,7 +803,7 @@ class NoteStartOfMarkHRClosure: public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* r) {
if (!r->continuesHumongous()) {
r->note_start_of_marking(true);
r->note_start_of_marking();
}
return false;
}
@@ -804,6 +824,10 @@ void ConcurrentMark::checkpointRootsInitialPre() {
// Initialise marking structures. This has to be done in a STW phase.
reset();
// For each region note start of marking.
NoteStartOfMarkHRClosure startcl;
g1h->heap_region_iterate(&startcl);
}
@@ -818,10 +842,6 @@ void ConcurrentMark::checkpointRootsInitialPost() {
// every remark and we'll eventually not need to cause one.
force_overflow_stw()->init();
// For each region note start of marking.
NoteStartOfMarkHRClosure startcl;
g1h->heap_region_iterate(&startcl);
// Start Concurrent Marking weak-reference discovery.
ReferenceProcessor* rp = g1h->ref_processor_cm();
// enable ("weak") refs discovery
@@ -946,22 +966,9 @@ bool ForceOverflowSettings::should_force() {
}
#endif // !PRODUCT
void ConcurrentMark::grayRoot(oop p) {
HeapWord* addr = (HeapWord*) p;
// We can't really check against _heap_start and _heap_end, since it
// is possible during an evacuation pause with piggy-backed
// initial-mark that the committed space is expanded during the
// pause without CM observing this change. So the assertion below
// is a bit conservative; but better than nothing.
assert(_g1h->g1_committed().contains(addr),
"address should be within the heap bounds");
if (!_nextMarkBitMap->isMarked(addr)) {
_nextMarkBitMap->parMark(addr);
}
}
void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) {
guarantee(false, "grayRegionIfNecessary(): don't call this any more");
// The objects on the region have already been marked "in bulk" by
// the caller. We only need to decide whether to push the region on
// the region stack or not.
@@ -1007,6 +1014,8 @@ void ConcurrentMark::grayRegionIfNecessary(MemRegion mr) {
}
void ConcurrentMark::markAndGrayObjectIfNecessary(oop p) {
guarantee(false, "markAndGrayObjectIfNecessary(): don't call this any more");
// The object is not marked by the caller. We need to at least mark
// it and maybe push in on the stack.
@@ -1224,7 +1233,6 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
true /* expected_active */);
if (VerifyDuringGC) {
HandleMark hm; // handle scope
gclog_or_tty->print(" VerifyDuringGC:(after)");
Universe::heap()->prepare_for_verify();
@@ -1879,10 +1887,6 @@ void ConcurrentMark::cleanup() {
double end = os::elapsedTime();
_cleanup_times.add((end - start) * 1000.0);
// G1CollectedHeap::heap()->print();
// gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d",
// G1CollectedHeap::heap()->get_gc_time_stamp());
if (PrintGC || PrintGCDetails) {
g1h->print_size_transition(gclog_or_tty,
start_used_bytes,
@@ -2669,6 +2673,8 @@ void ConcurrentMark::deal_with_reference(oop obj) {
}
void ConcurrentMark::drainAllSATBBuffers() {
guarantee(false, "drainAllSATBBuffers(): don't call this any more");
CMGlobalObjectClosure oc(this);
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
satb_mq_set.set_closure(&oc);
@@ -2687,12 +2693,6 @@ void ConcurrentMark::drainAllSATBBuffers() {
assert(satb_mq_set.completed_buffers_num() == 0, "invariant");
}
void ConcurrentMark::markPrev(oop p) {
// Note we are overriding the read-only view of the prev map here, via
// the cast.
((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*)p);
}
void ConcurrentMark::clear(oop p) {
assert(p != NULL && p->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)p;
@@ -2702,13 +2702,21 @@ void ConcurrentMark::clear(oop p) {
_nextMarkBitMap->clear(addr);
}
void ConcurrentMark::clearRangeBothMaps(MemRegion mr) {
void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
// Note we are overriding the read-only view of the prev map here, via
// the cast.
((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
}
void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
_nextMarkBitMap->clearRange(mr);
}
void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
clearRangePrevBitmap(mr);
clearRangeNextBitmap(mr);
}
HeapRegion*
ConcurrentMark::claim_region(int task_num) {
// "checkpoint" the finger
@@ -2803,6 +2811,9 @@ ConcurrentMark::claim_region(int task_num) {
}
bool ConcurrentMark::invalidate_aborted_regions_in_cset() {
guarantee(false, "invalidate_aborted_regions_in_cset(): "
"don't call this any more");
bool result = false;
for (int i = 0; i < (int)_max_task_num; ++i) {
CMTask* the_task = _tasks[i];
@@ -2854,24 +2865,135 @@ void ConcurrentMark::oops_do(OopClosure* cl) {
// ...then over the contents of the all the task queues.
queue->oops_do(cl);
}
}
#ifndef PRODUCT
enum VerifyNoCSetOopsPhase {
VerifyNoCSetOopsStack,
VerifyNoCSetOopsQueues,
VerifyNoCSetOopsSATBCompleted,
VerifyNoCSetOopsSATBThread
};
// Invalidate any entries in the region stack that point into
// the collection set.
if (_regionStack.invalidate_entries_into_cset()) {
// otherwise, any gray objects copied during the evacuation pause
// might not be visited.
assert(_should_gray_objects, "invariant");
class VerifyNoCSetOopsClosure : public OopClosure, public ObjectClosure {
private:
G1CollectedHeap* _g1h;
VerifyNoCSetOopsPhase _phase;
int _info;
const char* phase_str() {
switch (_phase) {
case VerifyNoCSetOopsStack: return "Stack";
case VerifyNoCSetOopsQueues: return "Queue";
case VerifyNoCSetOopsSATBCompleted: return "Completed SATB Buffers";
case VerifyNoCSetOopsSATBThread: return "Thread SATB Buffers";
default: ShouldNotReachHere();
}
return NULL;
}
// Invalidate any aborted regions, recorded in the individual CM
// tasks, that point into the collection set.
if (invalidate_aborted_regions_in_cset()) {
// otherwise, any gray objects copied during the evacuation pause
// might not be visited.
assert(_should_gray_objects, "invariant");
void do_object_work(oop obj) {
guarantee(!_g1h->obj_in_cs(obj),
err_msg("obj: "PTR_FORMAT" in CSet, phase: %s, info: %d",
(void*) obj, phase_str(), _info));
}
public:
VerifyNoCSetOopsClosure() : _g1h(G1CollectedHeap::heap()) { }
void set_phase(VerifyNoCSetOopsPhase phase, int info = -1) {
_phase = phase;
_info = info;
}
virtual void do_oop(oop* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
do_object_work(obj);
}
virtual void do_oop(narrowOop* p) {
// We should not come across narrow oops while scanning marking
// stacks and SATB buffers.
ShouldNotReachHere();
}
virtual void do_object(oop obj) {
do_object_work(obj);
}
};
void ConcurrentMark::verify_no_cset_oops(bool verify_stacks,
bool verify_enqueued_buffers,
bool verify_thread_buffers,
bool verify_fingers) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
if (!G1CollectedHeap::heap()->mark_in_progress()) {
return;
}
VerifyNoCSetOopsClosure cl;
if (verify_stacks) {
// Verify entries on the global mark stack
cl.set_phase(VerifyNoCSetOopsStack);
_markStack.oops_do(&cl);
// Verify entries on the task queues
for (int i = 0; i < (int) _max_task_num; i += 1) {
cl.set_phase(VerifyNoCSetOopsQueues, i);
OopTaskQueue* queue = _task_queues->queue(i);
queue->oops_do(&cl);
}
}
SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
// Verify entries on the enqueued SATB buffers
if (verify_enqueued_buffers) {
cl.set_phase(VerifyNoCSetOopsSATBCompleted);
satb_qs.iterate_completed_buffers_read_only(&cl);
}
// Verify entries on the per-thread SATB buffers
if (verify_thread_buffers) {
cl.set_phase(VerifyNoCSetOopsSATBThread);
satb_qs.iterate_thread_buffers_read_only(&cl);
}
if (verify_fingers) {
// Verify the global finger
HeapWord* global_finger = finger();
if (global_finger != NULL && global_finger < _heap_end) {
// The global finger always points to a heap region boundary. We
// use heap_region_containing_raw() to get the containing region
// given that the global finger could be pointing to a free region
// which subsequently becomes continues humongous. If that
// happens, heap_region_containing() will return the bottom of the
// corresponding starts humongous region and the check below will
// not hold any more.
HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger);
guarantee(global_finger == global_hr->bottom(),
err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT,
global_finger, HR_FORMAT_PARAMS(global_hr)));
}
// Verify the task fingers
assert(parallel_marking_threads() <= _max_task_num, "sanity");
for (int i = 0; i < (int) parallel_marking_threads(); i += 1) {
CMTask* task = _tasks[i];
HeapWord* task_finger = task->finger();
if (task_finger != NULL && task_finger < _heap_end) {
// See above note on the global finger verification.
HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger);
guarantee(task_finger == task_hr->bottom() ||
!task_hr->in_collection_set(),
err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT,
task_finger, HR_FORMAT_PARAMS(task_hr)));
}
}
}
}
#endif // PRODUCT
void ConcurrentMark::clear_marking_state(bool clear_overflow) {
_markStack.setEmpty();
@@ -3080,19 +3202,6 @@ public:
}
};
class SetClaimValuesInCSetHRClosure: public HeapRegionClosure {
jint _claim_value;
public:
SetClaimValuesInCSetHRClosure(jint claim_value) :
_claim_value(claim_value) { }
bool doHeapRegion(HeapRegion* hr) {
hr->set_claim_value(_claim_value);
return false;
}
};
class G1ParCompleteMarkInCSetTask: public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
@@ -3112,6 +3221,9 @@ public:
};
void ConcurrentMark::complete_marking_in_collection_set() {
guarantee(false, "complete_marking_in_collection_set(): "
"don't call this any more");
G1CollectedHeap* g1h = G1CollectedHeap::heap();
if (!g1h->mark_in_progress()) {
@@ -3135,9 +3247,8 @@ void ConcurrentMark::complete_marking_in_collection_set() {
assert(g1h->check_cset_heap_region_claim_values(HeapRegion::CompleteMarkCSetClaimValue), "sanity");
// Now reset the claim values in the regions in the collection set.
SetClaimValuesInCSetHRClosure set_cv_cl(HeapRegion::InitialClaimValue);
g1h->collection_set_iterate(&set_cv_cl);
// Reset the claim values in the regions in the collection set.
g1h->reset_cset_heap_region_claim_values();
assert(g1h->check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
@@ -3160,6 +3271,8 @@ void ConcurrentMark::complete_marking_in_collection_set() {
// newCSet().
void ConcurrentMark::newCSet() {
guarantee(false, "newCSet(): don't call this any more");
if (!concurrent_marking_in_progress()) {
// nothing to do if marking is not in progress
return;
@@ -3198,6 +3311,8 @@ void ConcurrentMark::newCSet() {
}
void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
guarantee(false, "registerCSetRegion(): don't call this any more");
if (!concurrent_marking_in_progress()) return;
HeapWord* region_end = hr->end();
@@ -3209,6 +3324,9 @@ void ConcurrentMark::registerCSetRegion(HeapRegion* hr) {
// Resets the region fields of active CMTasks whose values point
// into the collection set.
void ConcurrentMark::reset_active_task_region_fields_in_cset() {
guarantee(false, "reset_active_task_region_fields_in_cset(): "
"don't call this any more");
assert(SafepointSynchronize::is_at_safepoint(), "should be in STW");
assert(parallel_marking_threads() <= _max_task_num, "sanity");
@@ -3919,6 +4037,10 @@ void CMTask::drain_satb_buffers() {
}
void CMTask::drain_region_stack(BitMapClosure* bc) {
assert(_cm->region_stack_empty(), "region stack should be empty");
assert(_aborted_region.is_empty(), "aborted region should be empty");
return;
if (has_aborted()) return;
assert(_region_finger == NULL,
......
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -166,10 +166,10 @@ class CMBitMap : public CMBitMapRO {
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
ConcurrentMark* _cm;
oop* _base; // bottom of stack
jint _index; // one more than last occupied index
jint _capacity; // max #elements
jint _oops_do_bound; // Number of elements to include in next iteration.
oop* _base; // bottom of stack
jint _index; // one more than last occupied index
jint _capacity; // max #elements
jint _saved_index; // value of _index saved at start of GC
NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run
bool _overflow;
@@ -247,16 +247,12 @@ class CMMarkStack VALUE_OBJ_CLASS_SPEC {
void setEmpty() { _index = 0; clear_overflow(); }
// Record the current size; a subsequent "oops_do" will iterate only over
// indices valid at the time of this call.
void set_oops_do_bound(jint bound = -1) {
if (bound == -1) {
_oops_do_bound = _index;
} else {
_oops_do_bound = bound;
}
}
jint oops_do_bound() { return _oops_do_bound; }
// Record the current index.
void note_start_of_gc();
// Make sure that we have not added any entries to the stack during GC.
void note_end_of_gc();
// iterate over the oops in the mark stack, up to the bound recorded via
// the call above.
void oops_do(OopClosure* f);
@@ -724,10 +720,9 @@ public:
// G1CollectedHeap
// This notifies CM that a root during initial-mark needs to be
// grayed and it's MT-safe. Currently, we just mark it. But, in the
// future, we can experiment with pushing it on the stack and we can
// do this without changing G1CollectedHeap.
void grayRoot(oop p);
// grayed. It is MT-safe.
inline void grayRoot(oop obj, size_t word_size);
// It's used during evacuation pauses to gray a region, if
// necessary, and it's MT-safe. It assumes that the caller has
// marked any objects on that region. If _should_gray_objects is
@@ -735,6 +730,7 @@ public:
// pushed on the region stack, if it is located below the global
// finger, otherwise we do nothing.
void grayRegionIfNecessary(MemRegion mr);
// It's used during evacuation pauses to mark and, if necessary,
// gray a single object and it's MT-safe. It assumes the caller did
// not mark the object. If _should_gray_objects is true and we're
@@ -791,24 +787,40 @@ public:
// Mark in the previous bitmap. NB: this is usually read-only, so use
// this carefully!
void markPrev(oop p);
inline void markPrev(oop p);
inline void markNext(oop p);
void clear(oop p);
// Clears marks for all objects in the given range, for both prev and
// next bitmaps. NB: the previous bitmap is usually read-only, so use
// this carefully!
void clearRangeBothMaps(MemRegion mr);
// Clears marks for all objects in the given range, for the prev,
// next, or both bitmaps. NB: the previous bitmap is usually
// read-only, so use this carefully!
void clearRangePrevBitmap(MemRegion mr);
void clearRangeNextBitmap(MemRegion mr);
void clearRangeBothBitmaps(MemRegion mr);
// Notify data structures that a GC has started.
void note_start_of_gc() {
_markStack.note_start_of_gc();
}
// Record the current top of the mark and region stacks; a
// subsequent oops_do() on the mark stack and
// invalidate_entries_into_cset() on the region stack will iterate
// only over indices valid at the time of this call.
void set_oops_do_bound() {
_markStack.set_oops_do_bound();
_regionStack.set_oops_do_bound();
// Notify data structures that a GC is finished.
void note_end_of_gc() {
_markStack.note_end_of_gc();
}
// Iterate over the oops in the mark stack and all local queues. It
// also calls invalidate_entries_into_cset() on the region stack.
void oops_do(OopClosure* f);
// Verify that there are no CSet oops on the stacks (taskqueues /
// global mark stack), enqueued SATB buffers, per-thread SATB
// buffers, and fingers (global / per-task). The boolean parameters
// decide which of the above data structures to verify. If marking
// is not in progress, it's a no-op.
void verify_no_cset_oops(bool verify_stacks,
bool verify_enqueued_buffers,
bool verify_thread_buffers,
bool verify_fingers) PRODUCT_RETURN;
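As a hedged usage sketch (the actual call sites bracket the evacuation pause in G1CollectedHeap and are not part of this diff), a caller that checks everything except the per-thread SATB buffers would invoke it like this:

_cm->verify_no_cset_oops(true  /* verify_stacks */,
                         true  /* verify_enqueued_buffers */,
                         false /* verify_thread_buffers */,
                         true  /* verify_fingers */);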
// It is called at the end of an evacuation pause during marking so
// that CM is notified of where the new end of the heap is. It
// doesn't do anything if concurrent_marking_in_progress() is false,
@@ -1166,6 +1178,7 @@ public:
// It keeps picking SATB buffers and processing them until no SATB
// buffers are available.
void drain_satb_buffers();
// It keeps popping regions from the region stack and processing
// them until the region stack is empty.
void drain_region_stack(BitMapClosure* closure);
......
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -153,4 +153,46 @@ inline void CMTask::deal_with_reference(oop obj) {
}
}
inline void ConcurrentMark::markPrev(oop p) {
assert(!_prevMarkBitMap->isMarked((HeapWord*) p), "sanity");
// Note we are overriding the read-only view of the prev map here, via
// the cast.
((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
}
inline void ConcurrentMark::markNext(oop p) {
assert(!_nextMarkBitMap->isMarked((HeapWord*) p), "sanity");
_nextMarkBitMap->mark((HeapWord*) p);
}
inline void ConcurrentMark::grayRoot(oop obj, size_t word_size) {
HeapWord* addr = (HeapWord*) obj;
// Currently we don't do anything with word_size but we will use it
// in the very near future in the liveness calculation piggy-backing
// changes.
#ifdef ASSERT
HeapRegion* hr = _g1h->heap_region_containing(addr);
assert(hr != NULL, "sanity");
assert(!hr->is_survivor(), "should not allocate survivors during IM");
assert(addr < hr->next_top_at_mark_start(),
err_msg("addr: "PTR_FORMAT" hr: "HR_FORMAT" NTAMS: "PTR_FORMAT,
addr, HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start()));
// We cannot assert that word_size == obj->size() given that obj
// might not be in a consistent state (another thread might be in
// the process of copying it). So the best thing we can do is to
// assert that word_size is under an upper bound which is its
// containing region's capacity.
assert(word_size * HeapWordSize <= hr->capacity(),
err_msg("size: "SIZE_FORMAT" capacity: "SIZE_FORMAT" "HR_FORMAT,
word_size * HeapWordSize, hr->capacity(),
HR_FORMAT_PARAMS(hr)));
#endif // ASSERT
if (!_nextMarkBitMap->isMarked(addr)) {
_nextMarkBitMap->parMark(addr);
}
}
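For scale, a worked instance of the capacity bound asserted above; the region size is assumed for illustration and is not taken from this patch:

// Assuming 1 MB heap regions and 8-byte heap words:
//   word_size * HeapWordSize <= hr->capacity()
//   word_size * 8            <= 1048576
// so any word_size up to 131072 words passes the assert.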
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -285,6 +285,14 @@ private:
// Typically, it is not full so we should re-use it during the next GC.
HeapRegion* _retained_old_gc_alloc_region;
// It specifies whether we should attempt to expand the heap after a
// region allocation failure. If heap expansion fails we set this to
// false so that we don't re-attempt the heap expansion (it's likely
// that subsequent expansion attempts will also fail if one fails).
// Currently, it is only consulted during GC and it's reset at the
// start of each GC.
bool _expand_heap_after_alloc_failure;
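A minimal sketch of how such a flag is typically consulted; the helper below is hypothetical (the real allocation-failure path is not part of this hunk), and only G1CollectedHeap::expand() is assumed from the surrounding code:

// Hypothetical consumer of _expand_heap_after_alloc_failure.
HeapWord* expand_and_retry_allocation(size_t word_size) {
  if (!_expand_heap_after_alloc_failure) {
    return NULL;                 // an expansion already failed this GC
  }
  if (!expand(word_size * HeapWordSize)) {
    // Expansion failed; don't re-attempt it for the rest of this GC.
    _expand_heap_after_alloc_failure = false;
    return NULL;
  }
  return retry_allocation(word_size);  // hypothetical retry helper
}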
// It resets the mutator alloc region before new allocations can take place.
void init_mutator_alloc_region();
@@ -861,8 +869,7 @@ protected:
void finalize_for_evac_failure();
// An attempt to evacuate "obj" has failed; take necessary steps.
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj,
bool should_mark_root);
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
void handle_evacuation_failure_common(oop obj, markOop m);
// ("Weak") Reference processing support.
@@ -954,7 +961,7 @@ protected:
unsigned int* _worker_cset_start_region_time_stamp;
enum G1H_process_strong_roots_tasks {
G1H_PS_mark_stack_oops_do,
G1H_PS_filter_satb_buffers,
G1H_PS_refProcessor_oops_do,
// Leave this one last.
G1H_PS_NumElements
@@ -1305,6 +1312,10 @@ public:
// It resets all the region claim values to the default.
void reset_heap_region_claim_values();
// Resets the claim values of regions in the current
// collection set to the default.
void reset_cset_heap_region_claim_values();
#ifdef ASSERT
bool check_heap_region_claim_values(jint claim_value);
@@ -1740,10 +1751,8 @@ public:
_gclab_word_size(gclab_word_size),
_real_start_word(NULL),
_real_end_word(NULL),
_start_word(NULL)
{
guarantee( size_in_words() >= bitmap_size_in_words(),
"just making sure");
_start_word(NULL) {
guarantee(false, "GCLabBitMap::GCLabBitmap(): don't call this any more");
}
inline unsigned heapWordToOffset(HeapWord* addr) {
@@ -1797,6 +1806,8 @@ public:
}
void set_buffer(HeapWord* start) {
guarantee(false, "set_buffer(): don't call this any more");
guarantee(use_local_bitmaps, "invariant");
clear();
@@ -1820,6 +1831,8 @@ public:
#endif // PRODUCT
void retire() {
guarantee(false, "retire(): don't call this any more");
guarantee(use_local_bitmaps, "invariant");
assert(fields_well_formed(), "invariant");
@@ -1853,32 +1866,18 @@ public:
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
bool _retired;
bool _should_mark_objects;
GCLabBitMap _bitmap;
public:
G1ParGCAllocBuffer(size_t gclab_word_size);
inline bool mark(HeapWord* addr) {
guarantee(use_local_bitmaps, "invariant");
assert(_should_mark_objects, "invariant");
return _bitmap.mark(addr);
}
inline void set_buf(HeapWord* buf) {
if (use_local_bitmaps && _should_mark_objects) {
_bitmap.set_buffer(buf);
}
void set_buf(HeapWord* buf) {
ParGCAllocBuffer::set_buf(buf);
_retired = false;
}
inline void retire(bool end_of_gc, bool retain) {
void retire(bool end_of_gc, bool retain) {
if (_retired)
return;
if (use_local_bitmaps && _should_mark_objects) {
_bitmap.retire();
}
ParGCAllocBuffer::retire(end_of_gc, retain);
_retired = true;
}
......
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -281,7 +281,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
_par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
_par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
_par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
_par_last_satb_filtering_times_ms = new double[_parallel_gc_threads];
_par_last_update_rs_times_ms = new double[_parallel_gc_threads];
_par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
@@ -905,10 +905,19 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
}
// We only need to do this here as the policy will only be applied
// to the GC we're about to start. So, no point in calculating this
// every time we calculate / recalculate the target young length.
update_survivors_policy();
if (!during_initial_mark_pause()) {
// We only need to do this here as the policy will only be applied
// to the GC we're about to start. So, no point in calculating this
// every time we calculate / recalculate the target young length.
update_survivors_policy();
} else {
// The marking phase has a "we only copy implicitly live
// objects during marking" invariant. The easiest way to ensure it
// holds is not to allocate any survivor regions and tenure all
// objects. In the future we might change this and handle survivor
// regions specially during marking.
tenure_all_objects();
}
assert(_g1->used() == _g1->recalculate_used(),
err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
@@ -939,7 +948,7 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
for (int i = 0; i < _parallel_gc_threads; ++i) {
_par_last_gc_worker_start_times_ms[i] = -1234.0;
_par_last_ext_root_scan_times_ms[i] = -1234.0;
_par_last_mark_stack_scan_times_ms[i] = -1234.0;
_par_last_satb_filtering_times_ms[i] = -1234.0;
_par_last_update_rs_times_ms[i] = -1234.0;
_par_last_update_rs_processed_buffers[i] = -1234.0;
_par_last_scan_rs_times_ms[i] = -1234.0;
@@ -1227,7 +1236,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
// of the PrintGCDetails output, in the non-parallel case.
double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
double update_rs_time = avg_value(_par_last_update_rs_times_ms);
double update_rs_processed_buffers =
sum_of_values(_par_last_update_rs_processed_buffers);
@@ -1236,7 +1245,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
double termination_time = avg_value(_par_last_termination_times_ms);
double known_time = ext_root_scan_time +
mark_stack_scan_time +
satb_filtering_time +
update_rs_time +
scan_rs_time +
obj_copy_time;
@@ -1282,7 +1291,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
body_summary->record_satb_filtering_time_ms(satb_filtering_time);
body_summary->record_update_rs_time_ms(update_rs_time);
body_summary->record_scan_rs_time_ms(scan_rs_time);
body_summary->record_obj_copy_time_ms(obj_copy_time);
@@ -1376,16 +1385,12 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
(last_pause_included_initial_mark) ? " (initial-mark)" : "",
elapsed_ms / 1000.0);
if (print_marking_info) {
print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
}
if (parallel) {
print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
if (print_marking_info) {
print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
}
print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
@@ -1399,7 +1404,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
_par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
_par_last_mark_stack_scan_times_ms[i] +
_par_last_satb_filtering_times_ms[i] +
_par_last_update_rs_times_ms[i] +
_par_last_scan_rs_times_ms[i] +
_par_last_obj_copy_times_ms[i] +
@@ -1412,7 +1417,7 @@ void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
} else {
print_stats(1, "Ext Root Scanning", ext_root_scan_time);
if (print_marking_info) {
print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
print_stats(1, "SATB Filtering", satb_filtering_time);
}
print_stats(1, "Update RS", update_rs_time);
print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
@@ -1983,11 +1988,10 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
if (summary->get_total_seq()->num() > 0) {
print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
if (body_summary != NULL) {
print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
if (parallel) {
print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
print_summary(2, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq());
print_summary(2, "SATB Filtering", body_summary->get_satb_filtering_seq());
print_summary(2, "Update RS", body_summary->get_update_rs_seq());
print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
@@ -1996,7 +2000,7 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
{
NumberSeq* other_parts[] = {
body_summary->get_ext_root_scan_seq(),
body_summary->get_mark_stack_scan_seq(),
body_summary->get_satb_filtering_seq(),
body_summary->get_update_rs_seq(),
body_summary->get_scan_rs_seq(),
body_summary->get_obj_copy_seq(),
@@ -2009,7 +2013,7 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
}
} else {
print_summary(1, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
print_summary(1, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq());
print_summary(1, "SATB Filtering", body_summary->get_satb_filtering_seq());
print_summary(1, "Update RS", body_summary->get_update_rs_seq());
print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
@@ -2036,7 +2040,7 @@ void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
body_summary->get_satb_drain_seq(),
body_summary->get_update_rs_seq(),
body_summary->get_ext_root_scan_seq(),
body_summary->get_mark_stack_scan_seq(),
body_summary->get_satb_filtering_seq(),
body_summary->get_scan_rs_seq(),
body_summary->get_obj_copy_seq()
};
@@ -2433,9 +2437,6 @@ void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
assert(_inc_cset_build_state == Active, "Precondition");
assert(!hr->is_young(), "non-incremental add of young region");
if (_g1->mark_in_progress())
_g1->concurrent_mark()->registerCSetRegion(hr);
assert(!hr->in_collection_set(), "should not already be in the CSet");
hr->set_in_collection_set(true);
hr->set_next_in_collection_set(_collection_set);
@@ -2705,9 +2706,6 @@ void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
// Clear the fields that point to the survivor list - they are all young now.
young_list->clear_survivors();
if (_g1->mark_in_progress())
_g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
_collection_set = _inc_cset_head;
_collection_set_bytes_used_before = _inc_cset_bytes_used_before;
time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
......
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -67,7 +67,7 @@ class MainBodySummary: public CHeapObj {
define_num_seq(satb_drain) // optional
define_num_seq(parallel) // parallel only
define_num_seq(ext_root_scan)
define_num_seq(mark_stack_scan)
define_num_seq(satb_filtering)
define_num_seq(update_rs)
define_num_seq(scan_rs)
define_num_seq(obj_copy)
@@ -215,7 +215,7 @@ private:
double* _par_last_gc_worker_start_times_ms;
double* _par_last_ext_root_scan_times_ms;
double* _par_last_mark_stack_scan_times_ms;
double* _par_last_satb_filtering_times_ms;
double* _par_last_update_rs_times_ms;
double* _par_last_update_rs_processed_buffers;
double* _par_last_scan_rs_times_ms;
@@ -841,8 +841,8 @@ public:
_par_last_ext_root_scan_times_ms[worker_i] = ms;
}
void record_mark_stack_scan_time(int worker_i, double ms) {
_par_last_mark_stack_scan_times_ms[worker_i] = ms;
void record_satb_filtering_time(int worker_i, double ms) {
_par_last_satb_filtering_times_ms[worker_i] = ms;
}
void record_satb_drain_time(double ms) {
@@ -1146,6 +1146,11 @@ public:
_survivor_surv_rate_group->stop_adding_regions();
}
void tenure_all_objects() {
_max_survivor_regions = 0;
_tenuring_threshold = 0;
}
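An illustrative consequence of zeroing the threshold (hedged; the real copy path lives in G1ParCopyHelper::copy_to_survivor_space, only partially visible in this diff) is that the usual age check tenures everything:

// With tenuring_threshold == 0 this returns true for every age, so
// nothing is copied into a survivor region during initial-mark.
static bool should_tenure(uint age, uintx tenuring_threshold) {
  return age >= tenuring_threshold;
}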
void record_survivor_regions(size_t regions,
HeapRegion* head,
HeapRegion* tail) {
......
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1_globals.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "utilities/workgroup.hpp"
// Closures and tasks associated with any self-forwarding pointers
// installed as a result of an evacuation failure.
class UpdateRSetDeferred : public OopsInHeapRegionClosure {
private:
G1CollectedHeap* _g1;
DirtyCardQueue *_dcq;
CardTableModRefBS* _ct_bs;
public:
UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
_g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
template <class T> void do_oop_work(T* p) {
assert(_from->is_in_reserved(p), "paranoia");
if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
!_from->is_survivor()) {
size_t card_index = _ct_bs->index_for(p);
if (_ct_bs->mark_card_deferred(card_index)) {
_dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
}
}
}
};
class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
private:
G1CollectedHeap* _g1;
ConcurrentMark* _cm;
HeapRegion* _hr;
size_t _marked_bytes;
OopsInHeapRegionClosure *_update_rset_cl;
bool _during_initial_mark;
bool _during_conc_mark;
public:
RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
HeapRegion* hr,
OopsInHeapRegionClosure* update_rset_cl,
bool during_initial_mark,
bool during_conc_mark) :
_g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
_update_rset_cl(update_rset_cl),
_during_initial_mark(during_initial_mark),
_during_conc_mark(during_conc_mark) { }
size_t marked_bytes() { return _marked_bytes; }
// <original comment>
// The original idea here was to coalesce evacuated and dead objects.
// However that caused complications with the block offset table (BOT).
// In particular if there were two TLABs, one of them partially refined.
// |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
// The BOT entries of the unrefined part of TLAB_2 point to the start
// of TLAB_2. If the last object of the TLAB_1 and the first object
// of TLAB_2 are coalesced, then the cards of the unrefined part
// would point into middle of the filler object.
// The current approach is to not coalesce and leave the BOT contents intact.
// </original comment>
//
// We now reset the BOT when we start the object iteration over the
// region and refine its entries for every object we come across. So
// the above comment is not really relevant and we should be able
// to coalesce dead objects if we want to.
void do_object(oop obj) {
HeapWord* obj_addr = (HeapWord*) obj;
assert(_hr->is_in(obj_addr), "sanity");
size_t obj_size = obj->size();
_hr->update_bot_for_object(obj_addr, obj_size);
if (obj->is_forwarded() && obj->forwardee() == obj) {
// The object failed to move.
// We consider all objects that we find self-forwarded to be
// live. What we'll do is that we'll update the prev marking
// info so that they are all under PTAMS and explicitly marked.
_cm->markPrev(obj);
if (_during_initial_mark) {
// For the next marking info we'll only mark the
// self-forwarded objects explicitly if we are during
// initial-mark (since, normally, we only mark objects pointed
// to by roots if we succeed in copying them). By marking all
// self-forwarded objects we ensure that we mark any that are
// still pointed to by roots. During concurrent marking, and
// after initial-mark, we don't need to mark any objects
// explicitly and all objects in the CSet are considered
// (implicitly) live. So, we won't mark them explicitly and
// we'll leave them over NTAMS.
_cm->markNext(obj);
}
_marked_bytes += (obj_size * HeapWordSize);
obj->set_mark(markOopDesc::prototype());
// While we were processing RSet buffers during the collection,
// we actually didn't scan any cards on the collection set,
// since we didn't want to update remembered sets with entries
// that point into the collection set, given that live objects
// from the collection set are about to move and such entries
// will be stale very soon.
// This change also dealt with a reliability issue which
// involved scanning a card in the collection set and coming
// across an array that was being chunked and looking malformed.
// The problem is that, if evacuation fails, we might have
// remembered set entries missing given that we skipped cards on
// the collection set. So, we'll recreate such entries now.
obj->oop_iterate(_update_rset_cl);
assert(_cm->isPrevMarked(obj), "Should be marked!");
} else {
// The object has been either evacuated or is dead. Fill it with a
// dummy object.
MemRegion mr((HeapWord*) obj, obj_size);
CollectedHeap::fill_with_object(mr);
}
}
};
class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
OopsInHeapRegionClosure *_update_rset_cl;
public:
RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
OopsInHeapRegionClosure* update_rset_cl) :
_g1h(g1h), _update_rset_cl(update_rset_cl),
_cm(_g1h->concurrent_mark()) { }
bool doHeapRegion(HeapRegion *hr) {
bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
bool during_conc_mark = _g1h->mark_in_progress();
assert(!hr->isHumongous(), "sanity");
assert(hr->in_collection_set(), "bad CS");
if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
if (hr->evacuation_failed()) {
RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, _update_rset_cl,
during_initial_mark,
during_conc_mark);
MemRegion mr(hr->bottom(), hr->end());
// We'll recreate the prev marking info so we'll first clear
// the prev bitmap range for this region. We never mark any
// CSet objects explicitly so the next bitmap range should be
// cleared anyway.
_cm->clearRangePrevBitmap(mr);
hr->note_self_forwarding_removal_start(during_initial_mark,
during_conc_mark);
// In the common case (i.e. when there is no evacuation
// failure) we make sure that the following is done when
// the region is freed so that it is "ready-to-go" when it's
// re-allocated. However, when evacuation failure happens, a
// region will remain in the heap and might ultimately be added
// to a CSet in the future. So we have to be careful here and
// make sure the region's RSet is ready for parallel iteration
// whenever this might be required in the future.
hr->rem_set()->reset_for_par_iteration();
hr->reset_bot();
_update_rset_cl->set_region(hr);
hr->object_iterate(&rspc);
hr->note_self_forwarding_removal_end(during_initial_mark,
during_conc_mark,
rspc.marked_bytes());
}
}
return false;
}
};
class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
public:
G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
AbstractGangTask("G1 Remove Self-forwarding Pointers"),
_g1h(g1h) { }
void work(uint worker_id) {
UpdateRSetImmediate immediate_update(_g1h->g1_rem_set());
DirtyCardQueue dcq(&_g1h->dirty_card_queue_set());
UpdateRSetDeferred deferred_update(_g1h, &dcq);
OopsInHeapRegionClosure *update_rset_cl = &deferred_update;
if (!G1DeferredRSUpdate) {
update_rset_cl = &immediate_update;
}
RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, update_rset_cl);
HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
_g1h->collection_set_iterate_from(hr, &rsfp_cl);
}
};
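A hedged sketch of how this task might be dispatched from the evacuation-failure handling code; the actual call site is elsewhere and the worker-set plumbing is elided:

G1ParRemoveSelfForwardPtrsTask rsfp_task(g1h);
g1h->workers()->run_task(&rsfp_task);  // one work(worker_id) per GC worker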
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -121,17 +121,25 @@ public:
class G1ParCopyHelper : public G1ParClosureSuper {
G1ParScanClosure *_scanner;
protected:
template <class T> void mark_object(T* p);
oop copy_to_survivor_space(oop obj, bool should_mark_root,
bool should_mark_copy);
// Mark the object if it's not already marked. This is used to mark
// objects pointed to by roots that are guaranteed not to move
// during the GC (i.e., non-CSet objects). It is MT-safe.
void mark_object(oop obj);
// Mark the object if it's not already marked. This is used to mark
// objects pointed to by roots that have been forwarded during a
// GC. It is MT-safe.
void mark_forwarded_object(oop from_obj, oop to_obj);
oop copy_to_survivor_space(oop obj);
public:
G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
G1ParScanClosure *scanner) :
G1ParClosureSuper(g1, par_scan_state), _scanner(scanner) { }
};
template<bool do_gen_barrier, G1Barrier barrier,
bool do_mark_object>
template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
G1ParScanClosure _scanner;
@@ -140,9 +148,8 @@ class G1ParCopyClosure : public G1ParCopyHelper {
public:
G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
ReferenceProcessor* rp) :
_scanner(g1, par_scan_state, rp),
G1ParCopyHelper(g1, par_scan_state, &_scanner)
{
_scanner(g1, par_scan_state, rp),
G1ParCopyHelper(g1, par_scan_state, &_scanner) {
assert(_ref_processor == NULL, "sanity");
}
......
@@ -295,7 +295,7 @@
"Percentage (0-100) of the heap size to use as minimum " \
"young gen size.") \
\
develop(uintx, G1DefaultMaxNewGenPercent, 50, \
develop(uintx, G1DefaultMaxNewGenPercent, 80, \
"Percentage (0-100) of the heap size to use as maximum " \
"young gen size.")
......
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -575,6 +575,40 @@ void HeapRegion::oop_before_save_marks_iterate(OopClosure* cl) {
oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
}
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
bool during_conc_mark) {
// We always recreate the prev marking info and we'll explicitly
// mark all objects we find to be self-forwarded on the prev
// bitmap. So all objects need to be below PTAMS.
_prev_top_at_mark_start = top();
_prev_marked_bytes = 0;
if (during_initial_mark) {
// During initial-mark, we'll also explicitly mark all objects
// we find to be self-forwarded on the next bitmap. So all
// objects need to be below NTAMS.
_next_top_at_mark_start = top();
set_top_at_conc_mark_count(bottom());
_next_marked_bytes = 0;
} else if (during_conc_mark) {
// During concurrent mark, all objects in the CSet (including
// the ones we find to be self-forwarded) are implicitly live.
// So all objects need to be above NTAMS.
_next_top_at_mark_start = bottom();
set_top_at_conc_mark_count(bottom());
_next_marked_bytes = 0;
}
}
void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
bool during_conc_mark,
size_t marked_bytes) {
assert(0 <= marked_bytes && marked_bytes <= used(),
err_msg("marked: "SIZE_FORMAT" used: "SIZE_FORMAT,
marked_bytes, used()));
_prev_marked_bytes = marked_bytes;
}
HeapWord*
HeapRegion::object_iterate_mem_careful(MemRegion mr,
ObjectClosure* cl) {
......
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -373,7 +373,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
ScrubRemSetClaimValue = 3,
ParVerifyClaimValue = 4,
RebuildRSClaimValue = 5,
CompleteMarkCSetClaimValue = 6
CompleteMarkCSetClaimValue = 6,
ParEvacFailureClaimValue = 7
};
inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
@@ -582,37 +583,33 @@ class HeapRegion: public G1OffsetTableContigSpace {
// that the collector is about to start or has finished (concurrently)
// marking the heap.
// Note the start of a marking phase. Record the
// start of the unmarked area of the region here.
void note_start_of_marking(bool during_initial_mark) {
init_top_at_conc_mark_count();
_next_marked_bytes = 0;
if (during_initial_mark && is_young() && !is_survivor())
_next_top_at_mark_start = bottom();
else
_next_top_at_mark_start = top();
}
// Note the end of a marking phase. Install the start of
// the unmarked area that was captured at start of marking.
void note_end_of_marking() {
_prev_top_at_mark_start = _next_top_at_mark_start;
_prev_marked_bytes = _next_marked_bytes;
_next_marked_bytes = 0;
guarantee(_prev_marked_bytes <=
(size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
"invariant");
}
// After an evacuation, we need to update _next_top_at_mark_start
// to be the current top. Note this is only valid if we have only
// ever evacuated into this region. If we evacuate, allocate, and
// then evacuate we are in deep doodoo.
void note_end_of_copying() {
assert(top() >= _next_top_at_mark_start, "Increase only");
_next_top_at_mark_start = top();
}
// Notify the region that concurrent marking is starting. Initialize
// all fields related to the next marking info.
inline void note_start_of_marking();
// Notify the region that concurrent marking has finished. Copy the
// (now finalized) next marking info fields into the prev marking
// info fields.
inline void note_end_of_marking();
// Notify the region that it will be used as to-space during a GC
// and we are about to start copying objects into it.
inline void note_start_of_copying(bool during_initial_mark);
// Notify the region that it ceases being to-space during a GC and
// we will not copy objects into it any more.
inline void note_end_of_copying(bool during_initial_mark);
// Notify the region that we are about to start processing
// self-forwarded objects during evac failure handling.
void note_self_forwarding_removal_start(bool during_initial_mark,
bool during_conc_mark);
// Notify the region that we have finished processing self-forwarded
// objects during evac failure handling.
void note_self_forwarding_removal_end(bool during_initial_mark,
bool during_conc_mark,
size_t marked_bytes);
// Returns "false" iff no object in the region was allocated when the
// last mark phase ended.
......
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,4 +55,71 @@ G1OffsetTableContigSpace::block_start_const(const void* p) const {
return _offsets.block_start_const(p);
}
inline void HeapRegion::note_start_of_marking() {
init_top_at_conc_mark_count();
_next_marked_bytes = 0;
_next_top_at_mark_start = top();
}
inline void HeapRegion::note_end_of_marking() {
_prev_top_at_mark_start = _next_top_at_mark_start;
_prev_marked_bytes = _next_marked_bytes;
_next_marked_bytes = 0;
assert(_prev_marked_bytes <=
(size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
HeapWordSize, "invariant");
}
inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
if (during_initial_mark) {
if (is_survivor()) {
assert(false, "should not allocate survivors during IM");
} else {
// During initial-mark we'll explicitly mark any objects on old
// regions that are pointed to by roots. Given that explicit
// marks only make sense under NTAMS it'd be nice if we could
// check that condition if we wanted to. Given that we don't
// know where the top of this region will end up, we simply set
// NTAMS to the end of the region so all marks will be below
// NTAMS. We'll set it to the actual top when we retire this region.
_next_top_at_mark_start = end();
}
} else {
if (is_survivor()) {
// This is how we always allocate survivors.
assert(_next_top_at_mark_start == bottom(), "invariant");
} else {
// We could have re-used this old region as to-space over a
// couple of GCs since the start of the concurrent marking
// cycle. This means that [bottom,NTAMS) will contain objects
// copied up to and including initial-mark and [NTAMS, top)
// will contain objects copied during the concurrent marking cycle.
assert(top() >= _next_top_at_mark_start, "invariant");
}
}
}
inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
if (during_initial_mark) {
if (is_survivor()) {
assert(false, "should not allocate survivors during IM");
} else {
// See the comment for note_start_of_copying() for the details
// on this.
assert(_next_top_at_mark_start == end(), "pre-condition");
_next_top_at_mark_start = top();
}
} else {
if (is_survivor()) {
// This is how we always allocate survivors.
assert(_next_top_at_mark_start == bottom(), "invariant");
} else {
// See the comment for note_start_of_copying() for the details
// on this.
assert(top() >= _next_top_at_mark_start, "invariant");
}
}
}
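A hedged end-to-end sketch (the wrapper function name is hypothetical) of the NTAMS transitions for an old region used as to-space during an initial-mark pause:

void example_to_space_lifecycle(HeapRegion* hr) {
  hr->note_start_of_copying(true /* during_initial_mark */);
  // NTAMS == hr->end(): any marks set while copying land below NTAMS.
  // ... objects are evacuated into hr and explicitly marked ...
  hr->note_end_of_copying(true /* during_initial_mark */);
  // NTAMS == hr->top(): exactly the copied objects sit below NTAMS.
}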
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,7 @@ public:
// given PtrQueueSet.
PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
// Release any contained resources.
void flush();
virtual void flush();
// Calls flush() when destroyed.
~PtrQueue() { flush(); }
......
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,14 @@
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
void ObjPtrQueue::flush() {
// The buffer might contain refs into the CSet. We have to filter it
// first before we flush it, otherwise we might end up with an
// enqueued buffer with refs into the CSet which breaks our invariants.
filter();
PtrQueue::flush();
}
// This method removes entries from an SATB buffer that will not be
// useful to the concurrent marking threads. An entry is removed if it
// satisfies one of the following conditions:
@@ -44,38 +52,27 @@
// process it again).
//
// The rest of the entries will be retained and are compacted towards
// the top of the buffer. If with this filtering we clear a large
// enough chunk of the buffer we can re-use it (instead of enqueueing
// it) and we can just allow the mutator to carry on executing.
bool ObjPtrQueue::should_enqueue_buffer() {
assert(_lock == NULL || _lock->owned_by_self(),
"we should have taken the lock before calling this");
// A value of 0 means "don't filter SATB buffers".
if (G1SATBBufferEnqueueingThresholdPercent == 0) {
return true;
}
// the top of the buffer. Note that, because we do not allow old
// regions in the CSet during marking, all objects on the CSet regions
// are young (eden or survivors) and therefore implicitly live. So any
// references into the CSet will be removed during filtering.
void ObjPtrQueue::filter() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// This method should only be called if there is a non-NULL buffer
// that is full.
assert(_index == 0, "pre-condition");
assert(_buf != NULL, "pre-condition");
void** buf = _buf;
size_t sz = _sz;
if (buf == NULL) {
// nothing to do
return;
}
// Used for sanity checking at the end of the loop.
debug_only(size_t entries = 0; size_t retained = 0;)
size_t i = sz;
size_t new_index = sz;
// Given that we are expecting _index == 0, we could have changed
// the loop condition to (i > 0). But we are using _index for
// generality.
while (i > _index) {
assert(i > 0, "we should have at least one more entry to process");
i -= oopSize;
@@ -103,20 +100,56 @@ bool ObjPtrQueue::should_enqueue_buffer() {
debug_only(retained += 1;)
}
}
#ifdef ASSERT
size_t entries_calc = (sz - _index) / oopSize;
assert(entries == entries_calc, "the number of entries we counted "
"should match the number of entries we calculated");
size_t retained_calc = (sz - new_index) / oopSize;
assert(retained == retained_calc, "the number of retained entries we counted "
"should match the number of retained entries we calculated");
size_t perc = retained_calc * 100 / entries_calc;
bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
#endif // ASSERT
_index = new_index;
}
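// ------------------------------------------------------------------
// [Editor's sketch, not part of the patch] The elided loop above keeps
// an entry by decrementing new_index and copying the entry towards the
// top of the buffer, so that on exit every slot in [_index, sz) holds
// a retained entry. A minimal stand-alone version of that compaction
// idea, using slot indices instead of the byte offsets the real code
// uses, and a caller-supplied predicate `keep` in place of the real
// liveness / CSet checks:

#include <cstddef>

template <typename Keep>
size_t filter_buffer_sketch(void** buf, size_t index, size_t capacity,
                            Keep keep) {
  size_t new_index = capacity;
  for (size_t i = capacity; i > index; ) {
    void* entry = buf[--i];          // scan from the top down
    if (keep(entry)) {
      buf[--new_index] = entry;      // compact retained entries upwards
    }
  }
  return new_index;                  // [new_index, capacity) is retained
}
// ------------------------------------------------------------------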
// This method first applies the above filtering to the buffer. If,
// post-filtering, a large enough chunk of the buffer has been cleared
// we can re-use the buffer (instead of enqueueing it) and simply let
// the mutator carry on executing with it.
bool ObjPtrQueue::should_enqueue_buffer() {
assert(_lock == NULL || _lock->owned_by_self(),
"we should have taken the lock before calling this");
// Even if G1SATBBufferEnqueueingThresholdPercent == 0 we still have
// to filter the buffer: filtering removes any references into the
// CSet, and we currently assume that no such refs appear in enqueued
// buffers.
// This method should only be called if there is a non-NULL buffer
// that is full.
assert(_index == 0, "pre-condition");
assert(_buf != NULL, "pre-condition");
filter();
size_t sz = _sz;
size_t all_entries = sz / oopSize;
size_t retained_entries = (sz - _index) / oopSize;
size_t perc = retained_entries * 100 / all_entries;
bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
return should_enqueue;
}
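// ------------------------------------------------------------------
// [Editor's sketch, not part of the patch] The decision above is plain
// integer arithmetic: enqueue only when the percentage of retained
// entries exceeds G1SATBBufferEnqueueingThresholdPercent. For example,
// with 128 entries of which 16 survive filtering, perc is
// 16 * 100 / 128 == 12, so a threshold of 60 would keep the buffer
// with the mutator. A stand-alone version of the check, with the
// threshold passed in instead of read from the global flag:

#include <cstddef>

bool should_enqueue_sketch(size_t all_entries, size_t retained_entries,
                           size_t threshold_percent) {
  size_t perc = retained_entries * 100 / all_entries;  // integer division
  return perc > threshold_percent;
}
// ------------------------------------------------------------------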
void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
if (_buf != NULL) {
apply_closure_to_buffer(cl, _buf, _index, _sz);
}
}
void ObjPtrQueue::apply_closure_and_empty(ObjectClosure* cl) {
if (_buf != NULL) {
apply_closure_to_buffer(cl, _buf, _index, _sz);
_index = _sz;
......@@ -135,6 +168,21 @@ void ObjPtrQueue::apply_closure_to_buffer(ObjectClosure* cl,
}
}
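// ------------------------------------------------------------------
// [Editor's sketch, not part of the patch] apply_closure_to_buffer()
// visits the active part of a buffer, i.e. the entries from `index` up
// to `sz`; apply_closure_and_empty() additionally sets _index = _sz so
// the buffer reads as empty afterwards. With a callable standing in
// for ObjectClosure, and slot indices instead of byte offsets, the
// traversal reduces to:

#include <cstddef>

template <typename Closure>
void apply_to_buffer_sketch(Closure&& cl, void** buf,
                            size_t index, size_t capacity) {
  for (size_t i = index; i < capacity; ++i) {
    cl(buf[i]);  // the real code invokes cl->do_object() on each entry
  }
}
// ------------------------------------------------------------------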
#ifndef PRODUCT
// Helpful for debugging
void ObjPtrQueue::print(const char* name) {
print(name, _buf, _index, _sz);
}
void ObjPtrQueue::print(const char* name,
void** buf, size_t index, size_t sz) {
gclog_or_tty->print_cr(" SATB BUFFER [%s] buf: "PTR_FORMAT" "
"index: "SIZE_FORMAT" sz: "SIZE_FORMAT,
name, buf, index, sz);
}
#endif // PRODUCT
#ifdef ASSERT
void ObjPtrQueue::verify_oops_in_buffer() {
if (_buf == NULL) return;
......@@ -150,12 +198,9 @@ void ObjPtrQueue::verify_oops_in_buffer() {
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER
SATBMarkQueueSet::SATBMarkQueueSet() :
PtrQueueSet(),
_closure(NULL), _par_closures(NULL),
_shared_satb_queue(this, true /*perm*/)
{}
PtrQueueSet(), _closure(NULL), _par_closures(NULL),
_shared_satb_queue(this, true /*perm*/) { }
void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
int process_completed_threshold,
......@@ -167,7 +212,6 @@ void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
}
}
void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) {
DEBUG_ONLY(t->satb_mark_queue().verify_oops_in_buffer();)
t->satb_mark_queue().handle_zero_index();
......@@ -228,6 +272,13 @@ void SATBMarkQueueSet::set_active_all_threads(bool b,
}
}
void SATBMarkQueueSet::filter_thread_buffers() {
for(JavaThread* t = Threads::first(); t; t = t->next()) {
t->satb_mark_queue().filter();
}
shared_satb_queue()->filter();
}
void SATBMarkQueueSet::set_closure(ObjectClosure* closure) {
_closure = closure;
}
......@@ -239,9 +290,9 @@ void SATBMarkQueueSet::set_par_closure(int i, ObjectClosure* par_closure) {
void SATBMarkQueueSet::iterate_closure_all_threads() {
for(JavaThread* t = Threads::first(); t; t = t->next()) {
t->satb_mark_queue().apply_closure(_closure);
t->satb_mark_queue().apply_closure_and_empty(_closure);
}
shared_satb_queue()->apply_closure(_closure);
shared_satb_queue()->apply_closure_and_empty(_closure);
}
void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
......@@ -250,7 +301,7 @@ void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
for(JavaThread* t = Threads::first(); t; t = t->next()) {
if (t->claim_oops_do(true, parity)) {
t->satb_mark_queue().apply_closure(_par_closures[worker]);
t->satb_mark_queue().apply_closure_and_empty(_par_closures[worker]);
}
}
......@@ -264,7 +315,7 @@ void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
VMThread* vmt = VMThread::vm_thread();
if (vmt->claim_oops_do(true, parity)) {
shared_satb_queue()->apply_closure(_par_closures[worker]);
shared_satb_queue()->apply_closure_and_empty(_par_closures[worker]);
}
}
......@@ -292,6 +343,61 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
}
}
void SATBMarkQueueSet::iterate_completed_buffers_read_only(ObjectClosure* cl) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
assert(cl != NULL, "pre-condition");
BufferNode* nd = _completed_buffers_head;
while (nd != NULL) {
void** buf = BufferNode::make_buffer_from_node(nd);
ObjPtrQueue::apply_closure_to_buffer(cl, buf, 0, _sz);
nd = nd->next();
}
}
void SATBMarkQueueSet::iterate_thread_buffers_read_only(ObjectClosure* cl) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
assert(cl != NULL, "pre-condition");
for (JavaThread* t = Threads::first(); t; t = t->next()) {
t->satb_mark_queue().apply_closure(cl);
}
shared_satb_queue()->apply_closure(cl);
}
#ifndef PRODUCT
// Helpful for debugging
#define SATB_PRINTER_BUFFER_SIZE 256
void SATBMarkQueueSet::print_all(const char* msg) {
char buffer[SATB_PRINTER_BUFFER_SIZE];
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
gclog_or_tty->cr();
gclog_or_tty->print_cr("SATB BUFFERS [%s]", msg);
BufferNode* nd = _completed_buffers_head;
int i = 0;
while (nd != NULL) {
void** buf = BufferNode::make_buffer_from_node(nd);
jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i);
ObjPtrQueue::print(buffer, buf, 0, _sz);
nd = nd->next();
i += 1;
}
for (JavaThread* t = Threads::first(); t; t = t->next()) {
jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name());
t->satb_mark_queue().print(buffer);
}
shared_satb_queue()->print("Shared");
gclog_or_tty->cr();
}
#endif // PRODUCT
void SATBMarkQueueSet::abandon_partial_marking() {
BufferNode* buffers_to_delete = NULL;
{
......@@ -316,5 +422,5 @@ void SATBMarkQueueSet::abandon_partial_marking() {
for (JavaThread* t = Threads::first(); t; t = t->next()) {
t->satb_mark_queue().reset();
}
shared_satb_queue()->reset();
}
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -29,9 +29,26 @@
class ObjectClosure;
class JavaThread;
class SATBMarkQueueSet;
// A ptrQueue whose elements are "oops", pointers to object heads.
class ObjPtrQueue: public PtrQueue {
friend class SATBMarkQueueSet;
private:
// Filter out unwanted entries from the buffer.
void filter();
// Apply the closure to all elements.
void apply_closure(ObjectClosure* cl);
// Apply the closure to all elements and empty the buffer.
void apply_closure_and_empty(ObjectClosure* cl);
// Apply the closure to all elements of "buf", down to "index" (inclusive.)
static void apply_closure_to_buffer(ObjectClosure* cl,
void** buf, size_t index, size_t sz);
public:
ObjPtrQueue(PtrQueueSet* qset, bool perm = false) :
// SATB queues are only active during marking cycles. We create
......@@ -41,23 +58,23 @@ public:
// field to true. This is done in JavaThread::initialize_queues().
PtrQueue(qset, perm, false /* active */) { }
// Overrides PtrQueue::flush() so that it can filter the buffer
// before it is flushed.
virtual void flush();
// Overrides PtrQueue::should_enqueue_buffer(). See the method's
// definition for more information.
virtual bool should_enqueue_buffer();
// Apply the closure to all elements, and reset the index to make the
// buffer empty.
void apply_closure(ObjectClosure* cl);
// Apply the closure to all elements of "buf", down to "index" (inclusive.)
static void apply_closure_to_buffer(ObjectClosure* cl,
void** buf, size_t index, size_t sz);
#ifndef PRODUCT
// Helpful for debugging
void print(const char* name);
static void print(const char* name, void** buf, size_t index, size_t sz);
#endif // PRODUCT
void verify_oops_in_buffer() NOT_DEBUG_RETURN;
};
class SATBMarkQueueSet: public PtrQueueSet {
ObjectClosure* _closure;
ObjectClosure** _par_closures; // One per ParGCThread.
......@@ -88,6 +105,9 @@ public:
// set itself, has an active value same as expected_active.
void set_active_all_threads(bool b, bool expected_active);
// Filter all the currently-active SATB buffers.
void filter_thread_buffers();
// Register "blk" as "the closure" for all queues. Only one such closure
// is allowed. The "apply_closure_to_completed_buffer" method will apply
// this closure to a completed buffer, and "iterate_closure_all_threads"
......@@ -98,10 +118,9 @@ public:
// closures, one for each parallel GC thread.
void set_par_closure(int i, ObjectClosure* closure);
// If there is a registered closure for buffers, apply it to all entries
// in all currently-active buffers. This should only be applied at a
// safepoint. (Currently must not be called in parallel; this should
// change in the future.)
// Apply the registered closure to all entries on each
// currently-active buffer and then empty the buffer. It should only
// be called serially and at a safepoint.
void iterate_closure_all_threads();
// Parallel version of the above.
void par_iterate_closure_all_threads(int worker);
......@@ -117,11 +136,21 @@ public:
return apply_closure_to_completed_buffer_work(true, worker);
}
// Apply the given closure on enqueued and currently-active buffers
// respectively. Both methods are read-only, i.e., they do not
// modify any of the buffers.
void iterate_completed_buffers_read_only(ObjectClosure* cl);
void iterate_thread_buffers_read_only(ObjectClosure* cl);
#ifndef PRODUCT
// Helpful for debugging
void print_all(const char* msg);
#endif // PRODUCT
ObjPtrQueue* shared_satb_queue() { return &_shared_satb_queue; }
// If a marking is being abandoned, reset any unprocessed log buffers.
void abandon_partial_marking();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_SATBQUEUE_HPP
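// ------------------------------------------------------------------
// [Editor's sketch, not part of the patch] A hypothetical remark-time
// pass tying the new methods together, assuming the surrounding
// HotSpot headers: filter the currently-active buffers first, then
// walk everything with a read-only closure.

void satb_verify_sketch(SATBMarkQueueSet* satb, ObjectClosure* cl) {
  satb->filter_thread_buffers();                  // drop filterable entries
  satb->iterate_completed_buffers_read_only(cl);  // enqueued, full buffers
  satb->iterate_thread_buffers_read_only(cl);     // currently-active buffers
}
// ------------------------------------------------------------------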