Commit aea2819e authored by J johnc

Merge

src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -1825,23 +1825,11 @@ void ConcurrentMark::completeCleanup() {
   }
 }
 
-class G1CMIsAliveClosure: public BoolObjectClosure {
-  G1CollectedHeap* _g1;
- public:
-  G1CMIsAliveClosure(G1CollectedHeap* g1) :
-    _g1(g1)
-  {}
-
-  void do_object(oop obj) {
-    assert(false, "not to be invoked");
-  }
-  bool do_object_b(oop obj) {
-    HeapWord* addr = (HeapWord*)obj;
-    return addr != NULL &&
-           (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
-  }
-};
+bool G1CMIsAliveClosure::do_object_b(oop obj) {
+  HeapWord* addr = (HeapWord*)obj;
+  return addr != NULL &&
+         (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
+}
 
 class G1CMKeepAliveClosure: public OopClosure {
   G1CollectedHeap* _g1;
@@ -1896,16 +1884,15 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   rp->setup_policy(clear_all_soft_refs);
   assert(_markStack.isEmpty(), "mark stack should be empty");
 
-  G1CMIsAliveClosure   g1IsAliveClosure  (g1h);
-  G1CMKeepAliveClosure g1KeepAliveClosure(g1h, this, nextMarkBitMap());
+  G1CMIsAliveClosure   g1_is_alive(g1h);
+  G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
   G1CMDrainMarkingStackClosure
-    g1DrainMarkingStackClosure(nextMarkBitMap(), &_markStack,
-                               &g1KeepAliveClosure);
+    g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
 
   // XXXYYY Also: copy the parallel ref processing code from CMS.
-  rp->process_discovered_references(&g1IsAliveClosure,
-                                    &g1KeepAliveClosure,
-                                    &g1DrainMarkingStackClosure,
+  rp->process_discovered_references(&g1_is_alive,
+                                    &g1_keep_alive,
+                                    &g1_drain_mark_stack,
                                     NULL);
 
   assert(_markStack.overflow() || _markStack.isEmpty(),
          "mark stack should be empty (unless it overflowed)");
@@ -1918,8 +1905,8 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   assert(!rp->discovery_enabled(), "should have been disabled");
 
   // Now clean up stale oops in SymbolTable and StringTable
-  SymbolTable::unlink(&g1IsAliveClosure);
-  StringTable::unlink(&g1IsAliveClosure);
+  SymbolTable::unlink(&g1_is_alive);
+  StringTable::unlink(&g1_is_alive);
 }
 
 void ConcurrentMark::swapMarkBitMaps() {
......
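Note: the concurrentMark.cpp hunks above move the G1CMIsAliveClosure definition out of the .cpp file (its declaration now lives in concurrentMark.hpp, next hunk) and rename the closure locals in weakRefsWork(); the liveness test itself is unchanged. As a reading aid, here is a minimal, self-contained sketch of that predicate, with invented names and a toy set standing in for the real marking bitmap:

    // Toy model (not HotSpot code): an object is treated as alive if it lies
    // outside the G1-reserved range (G1 can say nothing about it), or if it
    // lies inside and is not "ill", i.e. not dead per the marking information.
    #include <cstdint>
    #include <unordered_set>

    struct ToyHeap {
      uintptr_t reserved_start, reserved_end;  // [start, end) of reserved heap
      std::unordered_set<uintptr_t> marked;    // stand-in for the mark bitmap

      bool is_in_reserved(uintptr_t addr) const {
        return addr >= reserved_start && addr < reserved_end;
      }
      bool is_obj_ill(uintptr_t addr) const {  // in the heap but not marked
        return marked.count(addr) == 0;
      }
    };

    // Mirrors: addr != NULL && (!is_in_g1_reserved(addr) || !is_obj_ill(obj))
    bool is_alive(const ToyHeap& h, uintptr_t addr) {
      return addr != 0 && (!h.is_in_reserved(addr) || !h.is_obj_ill(addr));
    }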
src/share/vm/gc_implementation/g1/concurrentMark.hpp
@@ -33,6 +33,25 @@ class CMTask;
 typedef GenericTaskQueue<oop>            CMTaskQueue;
 typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;
 
+// Closure used by CM during concurrent reference discovery
+// and reference processing (during remarking) to determine
+// if a particular object is alive. It is primarily used
+// to determine if referents of discovered reference objects
+// are alive. An instance is also embedded into the
+// reference processor as the _is_alive_non_header field
+class G1CMIsAliveClosure: public BoolObjectClosure {
+  G1CollectedHeap* _g1;
+ public:
+  G1CMIsAliveClosure(G1CollectedHeap* g1) :
+    _g1(g1)
+  {}
+
+  void do_object(oop obj) {
+    ShouldNotCallThis();
+  }
+  bool do_object_b(oop obj);
+};
+
 // A generic CM bit map. This is essentially a wrapper around the BitMap
 // class, with one bit per (1<<_shifter) HeapWords.
......
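Note: per the comment above, an instance of this closure is embedded in the reference processor as _is_alive_non_header. Its purpose is to let discovery skip Reference objects whose referents are already alive, so the discovered lists stay short. The sketch below is hypothetical (these are not the actual ReferenceProcessor names) and only shows the shape of the check:

    // Hypothetical shape of the discovery filter; names are invented.
    class BoolObjectClosureLike {
     public:
      virtual ~BoolObjectClosureLike() {}
      virtual bool do_object_b(void* obj) = 0;
    };

    bool should_discover_reference(void* referent,
                                   BoolObjectClosureLike* is_alive_non_header) {
      // No closure installed: be conservative and discover everything.
      if (is_alive_non_header == nullptr) return true;
      // Referent already alive: processing this Reference would be a no-op,
      // so skip adding it to the discovered list.
      return !is_alive_non_header->do_object_b(referent);
    }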
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -1192,6 +1192,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
     return false;
   }
 
+  DTraceGCProbeMarker gc_probe_marker(true /* full */);
   ResourceMark rm;
 
   if (PrintHeapAtGC) {
@@ -1768,6 +1769,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   _g1_policy(policy_),
   _dirty_card_queue_set(false),
   _into_cset_dirty_card_queue_set(false),
+  _is_alive_closure(this),
   _ref_processor(NULL),
   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   _bot_shared(NULL),
@@ -2061,7 +2063,8 @@ void G1CollectedHeap::ref_processing_init() {
                            mr,    // span
                            false, // Reference discovery is not atomic
                            true,  // mt_discovery
-                           NULL,  // is alive closure: need to fill this in for efficiency
+                           &_is_alive_closure, // is alive closure
+                                               // for efficiency
                            ParallelGCThreads,
                            ParallelRefProcEnabled,
                            true); // Setting next fields of discovered
@@ -3211,13 +3214,14 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
     return false;
   }
 
-  ResourceMark rm;
+  DTraceGCProbeMarker gc_probe_marker(false /* full */);
 
   if (PrintHeapAtGC) {
     Universe::print_heap_before_gc();
   }
 
   {
+    ResourceMark rm;
     // This call will decide whether this pause is an initial-mark
     // pause. If it is, during_initial_mark_pause() will return true
     // for the duration of this pause.
@@ -3956,8 +3960,6 @@ void G1CollectedHeap::remove_self_forwarding_pointers() {
   // Now restore saved marks, if any.
   if (_objs_with_preserved_marks != NULL) {
     assert(_preserved_marks_of_objs != NULL, "Both or none.");
-    assert(_objs_with_preserved_marks->length() ==
-           _preserved_marks_of_objs->length(), "Both or none.");
     guarantee(_objs_with_preserved_marks->length() ==
               _preserved_marks_of_objs->length(), "Both or none.");
     for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
@@ -4052,7 +4054,10 @@ void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
 }
 
 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
-  if (m != markOopDesc::prototype()) {
-    assert(evacuation_failed(), "Oversaving!");
+  assert(evacuation_failed(), "Oversaving!");
+  // We want to call the "for_promotion_failure" version only in the
+  // case of a promotion failure.
+  if (m->must_be_preserved_for_promotion_failure(obj)) {
     if (_objs_with_preserved_marks == NULL) {
       assert(_preserved_marks_of_objs == NULL, "Both or none.");
       _objs_with_preserved_marks =
......
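Note: preserve_mark_if_necessary() and remove_self_forwarding_pointers() above form a save-and-restore protocol for mark words that get clobbered by self-forwarding pointers when evacuation fails. A condensed, runnable model of that protocol, with invented names and a plain integer standing in for markOop:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Obj { uintptr_t mark; };
    const uintptr_t kPrototypeMark = 0x1;  // stand-in for markOopDesc::prototype()

    // Two parallel stacks; they must always have equal length (hence the
    // "Both or none." guarantee in the hunk above).
    std::vector<Obj*>      objs_with_preserved_marks;
    std::vector<uintptr_t> preserved_marks_of_objs;

    void preserve_mark_if_necessary(Obj* obj) {
      if (obj->mark != kPrototypeMark) {  // only non-trivial marks need saving
        objs_with_preserved_marks.push_back(obj);
        preserved_marks_of_objs.push_back(obj->mark);
      }
    }

    void restore_preserved_marks() {
      for (std::size_t i = 0; i < objs_with_preserved_marks.size(); i++) {
        objs_with_preserved_marks[i]->mark = preserved_marks_of_objs[i];
      }
      objs_with_preserved_marks.clear();
      preserved_marks_of_objs.clear();
    }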
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -849,6 +849,12 @@ protected:
   void print_gc_alloc_regions();
 #endif // !PRODUCT
 
+  // Instance of the concurrent mark is_alive closure for embedding
+  // into the reference processor as the is_alive_non_header. This
+  // prevents unnecessary additions to the discovered lists during
+  // concurrent discovery.
+  G1CMIsAliveClosure _is_alive_closure;
+
   // ("Weak") Reference processing support
   ReferenceProcessor* _ref_processor;
@@ -893,7 +899,7 @@ public:
   // specified by the policy object.
   jint initialize();
 
-  void ref_processing_init();
+  virtual void ref_processing_init();
 
   void set_par_threads(int t) {
     SharedHeap::set_par_threads(t);
......
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
@@ -1058,10 +1058,11 @@ bool ParNewGeneration::is_legal_forward_ptr(oop p) {
 #endif
 
 void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
-  if ((m != markOopDesc::prototype()) &&
-      (!UseBiasedLocking || (m != markOopDesc::biased_locking_prototype()))) {
+  if (m->must_be_preserved_for_promotion_failure(obj)) {
+    // We should really have separate per-worker stacks, rather
+    // than use locking of a common pair of stacks.
     MutexLocker ml(ParGCRareEvent_lock);
-    DefNewGeneration::preserve_mark_if_necessary(obj, m);
+    preserve_mark(obj, m);
   }
 }
......
src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
@@ -694,6 +694,8 @@ void PSScavenge::clean_up_failed_promotion() {
 void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
   _promotion_failed = true;
   if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
+    // Should use per-worker private stacks here rather than
+    // locking a common pair of stacks.
     ThreadCritical tc;
     _preserved_oop_stack.push(obj);
     _preserved_mark_stack.push(obj_mark);
......
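Note: the added comment records a known scalability limitation: every worker thread that hits a promotion failure serializes on one ThreadCritical around a shared pair of stacks. Illustrative sketch of the current scheme next to the per-worker alternative the comment suggests (std::mutex standing in for ThreadCritical):

    #include <cstdint>
    #include <mutex>
    #include <vector>

    struct PreservedPair { void* obj; uintptr_t mark; };

    std::mutex shared_lock;
    std::vector<PreservedPair> shared_stack;

    // Current scheme: every failing worker takes the same lock per push.
    void push_locked(PreservedPair p) {
      std::lock_guard<std::mutex> g(shared_lock);
      shared_stack.push_back(p);
    }

    // Suggested scheme: each worker pushes into a private stack with no
    // locking, and the stacks are merged once at the end of the pause.
    void merge_worker_stack(std::vector<PreservedPair>& local) {
      std::lock_guard<std::mutex> g(shared_lock);
      shared_stack.insert(shared_stack.end(), local.begin(), local.end());
      local.clear();
    }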
src/share/vm/gc_implementation/shared/vmGCOperations.hpp
@@ -209,4 +209,15 @@ class VM_GenCollectForPermanentAllocation: public VM_GC_Operation {
   HeapWord* result() const       { return _res; }
 };
 
+class DTraceGCProbeMarker : public StackObj {
+ public:
+  DTraceGCProbeMarker(bool full) {
+    VM_GC_Operation::notify_gc_begin(full);
+  }
+
+  ~DTraceGCProbeMarker() {
+    VM_GC_Operation::notify_gc_end();
+  }
+};
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_VMGCOPERATIONS_HPP
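Note: DTraceGCProbeMarker is a StackObj (RAII) wrapper, which is why the two G1 call sites earlier in this commit can simply declare a local before their early return false paths: the destructor fires notify_gc_end() on every exit from the scope. A plain-C++ illustration of the pattern:

    #include <cstdio>

    struct ProbeMarker {
      ProbeMarker(bool full) { std::printf("gc begin (full=%d)\n", (int)full); }
      ~ProbeMarker()         { std::printf("gc end\n"); }
    };

    bool collect(bool precondition_ok) {
      ProbeMarker marker(true /* full */);
      if (!precondition_ok) {
        return false;  // "gc end" is still printed on this path
      }
      // ... do the collection ...
      return true;
    }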
src/share/vm/memory/defNewGeneration.cpp
@@ -684,23 +684,28 @@ void DefNewGeneration::remove_forwarding_pointers() {
   _preserved_marks_of_objs.clear(true);
 }
 
+void DefNewGeneration::preserve_mark(oop obj, markOop m) {
+  assert(promotion_failed() && m->must_be_preserved_for_promotion_failure(obj),
+         "Oversaving!");
+  _objs_with_preserved_marks.push(obj);
+  _preserved_marks_of_objs.push(m);
+}
+
 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
   if (m->must_be_preserved_for_promotion_failure(obj)) {
-    _objs_with_preserved_marks.push(obj);
-    _preserved_marks_of_objs.push(m);
+    preserve_mark(obj, m);
   }
 }
 
 void DefNewGeneration::handle_promotion_failure(oop old) {
-  preserve_mark_if_necessary(old, old->mark());
-  if (!_promotion_failed && PrintPromotionFailure) {
+  if (PrintPromotionFailure && !_promotion_failed) {
     gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                         old->size());
   }
+  _promotion_failed = true;
+  preserve_mark_if_necessary(old, old->mark());
   // forward to self
   old->forward_to(old);
-  _promotion_failed = true;
   _promo_failure_scan_stack.push(old);
......
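Note: handle_promotion_failure() now sets _promotion_failed before calling preserve_mark_if_necessary(). That ordering matters because the new preserve_mark() asserts promotion_failed(); with the old order the assert would fire. Condensed model of the dependency:

    #include <cassert>

    bool promotion_failed = false;

    void preserve_mark() {
      assert(promotion_failed && "Oversaving!");  // mirrors the new assert
      // ... push <obj, mark> onto the preservation stacks ...
    }

    void handle_promotion_failure() {
      promotion_failed = true;  // must be set first, or the assert fires
      preserve_mark();
    }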
src/share/vm/memory/defNewGeneration.hpp
@@ -85,6 +85,7 @@ protected:
   // Preserve the mark of "obj", if necessary, in preparation for its mark
   // word being overwritten with a self-forwarding-pointer.
   void preserve_mark_if_necessary(oop obj, markOop m);
+  void preserve_mark(oop obj, markOop m);    // work routine used by the above
 
   // Together, these keep <object with a preserved mark, mark value> pairs.
   // They should always contain the same number of elements.
......
src/share/vm/oops/markOop.inline.hpp
@@ -30,7 +30,7 @@
 #include "oops/markOop.hpp"
 #include "runtime/globals.hpp"
 
-// Should this header be preserved during GC?
+// Should this header be preserved during GC (when biased locking is enabled)?
 inline bool markOopDesc::must_be_preserved_with_bias(oop obj_containing_mark) const {
   assert(UseBiasedLocking, "unexpected");
   if (has_bias_pattern()) {
@@ -47,14 +47,15 @@ inline bool markOopDesc::must_be_preserved_with_bias(oop obj_containing_mark) co
   return (!is_unlocked() || !has_no_hash());
 }
 
 // Should this header be preserved during GC?
 inline bool markOopDesc::must_be_preserved(oop obj_containing_mark) const {
   if (!UseBiasedLocking)
     return (!is_unlocked() || !has_no_hash());
   return must_be_preserved_with_bias(obj_containing_mark);
 }
 
-// Should this header (including its age bits) be preserved in the
-// case of a promotion failure during scavenge?
+// Should this header be preserved in the case of a promotion failure
+// during scavenge (when biased locking is enabled)?
 inline bool markOopDesc::must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const {
   assert(UseBiasedLocking, "unexpected");
   // We don't explicitly save off the mark words of biased and
@@ -70,18 +71,20 @@ inline bool markOopDesc::must_be_preserved_with_bias_for_promotion_failure(oop o
       prototype_for_object(obj_containing_mark)->has_bias_pattern()) {
     return true;
   }
-  return (this != prototype());
+  return (!is_unlocked() || !has_no_hash());
 }
 
 // Should this header be preserved in the case of a promotion failure
 // during scavenge?
 inline bool markOopDesc::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
   if (!UseBiasedLocking)
-    return (this != prototype());
+    return (!is_unlocked() || !has_no_hash());
   return must_be_preserved_with_bias_for_promotion_failure(obj_containing_mark);
 }
 
-// Should this header (including its age bits) be preserved in the
-// case of a scavenge in which CMS is the old generation?
+// Same as must_be_preserved_with_bias_for_promotion_failure() except that
+// it takes a klassOop argument, instead of the object of which this is the mark word.
 inline bool markOopDesc::must_be_preserved_with_bias_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const {
   assert(UseBiasedLocking, "unexpected");
   // CMS scavenges preserve mark words in similar fashion to promotion failures; see above
@@ -89,11 +92,14 @@ inline bool markOopDesc::must_be_preserved_with_bias_for_cms_scavenge(klassOop k
       klass_of_obj_containing_mark->klass_part()->prototype_header()->has_bias_pattern()) {
     return true;
   }
-  return (this != prototype());
+  return (!is_unlocked() || !has_no_hash());
 }
 
+// Same as must_be_preserved_for_promotion_failure() except that
+// it takes a klassOop argument, instead of the object of which this is the mark word.
 inline bool markOopDesc::must_be_preserved_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const {
   if (!UseBiasedLocking)
-    return (this != prototype());
+    return (!is_unlocked() || !has_no_hash());
   return must_be_preserved_with_bias_for_cms_scavenge(klass_of_obj_containing_mark);
 }
......
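Note: the recurring edit in the markOop.inline.hpp hunks replaces this != prototype() with (!is_unlocked() || !has_no_hash()). If I read the predicates right, the practical difference is that a mark word differing from the prototype only in its age bits is no longer preserved on promotion failure, which matches the deletion of "(including its age bits)" from the comments. A toy mark word (invented layout) contrasting the two:

    #include <cstdio>

    struct ToyMark {
      bool unlocked;
      bool has_hash;
      int  age;  // age bits; the prototype has age == 0

      bool old_predicate() const {  // this != prototype()
        return !unlocked || has_hash || age != 0;
      }
      bool new_predicate() const {  // !is_unlocked() || !has_no_hash()
        return !unlocked || has_hash;
      }
    };

    int main() {
      ToyMark aged_only = { true, false, 3 };  // unlocked, no hash, aged
      std::printf("old=%d new=%d\n",
                  (int)aged_only.old_predicate(),   // 1: was preserved
                  (int)aged_only.new_predicate());  // 0: no longer preserved
      return 0;
    }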