Commit b2b3a5ba authored by: J johnc

7133038: G1: Some small profile based optimizations

Summary: Some minor profile-based optimizations. Reduce the number of branches and branch mispredicts by removing some virtual calls through closure specialization, and by refactoring some conditional statements.
Reviewed-by: brutisso, tonyp
Parent b17696fe
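The closure specialization mentioned in the summary hinges on each closure carrying a non-virtual do_oop_nv worker next to the virtual do_oop entry point, plus iteration code that is instantiated per concrete closure type so the worker can be called, and inlined, without virtual dispatch. The following is a minimal standalone sketch of that idea; it is not code from this patch, and OopClosureBase, CountingClosure, and the iterate_* helpers are illustrative stand-ins rather than the real HotSpot types.

#include <cstddef>

// Stand-in for HotSpot's oop; the real type is a (possibly compressed) heap pointer.
typedef void* oop;

class OopClosureBase {
public:
  virtual void do_oop(oop* p) = 0;   // generic entry point: one virtual call per oop
  virtual ~OopClosureBase() { }
};

class CountingClosure : public OopClosureBase {
  size_t _count;
public:
  CountingClosure() : _count(0) { }
  size_t count() const { return _count; }
  // Non-virtual worker, mirroring the do_oop_nv pattern used by the G1 closures.
  void do_oop_nv(oop* p) { if (*p != NULL) _count++; }
  virtual void do_oop(oop* p) { do_oop_nv(p); }
};

// Generic iteration: every element pays for a virtual dispatch (and the
// associated branch and branch-misprediction cost this change targets).
void iterate_generic(oop* begin, oop* end, OopClosureBase* cl) {
  for (oop* p = begin; p < end; p++) cl->do_oop(p);
}

// Specialized iteration: the closure type is known statically, so the
// compiler can call and inline do_oop_nv directly -- no virtual dispatch.
template <class ClosureType>
void iterate_specialized(oop* begin, oop* end, ClosureType* cl) {
  for (oop* p = begin; p < end; p++) cl->do_oop_nv(p);
}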
@@ -246,4 +246,77 @@ public:
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
// Closure that applies the given two closures in sequence.
// Used by the RSet refinement code (when updating RSets
// during an evacuation pause) to record cards containing
// pointers into the collection set.
class G1Mux2Closure : public OopClosure {
OopClosure* _c1;
OopClosure* _c2;
public:
G1Mux2Closure(OopClosure *c1, OopClosure *c2);
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
// A closure that returns true if it is actually applied
// to a reference
class G1TriggerClosure : public OopClosure {
bool _triggered;
public:
G1TriggerClosure();
bool triggered() const { return _triggered; }
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
// A closure which uses a triggering closure to determine
// whether to apply an oop closure.
class G1InvokeIfNotTriggeredClosure: public OopClosure {
G1TriggerClosure* _trigger_cl;
OopClosure* _oop_cl;
public:
G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
class G1UpdateRSOrPushRefOopClosure: public OopClosure {
G1CollectedHeap* _g1;
G1RemSet* _g1_rem_set;
HeapRegion* _from;
OopsInHeapRegionClosure* _push_ref_cl;
bool _record_refs_into_cset;
int _worker_i;
public:
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
G1RemSet* rs,
OopsInHeapRegionClosure* push_ref_cl,
bool record_refs_into_cset,
int worker_i = 0);
void set_from(HeapRegion* from) {
assert(from != NULL, "from region must be non-NULL");
_from = from;
}
bool self_forwarded(oop obj) {
bool result = (obj->is_forwarded() && (obj->forwardee() == obj));
return result;
}
bool apply_to_weak_ref_discovered_field() { return true; }
template <class T> void do_oop_nv(T* p);
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
virtual void do_oop(oop* p) { do_oop_nv(p); }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
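A rough sketch of how the three helper closures declared above are meant to compose during RSet refinement, mirroring the wiring that appears later in this patch in G1RemSet::concurrentRefineOneCard_impl. Everything here (OopClosureSketch, the Trigger/InvokeIfNotTriggered/Mux2/Filter stand-ins, and scan_card_sketch) is illustrative, simplified code, not the VM's types.

#include <cstddef>

typedef void* oop;                       // illustrative stand-in

class OopClosureSketch {
public:
  virtual void do_oop(oop* p) = 0;
  virtual ~OopClosureSketch() { }
};

class Trigger : public OopClosureSketch {            // ~ G1TriggerClosure
  bool _triggered;
public:
  Trigger() : _triggered(false) { }
  bool triggered() const { return _triggered; }
  virtual void do_oop(oop*) { _triggered = true; }
};

class InvokeIfNotTriggered : public OopClosureSketch { // ~ G1InvokeIfNotTriggeredClosure
  Trigger* _t;
  OopClosureSketch* _oc;
public:
  InvokeIfNotTriggered(Trigger* t, OopClosureSketch* oc) : _t(t), _oc(oc) { }
  virtual void do_oop(oop* p) { if (!_t->triggered()) _oc->do_oop(p); }
};

class Mux2 : public OopClosureSketch {                // ~ G1Mux2Closure
  OopClosureSketch* _c1;
  OopClosureSketch* _c2;
public:
  Mux2(OopClosureSketch* c1, OopClosureSketch* c2) : _c1(c1), _c2(c2) { }
  virtual void do_oop(oop* p) { _c1->do_oop(p); _c2->do_oop(p); }
};

// Stand-in for FilterIntoCSClosure: forwards "interesting" oops (here simply
// non-NULL ones; in G1, references into the collection set) to a downstream
// closure -- in the real wiring, the trigger.
class Filter : public OopClosureSketch {
  OopClosureSketch* _downstream;
public:
  Filter(OopClosureSketch* downstream) : _downstream(downstream) { }
  virtual void do_oop(oop* p) { if (*p != NULL) _downstream->do_oop(p); }
};

// Feeds every oop to the RSet-update closure while recording whether any
// "into collection set" reference was seen.
bool scan_card_sketch(oop* begin, oop* end, OopClosureSketch* update_rs) {
  Trigger trigger;
  Filter into_cs(&trigger);                          // feeds the trigger
  InvokeIfNotTriggered invoke(&trigger, &into_cs);   // stop filtering once triggered
  Mux2 mux(&invoke, update_rs);                      // filter chain + RSet update, per oop
  for (oop* p = begin; p < end; p++) {
    mux.do_oop(p);
  }
  return trigger.triggered();
}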
@@ -142,4 +142,85 @@ inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
}
}
template <class T>
inline void G1Mux2Closure::do_oop_nv(T* p) {
// Apply first closure; then apply the second.
_c1->do_oop(p);
_c2->do_oop(p);
}
template <class T>
inline void G1TriggerClosure::do_oop_nv(T* p) {
// Record that this closure was actually applied (triggered).
_triggered = true;
}
template <class T>
inline void G1InvokeIfNotTriggeredClosure::do_oop_nv(T* p) {
if (!_trigger_cl->triggered()) {
_oop_cl->do_oop(p);
}
}
template <class T>
inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
// can't do because of races
// assert(obj == NULL || obj->is_oop(), "expected an oop");
// Do the safe subset of is_oop
if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
oopDesc* o = obj.obj();
#else
oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
}
#endif // ASSERT
assert(_from != NULL, "from region must be non-NULL");
HeapRegion* to = _g1->heap_region_containing(obj);
if (to != NULL && _from != to) {
// The _record_refs_into_cset flag is true during the RSet
// updating part of an evacuation pause. It is false at all
// other times:
// * rebuilding the remembered sets after a full GC
// * during concurrent refinement.
// * updating the remembered sets of regions in the collection
// set in the event of an evacuation failure (when deferred
// updates are enabled).
if (_record_refs_into_cset && to->in_collection_set()) {
// We are recording references that point into the collection
// set and this particular reference does exactly that...
// If the referenced object has already been forwarded
// to itself, we are handling an evacuation failure and
// we have already visited/tried to copy this object
// there is no need to retry.
if (!self_forwarded(obj)) {
assert(_push_ref_cl != NULL, "should not be null");
// Push the reference in the refs queue of the G1ParScanThreadState
// instance for this worker thread.
_push_ref_cl->do_oop(p);
}
// Deferred updates to the CSet are either discarded (in the normal case),
// or processed (if an evacuation failure occurs) at the end
// of the collection.
// See G1RemSet::cleanup_after_oops_into_collection_set_do().
} else {
// We either don't care about pushing references that point into the
// collection set (i.e. we're not during an evacuation pause) _or_
// the reference doesn't point into the collection set. Either way
// we add the reference directly to the RSet of the region containing
// the referenced object.
_g1_rem_set->par_write_ref(_from, p, _worker_i);
}
}
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
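The branching in G1UpdateRSOrPushRefOopClosure::do_oop_nv above can be condensed into the following standalone decision sketch. The names here (RefContext, RefAction, classify_ref) are illustrative, not VM names; the real code works on HeapRegion pointers and pushes references onto the per-worker G1ParScanThreadState queue via _push_ref_cl.

// Condensed restatement of the decision made per reference, assuming the
// caller has already resolved the source ("from") and target ("to") regions.
struct RefContext {
  bool record_refs_into_cset;  // true only while updating RSets during an evacuation pause
  bool to_is_in_cset;          // does the referenced object live in the collection set?
  bool self_forwarded;         // obj->is_forwarded() && obj->forwardee() == obj (evacuation failure)
};

enum RefAction {
  IgnoreRef,          // nothing to do now (intra-region ref, outside the heap, or deferred)
  PushToWorkerQueue,  // record the into-CSet reference for this worker thread
  WriteToRemSet       // add the reference to the RSet of the region holding the target
};

RefAction classify_ref(bool same_or_null_region, const RefContext& ctx) {
  if (same_or_null_region) {
    return IgnoreRef;
  }
  if (ctx.record_refs_into_cset && ctx.to_is_in_cset) {
    // Self-forwarded targets have already failed evacuation; their updates are
    // handled at the end of the collection
    // (see G1RemSet::cleanup_after_oops_into_collection_set_do()).
    return ctx.self_forwarded ? IgnoreRef : PushToWorkerQueue;
  }
  return WriteToRemSet;
}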
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -569,40 +569,26 @@ void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
static IntHistogram out_of_histo(50, 50);
class TriggerClosure : public OopClosure {
bool _trigger;
public:
TriggerClosure() : _trigger(false) { }
bool value() const { return _trigger; }
template <class T> void do_oop_nv(T* p) { _trigger = true; }
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
class InvokeIfNotTriggeredClosure: public OopClosure {
TriggerClosure* _t;
OopClosure* _oc;
public:
InvokeIfNotTriggeredClosure(TriggerClosure* t, OopClosure* oc):
_t(t), _oc(oc) { }
template <class T> void do_oop_nv(T* p) {
if (!_t->value()) _oc->do_oop(p);
}
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
G1TriggerClosure::G1TriggerClosure() :
_triggered(false) { }
class Mux2Closure : public OopClosure {
OopClosure* _c1;
OopClosure* _c2;
public:
Mux2Closure(OopClosure *c1, OopClosure *c2) : _c1(c1), _c2(c2) { }
template <class T> void do_oop_nv(T* p) {
_c1->do_oop(p); _c2->do_oop(p);
}
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
G1InvokeIfNotTriggeredClosure::G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t_cl,
OopClosure* oop_cl) :
_trigger_cl(t_cl), _oop_cl(oop_cl) { }
G1Mux2Closure::G1Mux2Closure(OopClosure *c1, OopClosure *c2) :
_c1(c1), _c2(c2) { }
G1UpdateRSOrPushRefOopClosure::
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
G1RemSet* rs,
OopsInHeapRegionClosure* push_ref_cl,
bool record_refs_into_cset,
int worker_i) :
_g1(g1h), _g1_rem_set(rs), _from(NULL),
_record_refs_into_cset(record_refs_into_cset),
_push_ref_cl(push_ref_cl), _worker_i(worker_i) { }
bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
bool check_for_refs_into_cset) {
@@ -629,17 +615,17 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
oops_in_heap_closure = _cset_rs_update_cl[worker_i];
}
UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
_g1->g1_rem_set(),
oops_in_heap_closure,
check_for_refs_into_cset,
worker_i);
G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
_g1->g1_rem_set(),
oops_in_heap_closure,
check_for_refs_into_cset,
worker_i);
update_rs_oop_cl.set_from(r);
TriggerClosure trigger_cl;
G1TriggerClosure trigger_cl;
FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
(check_for_refs_into_cset ?
@@ -688,7 +674,7 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
_conc_refine_cards++;
}
return trigger_cl.value();
return trigger_cl.triggered();
}
bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
......
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -191,44 +191,5 @@ public:
virtual void do_oop( oop* p) { do_oop_work(p); }
};
class UpdateRSOrPushRefOopClosure: public OopClosure {
G1CollectedHeap* _g1;
G1RemSet* _g1_rem_set;
HeapRegion* _from;
OopsInHeapRegionClosure* _push_ref_cl;
bool _record_refs_into_cset;
int _worker_i;
template <class T> void do_oop_work(T* p);
public:
UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
G1RemSet* rs,
OopsInHeapRegionClosure* push_ref_cl,
bool record_refs_into_cset,
int worker_i = 0) :
_g1(g1h),
_g1_rem_set(rs),
_from(NULL),
_record_refs_into_cset(record_refs_into_cset),
_push_ref_cl(push_ref_cl),
_worker_i(worker_i) { }
void set_from(HeapRegion* from) {
assert(from != NULL, "from region must be non-NULL");
_from = from;
}
bool self_forwarded(oop obj) {
bool result = (obj->is_forwarded() && (obj->forwardee() == obj));
return result;
}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop(oop* p) { do_oop_work(p); }
bool apply_to_weak_ref_discovered_field() { return true; }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -85,66 +85,4 @@ inline void UpdateRSetImmediate::do_oop_work(T* p) {
}
}
template <class T>
inline void UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
// can't do because of races
// assert(obj == NULL || obj->is_oop(), "expected an oop");
// Do the safe subset of is_oop
if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
oopDesc* o = obj.obj();
#else
oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
}
#endif // ASSERT
assert(_from != NULL, "from region must be non-NULL");
HeapRegion* to = _g1->heap_region_containing(obj);
if (to != NULL && _from != to) {
// The _record_refs_into_cset flag is true during the RSet
// updating part of an evacuation pause. It is false at all
// other times:
// * rebuilding the remembered sets after a full GC
// * during concurrent refinement.
// * updating the remembered sets of regions in the collection
// set in the event of an evacuation failure (when deferred
// updates are enabled).
if (_record_refs_into_cset && to->in_collection_set()) {
// We are recording references that point into the collection
// set and this particular reference does exactly that...
// If the referenced object has already been forwarded
// to itself, we are handling an evacuation failure and
// we have already visited/tried to copy this object
// there is no need to retry.
if (!self_forwarded(obj)) {
assert(_push_ref_cl != NULL, "should not be null");
// Push the reference in the refs queue of the G1ParScanThreadState
// instance for this worker thread.
_push_ref_cl->do_oop(p);
}
// Deferred updates to the CSet are either discarded (in the normal case),
// or processed (if an evacuation failure occurs) at the end
// of the collection.
// See G1RemSet::cleanup_after_oops_into_collection_set_do().
} else {
// We either don't care about pushing references that point into the
// collection set (i.e. we're not during an evacuation pause) _or_
// the reference doesn't point into the collection set. Either way
// we add the reference directly to the RSet of the region containing
// the referenced object.
_g1_rem_set->par_write_ref(_from, p, _worker_i);
}
}
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,14 @@
// Forward declarations.
enum G1Barrier {
G1BarrierNone, G1BarrierRS, G1BarrierEvac
G1BarrierNone,
G1BarrierRS,
G1BarrierEvac
};
template<bool do_gen_barrier, G1Barrier barrier,
bool do_mark_object>
template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure;
class G1ParScanClosure;
class G1ParPushHeapRSClosure;
@@ -46,6 +48,13 @@ typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
class FilterIntoCSClosure;
class FilterOutOfRegionClosure;
class G1CMOopClosure;
class G1RootRegionScanClosure;
// Specialized oop closures from g1RemSet.cpp
class G1Mux2Closure;
class G1TriggerClosure;
class G1InvokeIfNotTriggeredClosure;
class G1UpdateRSOrPushRefOopClosure;
#ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES
#error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined."
@@ -57,7 +66,12 @@ class G1CMOopClosure;
f(G1ParPushHeapRSClosure,_nv) \
f(FilterIntoCSClosure,_nv) \
f(FilterOutOfRegionClosure,_nv) \
f(G1CMOopClosure,_nv)
f(G1CMOopClosure,_nv) \
f(G1RootRegionScanClosure,_nv) \
f(G1Mux2Closure,_nv) \
f(G1TriggerClosure,_nv) \
f(G1InvokeIfNotTriggeredClosure,_nv) \
f(G1UpdateRSOrPushRefOopClosure,_nv)
#ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES
#error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined."
......
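Adding the new closures to the SPECIALIZED_OOP_OOP_ITERATE_CLOSURES list above is what makes the _nv variants reachable. The macros below are only an illustrative sketch of that mechanism, not the literal HotSpot definitions: each f(ClosureName,_nv) entry stamps out a statically typed iteration overload, so iterating with one of the listed closures ends up calling do_oop_nv directly instead of the virtual do_oop.

class OopClosure;        // generic base: virtual do_oop, one indirect call per oop
class G1Mux2Closure;     // closures listed below get a specialized variant
class G1TriggerClosure;

#define SPECIALIZED_CLOSURES_SKETCH(f) \
  f(G1Mux2Closure,_nv)                 \
  f(G1TriggerClosure,_nv)

// One statically typed overload per listed closure; the _nv suffix marks the
// variant whose body would call ClosureType::do_oop_nv (non-virtual) per oop.
#define DECLARE_ITERATE_SKETCH(ClosureType, nv_suffix) \
  int oop_iterate##nv_suffix(ClosureType* cl);

class oopSketch {
public:
  SPECIALIZED_CLOSURES_SKETCH(DECLARE_ITERATE_SKETCH)
  int oop_iterate(OopClosure* cl);   // fallback: virtual dispatch per oop
};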
@@ -659,7 +659,7 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
// If we're within a stop-world GC, then we might look at a card in a
// GC alloc region that extends onto a GC LAB, which may not be
// parseable. Stop such at the "saved_mark" of the region.
if (G1CollectedHeap::heap()->is_gc_active()) {
if (g1h->is_gc_active()) {
mr = mr.intersection(used_region_at_save_marks());
} else {
mr = mr.intersection(used_region());
@@ -688,53 +688,63 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
OrderAccess::storeload();
}
// Cache the boundaries of the memory region in some const locals
HeapWord* const start = mr.start();
HeapWord* const end = mr.end();
// We used to use "block_start_careful" here. But we're actually happy
// to update the BOT while we do this...
HeapWord* cur = block_start(mr.start());
assert(cur <= mr.start(), "Postcondition");
HeapWord* cur = block_start(start);
assert(cur <= start, "Postcondition");
while (cur <= mr.start()) {
if (oop(cur)->klass_or_null() == NULL) {
oop obj;
HeapWord* next = cur;
while (next <= start) {
cur = next;
obj = oop(cur);
if (obj->klass_or_null() == NULL) {
// Ran into an unparseable point.
return cur;
}
// Otherwise...
int sz = oop(cur)->size();
if (cur + sz > mr.start()) break;
// Otherwise, go on.
cur = cur + sz;
next = (cur + obj->size());
}
oop obj;
obj = oop(cur);
// If we finish this loop...
assert(cur <= mr.start()
&& obj->klass_or_null() != NULL
&& cur + obj->size() > mr.start(),
// If we finish the above loop...We have a parseable object that
// begins on or before the start of the memory region, and ends
// inside or spans the entire region.
assert(obj == oop(cur), "sanity");
assert(cur <= start &&
obj->klass_or_null() != NULL &&
(cur + obj->size()) > start,
"Loop postcondition");
if (!g1h->is_obj_dead(obj)) {
obj->oop_iterate(cl, mr);
}
HeapWord* next;
while (cur < mr.end()) {
while (cur < end) {
obj = oop(cur);
if (obj->klass_or_null() == NULL) {
// Ran into an unparseable point.
return cur;
};
// Otherwise:
next = (cur + obj->size());
if (!g1h->is_obj_dead(obj)) {
if (next < mr.end()) {
if (next < end || !obj->is_objArray()) {
// This object either does not span the MemRegion
// boundary, or if it does it's not an array.
// Apply closure to whole object.
obj->oop_iterate(cl);
} else {
// this obj spans the boundary. If it's an array, stop at the
// boundary.
if (obj->is_objArray()) {
obj->oop_iterate(cl, mr);
} else {
obj->oop_iterate(cl);
}
// This obj is an array that spans the boundary.
// Stop at the boundary.
obj->oop_iterate(cl, mr);
}
}
cur = next;
......
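The reworked loop above folds what used to be two nested tests into a single condition, so the common case, an object that ends inside the card region, costs one predictable branch. A condensed restatement with illustrative stand-in types (ObjSketch and choose_iteration are not VM names):

struct ObjSketch {
  bool is_obj_array;   // stand-in for obj->is_objArray()
};

enum IterateKind {
  WholeObject,         // obj->oop_iterate(cl): visit every field
  BoundedByRegion      // obj->oop_iterate(cl, mr): stop at the region boundary
};

IterateKind choose_iteration(const ObjSketch& obj, bool ends_inside_region) {
  if (ends_inside_region || !obj.is_obj_array) {
    // Either the object does not cross the region boundary, or it does but is
    // not an array; iterate the whole object and rely on the caller's
    // filtering closure to discard out-of-region references.
    return WholeObject;
  }
  // An object array spanning the boundary: only scan the part inside the region.
  return BoundedByRegion;
}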