Commit 59ae9b34 authored by ysr

6904516: More object array barrier fixes, following up on 6906727

Summary: Fixed missing pre-barrier calls for G1, modified C1 to call pre- and correct post-barrier interfaces, deleted obsolete interface, (temporarily) disabled redundant deferred barrier in BacktraceBuilder.
Reviewed-by: coleenp, jmasa, kvn, never
Parent: e401acac
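The theme of the patch is the ordering of barriers around an oop array copy: with a collector such as G1, a SATB pre-barrier must read the destination slots before they are overwritten, and the card-marking post-barrier must run after the copy. The standalone sketch below illustrates that ordering only; ToyBarrierSet, its print statements, and this oop_arraycopy are simplified stand-ins, not HotSpot's actual BarrierSet interface.

#include <cstdio>
#include <cstring>

struct Oop { void* p; };  // toy stand-in for an object reference slot

struct ToyBarrierSet {
  // Pre-barrier: record the values about to be overwritten (SATB-style).
  void write_ref_array_pre(Oop* dst, int length) {
    for (int i = 0; i < length; i++) {
      if (dst[i].p != NULL) std::printf("enqueue old value %p\n", dst[i].p);
    }
  }
  // Post-barrier: dirty the cards covering the written slots.
  void write_ref_array(Oop* dst, int length) {
    std::printf("dirty cards for [%p, %p)\n", (void*)dst, (void*)(dst + length));
  }
};

// The ordering the fix enforces: pre-barrier, then the copy, then post-barrier.
static void oop_arraycopy(ToyBarrierSet* bs, Oop* src, Oop* dst, int length) {
  bs->write_ref_array_pre(dst, length);           // before any destination store
  std::memmove(dst, src, length * sizeof(Oop));   // the actual element copy
  bs->write_ref_array(dst, length);               // after all destination stores
}

int main() {
  Oop a[3] = { {(void*)0x10}, {(void*)0x20}, {(void*)0x30} };
  Oop b[3] = { {(void*)0x40}, {NULL},        {(void*)0x50} };
  ToyBarrierSet bs;
  oop_arraycopy(&bs, a, b, 3);
  return 0;
}

Reversing the first two steps would lose the pre-images a SATB collector needs, which is the class of bug the missing pre-barrier calls caused.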
@@ -1075,6 +1075,7 @@ enum {
 };

+// Below length is the # elements copied.
 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                           oopDesc* dst, T* dst_addr,
                                           int length) {
@@ -1083,22 +1084,22 @@ template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
   // barrier. The assert will fail if this is not the case.
   // Note that we use the non-virtual inlineable variant of write_ref_array.
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_opt(),
-         "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
   if (src == dst) {
     // same object, no check
+    bs->write_ref_array_pre(dst_addr, length);
     Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-    bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                  (HeapWord*)(dst_addr + length)));
+    bs->write_ref_array((HeapWord*)dst_addr, length);
     return ac_ok;
   } else {
     klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
     klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
     if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
       // Elements are guaranteed to be subtypes, so no check necessary
+      bs->write_ref_array_pre(dst_addr, length);
       Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-      bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                    (HeapWord*)(dst_addr + length)));
+      bs->write_ref_array((HeapWord*)dst_addr, length);
       return ac_ok;
     }
   }
@@ -1162,9 +1163,16 @@ JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
 #endif
   if (num == 0) return;
-  Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
   BarrierSet* bs = Universe::heap()->barrier_set();
-  bs->write_ref_array(MemRegion(dst, dst + num));
+  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
+  if (UseCompressedOops) {
+    bs->write_ref_array_pre((narrowOop*)dst, num);
+  } else {
+    bs->write_ref_array_pre((oop*)dst, num);
+  }
+  Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
+  bs->write_ref_array(dst, num);
 JRT_END
......
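One detail of the Runtime1::oop_arraycopy change above: the stub only receives an untyped destination pointer, so the new pre-barrier call has to pick the element width itself, reading 32-bit narrowOop slots when UseCompressedOops is on and full-width oop slots otherwise. The fragment below is a self-contained illustration of that dispatch; the slot typedefs and enqueue_old_values helper are hypothetical, not HotSpot code.

#include <cstdint>
#include <cstdio>

typedef uint32_t  narrowOopSlot;  // stand-in for narrowOop (compressed 32-bit reference)
typedef uintptr_t oopSlot;        // stand-in for a full-width oop

// Read each destination slot with the correct width and report its old value.
template <class T>
static void enqueue_old_values(T* dst, int length) {
  for (int i = 0; i < length; i++)
    std::printf("old value of slot %d: %#llx\n", i, (unsigned long long)dst[i]);
}

// Mirrors the shape of the patched stub: choose the slot type before the
// pre-barrier walks the destination range.
static void pre_barrier_for_copy(void* dst, int length, bool use_compressed_oops) {
  if (use_compressed_oops) {
    enqueue_old_values(static_cast<narrowOopSlot*>(dst), length);
  } else {
    enqueue_old_values(static_cast<oopSlot*>(dst), length);
  }
}

int main() {
  narrowOopSlot compressed[4] = { 1, 2, 3, 4 };
  oopSlot       wide[4]       = { 5, 6, 7, 8 };
  pre_barrier_for_copy(compressed, 4, true);   // walk 4-byte slots
  pre_barrier_for_copy(wide,       4, false);  // walk pointer-sized slots
  return 0;
}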
@@ -1121,10 +1121,23 @@ class BacktraceBuilder: public StackObj {
   }

   void flush() {
+    // The following appears to have been an optimization to save from
+    // doing a barrier for each individual store into the _methods array,
+    // but rather to do it for the entire array after the series of writes.
+    // That optimization seems to have been lost when compressed oops was
+    // implemented. However, the extra card-marks below was left in place,
+    // but is now redundant because the individual stores into the
+    // _methods array already execute the barrier code. CR 6918185 has
+    // been filed so the original code may be restored by deferring the
+    // barriers until after the entire sequence of stores, thus re-enabling
+    // the intent of the original optimization. In the meantime the redundant
+    // card mark below is now disabled.
     if (_dirty && _methods != NULL) {
+#if 0
       BarrierSet* bs = Universe::heap()->barrier_set();
       assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
       bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
+#endif
       _dirty = false;
     }
   }
@@ -1168,9 +1181,7 @@ class BacktraceBuilder: public StackObj {
       method = mhandle();
     }
     _methods->obj_at_put(_index, method);
-    // bad for UseCompressedOops
-    // *_methods->obj_at_addr(_index) = method;
     _bcis->ushort_at_put(_index, bci);
     _index++;
     _dirty = true;
......
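The comment added to BacktraceBuilder::flush() above contrasts two card-marking strategies: mark on every obj_at_put store (what happens now, making the bulk mark redundant), or store raw and defer one array-wide mark until flush(), the original optimization that CR 6918185 may restore. A minimal sketch of the two strategies, using a hypothetical mark_card helper in place of the real barrier:

#include <cstdint>
#include <cstdio>

static const uintptr_t kCardSize = 512;  // assumed card granularity for the sketch

static void mark_card(const void* addr) {  // stand-in for dirtying one card
  std::printf("dirty card covering %p\n", addr);
}

// Strategy 1: a barrier after every single store.
static void store_with_barrier(void** arr, int i, void* v) {
  arr[i] = v;
  mark_card(&arr[i]);
}

// Strategy 2: raw stores, then one deferred pass that dirties each card
// overlapping the array exactly once.
static void store_raw(void** arr, int i, void* v) { arr[i] = v; }

static void flush_barrier(void** arr, int len) {
  uintptr_t start = (uintptr_t)arr & ~(kCardSize - 1);
  uintptr_t end   = (uintptr_t)(arr + len);
  for (uintptr_t p = start; p < end; p += kCardSize) mark_card((const void*)p);
}

int main() {
  void* a[8] = { 0 };
  for (int i = 0; i < 8; i++) store_with_barrier(a, i, (void*)(uintptr_t)(i + 1));  // 8 card marks
  for (int i = 0; i < 8; i++) store_raw(a, i, (void*)(uintptr_t)(i + 100));
  flush_barrier(a, 8);  // typically a single card mark for a small array
  return 0;
}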
@@ -2505,6 +2505,7 @@ G1CollectedHeap* G1CollectedHeap::heap() {
 }

 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
+  // always_do_update_barrier = false;
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
   // Call allocation profiler
   AllocationProfiler::iterate_since_last_gc();
@@ -2518,6 +2519,7 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
   // is set.
   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
                            "derived pointer present"));
+  // always_do_update_barrier = true;
 }

 void G1CollectedHeap::do_collection_pause() {
......
@@ -124,8 +124,6 @@ public:
   // Below length is the # array elements being written
   virtual void write_ref_array_pre(      oop* dst, int length) {}
   virtual void write_ref_array_pre(narrowOop* dst, int length) {}
-  // Below MemRegion mr is expected to be HeapWord-aligned
-  inline void write_ref_array(MemRegion mr);
   // Below count is the # array elements being written, starting
   // at the address "start", which may not necessarily be HeapWord-aligned
   inline void write_ref_array(HeapWord* start, size_t count);
......
@@ -42,16 +42,6 @@ void BarrierSet::write_ref_field(void* field, oop new_val) {
   }
 }

-void BarrierSet::write_ref_array(MemRegion mr) {
-  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
-  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end");
-  if (kind() == CardTableModRef) {
-    ((CardTableModRefBS*)this)->inline_write_ref_array(mr);
-  } else {
-    write_ref_array_work(mr);
-  }
-}
-
 // count is number of array elements being written
 void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
   assert(count <= (size_t)max_intx, "count too large");
@@ -61,12 +51,12 @@ void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
   // strictly necessary for current uses, but a case of good hygiene and,
   // if you will, aesthetics) and the second upward (this is essential for
   // current uses) to a HeapWord boundary, so we mark all cards overlapping
-  // this write. In the event that this evolves in the future to calling a
+  // this write. If this evolves in the future to calling a
   // logging barrier of narrow oop granularity, like the pre-barrier for G1
   // (mentioned here merely by way of example), we will need to change this
-  // interface, much like the pre-barrier one above, so it is "exactly precise"
-  // (if i may be allowed the adverbial redundancy for emphasis) and does not
-  // include narrow oop slots not included in the original write interval.
+  // interface, so it is "exactly precise" (if i may be allowed the adverbial
+  // redundancy for emphasis) and does not include narrow oop slots not
+  // included in the original write interval.
   HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
   HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
   // If compressed oops were not being used, these should already be aligned
......
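The retained comment in BarrierSet::write_ref_array above boils down to a small piece of alignment arithmetic: with compressed oops the written interval may start or end mid-HeapWord, so its start is rounded down and its end rounded up before the cards are dirtied. The snippet below illustrates just that rounding; align_down/align_up are local helpers assuming an 8-byte HeapWord, not the HotSpot align_size_down/align_size_up macros.

#include <cstdint>
#include <cstdio>

static const uintptr_t kHeapWordSize = 8;  // assumed 64-bit HeapWord

static uintptr_t align_down(uintptr_t p, uintptr_t a) { return p & ~(a - 1); }
static uintptr_t align_up  (uintptr_t p, uintptr_t a) { return (p + a - 1) & ~(a - 1); }

int main() {
  // Example: two 4-byte narrowOop slots starting 4 bytes into a HeapWord.
  uintptr_t start = 0x1000 + 4;
  uintptr_t end   = start + 2 * 4;
  std::printf("written interval: [%#lx, %#lx)\n",
              (unsigned long)start, (unsigned long)end);
  std::printf("card-marked over: [%#lx, %#lx)\n",
              (unsigned long)align_down(start, kHeapWordSize),
              (unsigned long)align_up(end, kHeapWordSize));
  return 0;
}

Rounding outward can dirty one extra card on either side, which is harmless for a card-marking post-barrier but, as the comment notes, would be too coarse for an exact pre-barrier.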
@@ -196,11 +196,19 @@ void stubRoutines_init2() { StubRoutines::initialize2(); }
 // Default versions of arraycopy functions
 //

+static void gen_arraycopy_barrier_pre(oop* dest, size_t count) {
+  assert(count != 0, "count should be non-zero");
+  assert(count <= (size_t)max_intx, "count too large");
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  assert(bs->has_write_ref_array_pre_opt(), "Must have pre-barrier opt");
+  bs->write_ref_array_pre(dest, (int)count);
+}
+
 static void gen_arraycopy_barrier(oop* dest, size_t count) {
   assert(count != 0, "count should be non-zero");
   BarrierSet* bs = Universe::heap()->barrier_set();
   assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-  bs->write_ref_array(MemRegion((HeapWord*)dest, (HeapWord*)(dest + count)));
+  bs->write_ref_array((HeapWord*)dest, count);
 }

 JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
@@ -240,6 +248,7 @@ JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
   SharedRuntime::_oop_array_copy_ctr++;      // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
+  gen_arraycopy_barrier_pre(dest, count);
   Copy::conjoint_oops_atomic(src, dest, count);
   gen_arraycopy_barrier(dest, count);
 JRT_END
@@ -281,6 +290,7 @@ JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, size_t count))
   SharedRuntime::_oop_array_copy_ctr++;      // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
+  gen_arraycopy_barrier_pre((oop *) dest, count);
   Copy::arrayof_conjoint_oops(src, dest, count);
   gen_arraycopy_barrier((oop *) dest, count);
 JRT_END