Commit 1f7260b7 authored by ysr

6906727: UseCompressedOops: some card-marking fixes related to object arrays

Summary: Introduced a new write_ref_array(HeapWord* start, size_t count) method that does the requisite MemRegion range calculation so (some of the) clients of the erstwhile write_ref_array(MemRegion mr) do not need to worry. This removed all external uses of array_size(), which was also simplified and made private. Asserts were added to catch other possible issues. Further, less essential, fixes stemming from this investigation are deferred to CR 6904516 (to follow shortly in hs17).
Reviewed-by: kvn, coleenp, jmasa
Parent 6269ed5f
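
The client-side effect of the new overload can be sketched as follows (a minimal illustration distilled from the hunks below, not additional patched code; `arr` stands for any objArrayOop-like receiver): instead of building a word-aligned MemRegion via array_size(), callers now hand the barrier set the raw element base address and the element count and let it derive the covered range itself.

    // Before this change: caller builds the MemRegion itself via array_size()
    //   bs->write_ref_array(MemRegion((HeapWord*)arr->base(), arr->array_size()));
    // After this change: caller passes element base address and element count;
    // the barrier set computes (and HeapWord-aligns) the MemRegion internally
    //   bs->write_ref_array((HeapWord*)arr->base(), arr->length());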
......@@ -1124,8 +1124,7 @@ class BacktraceBuilder: public StackObj {
if (_dirty && _methods != NULL) {
BarrierSet* bs = Universe::heap()->barrier_set();
assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
bs->write_ref_array(MemRegion((HeapWord*)_methods->base(),
_methods->array_size()));
bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
_dirty = false;
}
}
......
......@@ -289,7 +289,7 @@ attachListener.hpp allocation.hpp
attachListener.hpp debug.hpp
attachListener.hpp ostream.hpp
barrierSet.cpp barrierSet.hpp
barrierSet.cpp barrierSet.inline.hpp
barrierSet.cpp collectedHeap.hpp
barrierSet.cpp universe.hpp
......
......@@ -41,11 +41,6 @@ void BarrierSet::static_write_ref_array_pre(HeapWord* start, size_t count) {
// count is number of array elements being written
void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) {
assert(count <= (size_t)max_intx, "count too large");
HeapWord* end = start + objArrayOopDesc::array_size((int)count);
#if 0
warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
start, count, start, end);
#endif
Universe::heap()->barrier_set()->write_ref_array_work(MemRegion(start, end));
// simply delegate to instance method
Universe::heap()->barrier_set()->write_ref_array(start, count);
}
......@@ -121,17 +121,20 @@ public:
virtual void read_ref_array(MemRegion mr) = 0;
virtual void read_prim_array(MemRegion mr) = 0;
// Below length is the # array elements being written
virtual void write_ref_array_pre( oop* dst, int length) {}
virtual void write_ref_array_pre(narrowOop* dst, int length) {}
// Below MemRegion mr is expected to be HeapWord-aligned
inline void write_ref_array(MemRegion mr);
// Below count is the # array elements being written, starting
// at the address "start", which may not necessarily be HeapWord-aligned
inline void write_ref_array(HeapWord* start, size_t count);
// Static versions, suitable for calling from generated code.
// Static versions, suitable for calling from generated code;
// count is # array elements being written, starting with "start",
// which may not necessarily be HeapWord-aligned.
static void static_write_ref_array_pre(HeapWord* start, size_t count);
static void static_write_ref_array_post(HeapWord* start, size_t count);
// Narrow oop versions of the above; count is # of array elements being written,
// starting with "start", which is HeapWord-aligned.
static void static_write_ref_array_pre_narrow(HeapWord* start, size_t count);
static void static_write_ref_array_post_narrow(HeapWord* start, size_t count);
protected:
virtual void write_ref_array_work(MemRegion mr) = 0;
......
......@@ -43,6 +43,8 @@ void BarrierSet::write_ref_field(void* field, oop new_val) {
}
void BarrierSet::write_ref_array(MemRegion mr) {
assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start() , "Unaligned start");
assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
if (kind() == CardTableModRef) {
((CardTableModRefBS*)this)->inline_write_ref_array(mr);
} else {
......@@ -50,6 +52,34 @@ void BarrierSet::write_ref_array(MemRegion mr) {
}
}
// count is number of array elements being written
void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
assert(count <= (size_t)max_intx, "count too large");
HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
// In the case of compressed oops, start and end may potentially be misaligned;
// so we need to conservatively align the first downward (this is not
// strictly necessary for current uses, but a case of good hygiene and,
// if you will, aesthetics) and the second upward (this is essential for
// current uses) to a HeapWord boundary, so we mark all cards overlapping
// this write. In the event that this evolves in the future to calling a
// logging barrier of narrow oop granularity, like the pre-barrier for G1
// (mentioned here merely by way of example), we will need to change this
// interface, much like the pre-barrier one above, so it is "exactly precise"
// (if i may be allowed the adverbial redundancy for emphasis) and does not
// include narrow oop slots not included in the original write interval.
HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
HeapWord* aligned_end = (HeapWord*)align_size_up ((uintptr_t)end, HeapWordSize);
// If compressed oops were not being used, these should already be aligned
assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
"Expected heap word alignment of start and end");
#if 0
warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
start, count, aligned_start, aligned_end);
#endif
write_ref_array_work(MemRegion(aligned_start, aligned_end));
}
void BarrierSet::write_region(MemRegion mr) {
if (kind() == CardTableModRef) {
((CardTableModRefBS*)this)->inline_write_region(mr);
......
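
To make the alignment reasoning in the comment above concrete, here is a small worked example (assumed values only: a 64-bit VM with UseCompressedOops, so heapOopSize == 4 and HeapWordSize == 8; the addresses are illustrative):

    //   start         = 0x1004                      // narrow-oop slot, not HeapWord-aligned
    //   count         = 3                           // elements written
    //   end           = 0x1004 + 3*4 = 0x1010
    //   aligned_start = align_size_down(0x1004, 8) = 0x1000
    //   aligned_end   = align_size_up (0x1010, 8)  = 0x1010
    // write_ref_array_work(MemRegion(0x1000, 0x1010)) then dirties every card
    // overlapping the write; the rounding down is conservative, since it may
    // also cover the unwritten narrow-oop slot at 0x1000.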
......@@ -511,6 +511,8 @@ void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
}
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
jbyte* cur = byte_for(mr.start());
jbyte* last = byte_after(mr.last());
while (cur < last) {
......@@ -520,6 +522,8 @@ void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
}
void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (!mri.is_empty()) dirty_MemRegion(mri);
......
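
For context on what the strengthened asserts protect, a rough sketch of the address-to-card mapping that dirty_MemRegion relies on (assuming the usual 512-byte cards, i.e. card_shift == 9; illustrative, not the verbatim CardTableModRefBS code):

    //   jbyte* byte_for  (const void* p) { return _byte_map_base + ((uintptr_t)p >> card_shift); }
    //   jbyte* byte_after(const void* p) { return byte_for((char*)p + 1); }
    // With mr.start() and mr.end() HeapWord-aligned, mr.last() names a real heap
    // word, so [byte_for(mr.start()), byte_after(mr.last())) covers exactly the
    // card bytes whose 512-byte windows overlap the region being dirtied.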
......@@ -127,16 +127,14 @@ template <class T> void objArrayKlass::do_copy(arrayOop s, T* src,
// pointer delta is scaled to number of elements (length field in
// objArrayOop) which we assume is 32 bit.
assert(pd == (size_t)(int)pd, "length field overflow");
const size_t done_word_len = objArrayOopDesc::array_size((int)pd);
bs->write_ref_array(MemRegion((HeapWord*)dst, done_word_len));
bs->write_ref_array((HeapWord*)dst, pd);
THROW(vmSymbols::java_lang_ArrayStoreException());
return;
}
}
}
}
const size_t word_len = objArrayOopDesc::array_size(length);
bs->write_ref_array(MemRegion((HeapWord*)dst, word_len));
bs->write_ref_array((HeapWord*)dst, length);
}
void objArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
......
......@@ -37,6 +37,32 @@ class objArrayOopDesc : public arrayOopDesc {
return &((T*)base())[index];
}
private:
// Give size of objArrayOop in HeapWords minus the header
static int array_size(int length) {
const int OopsPerHeapWord = HeapWordSize/heapOopSize;
assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0),
"Else the following (new) computation would be in error");
#ifdef ASSERT
// The old code is left in for sanity-checking; it'll
// go away pretty soon. XXX
// Without UseCompressedOops, this is simply:
// oop->length() * HeapWordsPerOop;
// With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
// The oop elements are aligned up to wordSize
const int HeapWordsPerOop = heapOopSize/HeapWordSize;
int old_res;
if (HeapWordsPerOop > 0) {
old_res = length * HeapWordsPerOop;
} else {
old_res = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
}
#endif // ASSERT
int res = (length + OopsPerHeapWord - 1)/OopsPerHeapWord;
assert(res == old_res, "Inconsistency between old and new.");
return res;
}
public:
// Returns the offset of the first element.
static int base_offset_in_bytes() {
......@@ -67,29 +93,12 @@ class objArrayOopDesc : public arrayOopDesc {
// Sizing
static int header_size() { return arrayOopDesc::header_size(T_OBJECT); }
int object_size() { return object_size(length()); }
int array_size() { return array_size(length()); }
static int object_size(int length) {
// This returns the object size in HeapWords.
return align_object_size(header_size() + array_size(length));
}
// Give size of objArrayOop in HeapWords minus the header
static int array_size(int length) {
// Without UseCompressedOops, this is simply:
// oop->length() * HeapWordsPerOop;
// With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
// The oop elements are aligned up to wordSize
const int HeapWordsPerOop = heapOopSize/HeapWordSize;
if (HeapWordsPerOop > 0) {
return length * HeapWordsPerOop;
} else {
const int OopsPerHeapWord = HeapWordSize/heapOopSize;
int word_len = align_size_up(length, OopsPerHeapWord)/OopsPerHeapWord;
return word_len;
}
}
// special iterators for index ranges, returns size of object
#define ObjArrayOop_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
int oop_iterate_range(OopClosureType* blk, int start, int end);
......
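
A worked example of the simplified array_size() ceiling division, under the usual 64-bit sizes (HeapWordSize == 8; heapOopSize == 4 with compressed oops, 8 without; the values are only for illustration):

    //   Compressed oops:   OopsPerHeapWord == 2
    //     length == 7  ->  (7 + 2 - 1) / 2 == 4 HeapWords   (28 bytes of slots rounded up to 32)
    //   Uncompressed oops: OopsPerHeapWord == 1
    //     length == 7  ->  (7 + 1 - 1) / 1 == 7 HeapWords
    // Both results match the old two-branch computation now kept only under
    // #ifdef ASSERT for cross-checking.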