Commit 9b64e3ed authored by johnc

Merge

/*
* Copyright (c) 2007, 2011 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -36,7 +36,6 @@
void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
DirtyCardToOopClosure* dcto_cl,
MemRegionClosure* cl,
bool clear,
int n_threads) {
if (n_threads > 0) {
assert((n_threads == 1 && ParallelGCThreads == 0) ||
......@@ -57,7 +56,7 @@ void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
int stride = 0;
while (!pst->is_task_claimed(/* reference */ stride)) {
process_stride(sp, mr, stride, n_strides, dcto_cl, cl, clear,
process_stride(sp, mr, stride, n_strides, dcto_cl, cl,
lowest_non_clean,
lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size);
......@@ -83,7 +82,6 @@ process_stride(Space* sp,
jint stride, int n_strides,
DirtyCardToOopClosure* dcto_cl,
MemRegionClosure* cl,
bool clear,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size) {
......@@ -129,7 +127,7 @@ process_stride(Space* sp,
lowest_non_clean_base_chunk_index,
lowest_non_clean_chunk_size);
non_clean_card_iterate_work(chunk_mr, cl, clear);
non_clean_card_iterate_work(chunk_mr, cl);
// Find the next chunk of the stride.
chunk_card_start += CardsPerStrideChunk * n_strides;
......
......@@ -76,7 +76,7 @@ class AllocationStats VALUE_OBJ_CLASS_SPEC {
_beforeSweep = 0;
_coalBirths = 0;
_coalDeaths = 0;
_splitBirths = split_birth? 1 : 0;
_splitBirths = (split_birth ? 1 : 0);
_splitDeaths = 0;
_returnedBytes = 0;
}
......
/*
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -459,18 +459,17 @@ bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
// Apply the closures to the non-clean cards of "sp" that lie within "mr".
// If worker threads are available (n_par_threads() > 0), the iteration is
// performed in parallel via par_non_clean_card_iterate_work; otherwise it
// runs serially on the calling thread.
// NOTE(review): the scraped diff showed both the old (with "bool clear")
// and new signatures fused together; this is the resolved post-commit
// 4-argument form.
void CardTableModRefBS::non_clean_card_iterate(Space* sp,
                                               MemRegion mr,
                                               DirtyCardToOopClosure* dcto_cl,
                                               MemRegionClosure* cl) {
  if (!mr.is_empty()) {
    // A positive thread count indicates a parallel collection is in progress.
    int n_threads = SharedHeap::heap()->n_par_threads();
    if (n_threads > 0) {
#ifndef SERIALGC
      par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, n_threads);
#else  // SERIALGC
      fatal("Parallel gc not supported here.");
#endif // SERIALGC
    } else {
      non_clean_card_iterate_work(mr, cl);
    }
  }
}
......@@ -481,10 +480,7 @@ void CardTableModRefBS::non_clean_card_iterate(Space* sp,
// cards (and miss those marked precleaned). In that sense,
// the name precleaned is currently somewhat of a misnomer.
void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
MemRegionClosure* cl,
bool clear) {
// Figure out whether we have to worry about parallelism.
bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
MemRegionClosure* cl) {
for (int i = 0; i < _cur_covered_regions; i++) {
MemRegion mri = mr.intersection(_covered[i]);
if (mri.word_size() > 0) {
......@@ -506,22 +502,6 @@ void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
MemRegion cur_cards(addr_for(cur_entry),
non_clean_cards * card_size_in_words);
MemRegion dirty_region = cur_cards.intersection(mri);
if (clear) {
for (size_t i = 0; i < non_clean_cards; i++) {
// Clean the dirty cards (but leave the other non-clean
// alone.) If parallel, do the cleaning atomically.
jbyte cur_entry_val = cur_entry[i];
if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
if (is_par) {
jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
assert(res != clean_card,
"Dirty card mysteriously cleaned");
} else {
cur_entry[i] = clean_card;
}
}
}
}
cl->do_MemRegion(dirty_region);
}
cur_entry = next_entry;
......@@ -530,22 +510,6 @@ void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
}
}
// Applies "cl->do_oop" to (the address of) every reference field in "sp"
// that lies on a non-clean card. If "before_save_marks" is true, only the
// region allocated up to the last save_marks() call is scanned; otherwise
// the whole used region is scanned. "clear" is forwarded unchanged to
// non_clean_card_iterate.
// NOTE(review): the surrounding diff deletes this function (and its
// declarations) in the same commit that drops the "clear" parameter from
// non_clean_card_iterate -- do not add new callers.
void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
OopClosure* cl,
bool clear,
bool before_save_marks) {
// Note that dcto_cl is resource-allocated, so there is no
// corresponding "delete".
DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
MemRegion used_mr;
if (before_save_marks) {
used_mr = sp->used_region_at_save_marks();
} else {
used_mr = sp->used_region();
}
// dcto_cl serves as both the DirtyCardToOopClosure and the MemRegionClosure.
non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
}
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
......@@ -593,8 +557,7 @@ void CardTableModRefBS::dirty(MemRegion mr) {
memset(first, dirty_card, last-first);
}
// NOTES:
// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty cards ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
MemRegionClosure* cl) {
......@@ -698,7 +661,7 @@ public:
// Verification helper: iterates over "mr" with a closure that (per its
// name) guarantees no card in the region has been modified; any non-clean
// card triggers the closure's failure path.
// NOTE(review): the mangled diff contained both the old 3-argument call
// (with the removed "clear" flag) and the new 2-argument call; only the
// new call is kept.
void CardTableModRefBS::verify_clean_region(MemRegion mr) {
  GuaranteeNotModClosure blk(this);
  non_clean_card_iterate_work(mr, &blk);
}
// To verify a MemRegion is entirely dirty this closure is passed to
......
/*
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -171,17 +171,14 @@ class CardTableModRefBS: public ModRefBarrierSet {
// mode if worker threads are available.
void non_clean_card_iterate(Space* sp, MemRegion mr,
DirtyCardToOopClosure* dcto_cl,
MemRegionClosure* cl,
bool clear);
MemRegionClosure* cl);
// Utility function used to implement the other versions below.
void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl,
bool clear);
void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl);
void par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
DirtyCardToOopClosure* dcto_cl,
MemRegionClosure* cl,
bool clear,
int n_threads);
// Dirty the bytes corresponding to "mr" (not all of which must be
......@@ -241,7 +238,6 @@ class CardTableModRefBS: public ModRefBarrierSet {
jint stride, int n_strides,
DirtyCardToOopClosure* dcto_cl,
MemRegionClosure* cl,
bool clear,
jbyte** lowest_non_clean,
uintptr_t lowest_non_clean_base_chunk_index,
size_t lowest_non_clean_chunk_size);
......@@ -402,9 +398,6 @@ public:
virtual void invalidate(MemRegion mr, bool whole_heap = false);
void clear(MemRegion mr);
void dirty(MemRegion mr);
void mod_oop_in_space_iterate(Space* sp, OopClosure* cl,
bool clear = false,
bool before_save_marks = false);
// *** Card-table-RemSet-specific things.
......@@ -415,18 +408,15 @@ public:
// *decreasing* address order. (This order aids with imprecise card
// marking, where a dirty card may cause scanning, and summarization
// marking, of objects that extend onto subsequent cards.)
// If "clear" is true, the card is (conceptually) marked unmodified before
// applying the closure.
// Applies "cl" to each MemRegion of non-clean cards in the entire heap.
// NOTE(review): the old overload taking "bool clear = false" and the new
// one were fused in the scraped diff; this is the resolved new form.
void mod_card_iterate(MemRegionClosure* cl) {
  non_clean_card_iterate_work(_whole_heap, cl);
}
// Like the "mod_cards_iterate" above, except only invokes the closure
// for cards within the MemRegion "mr" (which is required to be
// card-aligned and sized.)
// Restricted form: applies "cl" only to non-clean cards within "mr"
// (which the caller must supply card-aligned and card-sized).
// NOTE(review): old 3-argument and new 2-argument definitions were fused
// in the scraped diff; this is the resolved new form.
void mod_card_iterate(MemRegion mr, MemRegionClosure* cl) {
  non_clean_card_iterate_work(mr, cl);
}
static uintx ct_max_alignment_constraint();
......
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -247,7 +247,7 @@ void CardTableRS::younger_refs_in_space_iterate(Space* sp,
ClearNoncleanCardWrapper clear_cl(dcto_cl, this);
_ct_bs->non_clean_card_iterate(sp, sp->used_region_at_save_marks(),
dcto_cl, &clear_cl, false);
dcto_cl, &clear_cl);
}
void CardTableRS::clear_into_younger(Generation* gen, bool clear_perm) {
......
/*
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -88,15 +88,6 @@ public:
assert(false, "can't call");
}
// Invoke "cl->do_oop" on (the address of) every possibly-modifed
// reference field in objects in "sp". If "clear" is "true", the oops
// are no longer considered possibly modified after application of the
// closure. If' "before_save_marks" is true, oops in objects allocated
// after the last call to "save_marks" on "sp" will not be considered.
virtual void mod_oop_in_space_iterate(Space* sp, OopClosure* cl,
bool clear = false,
bool before_save_marks = false) = 0;
// Causes all refs in "mr" to be assumed to be modified. If "whole_heap"
// is true, the caller asserts that the entire heap is being invalidated,
// which may admit an optimized implementation for some barriers.
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册