Commit 851ac153, authored by never

Merge

@@ -189,7 +189,7 @@ public class CompactibleFreeListSpace extends CompactibleSpace {
          cur = cur.addOffsetTo(adjustObjectSizeInBytes(size));
       }
-      if (FreeChunk.secondWordIndicatesFreeChunk(dbg.getAddressValue(klassOop))) {
+      if (FreeChunk.indicatesFreeChunk(cur)) {
          if (! cur.equals(regionStart)) {
             res.add(new MemRegion(regionStart, cur));
          }
...
@@ -28,6 +28,7 @@ import java.util.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.oops.*;
 public class FreeChunk extends VMObject {
    static {
@@ -42,13 +43,13 @@ public class FreeChunk extends VMObject {
       Type type = db.lookupType("FreeChunk");
       nextField = type.getAddressField("_next");
       prevField = type.getAddressField("_prev");
-      sizeField = type.getCIntegerField("_size");
+      sizeField = type.getAddressField("_size");
    }
    // Fields
    private static AddressField nextField;
    private static AddressField prevField;
-   private static CIntegerField sizeField;
+   private static AddressField sizeField;
    // Accessors
    public FreeChunk next() {
@@ -61,20 +62,34 @@ public class FreeChunk extends VMObject {
    }
    public long size() {
-      return sizeField.getValue(addr);
+      if (VM.getVM().isCompressedOopsEnabled()) {
+         Mark mark = new Mark(sizeField.getValue(addr));
+         return mark.getSize();
+      } else {
+         Address size = sizeField.getValue(addr);
+         Debugger dbg = VM.getVM().getDebugger();
+         return dbg.getAddressValue(size);
+      }
    }
    public FreeChunk(Address addr) {
       super(addr);
    }
-   public static boolean secondWordIndicatesFreeChunk(long word) {
-      return (word & 0x1L) == 0x1L;
+   public static boolean indicatesFreeChunk(Address cur) {
+      FreeChunk f = new FreeChunk(cur);
+      return f.isFree();
    }
    public boolean isFree() {
-      Debugger dbg = VM.getVM().getDebugger();
+      if (VM.getVM().isCompressedOopsEnabled()) {
+         Mark mark = new Mark(sizeField.getValue(addr));
+         return mark.isCmsFreeChunk();
+      } else {
          Address prev = prevField.getValue(addr);
-      return secondWordIndicatesFreeChunk(dbg.getAddressValue(prev));
+         Debugger dbg = VM.getVM().getDebugger();
+         long word = dbg.getAddressValue(prev);
+         return (word & 0x1L) == 0x1L;
+      }
    }
 }
@@ -79,6 +79,11 @@ public class Mark extends VMObject {
       noHashInPlace = db.lookupLongConstant("markOopDesc::no_hash_in_place").longValue();
       noLockInPlace = db.lookupLongConstant("markOopDesc::no_lock_in_place").longValue();
       maxAge = db.lookupLongConstant("markOopDesc::max_age").longValue();
+      /* Constants in markOop used by CMS. */
+      cmsShift = db.lookupLongConstant("markOopDesc::cms_shift").longValue();
+      cmsMask = db.lookupLongConstant("markOopDesc::cms_mask").longValue();
+      sizeShift = db.lookupLongConstant("markOopDesc::size_shift").longValue();
    }
    // Field accessors
@@ -120,6 +125,11 @@ public class Mark extends VMObject {
    private static long maxAge;
+   /* Constants in markOop used by CMS. */
+   private static long cmsShift;
+   private static long cmsMask;
+   private static long sizeShift;
    public Mark(Address addr) {
       super(addr);
    }
@@ -290,4 +300,11 @@ public class Mark extends VMObject {
    //
    // // Recover address of oop from encoded form used in mark
    // inline void* decode_pointer() { return clear_lock_bits(); }
+   // Copy markOop methods for CMS here.
+   public boolean isCmsFreeChunk() {
+      return isUnlocked() &&
+             (Bits.maskBitsLong(value() >> cmsShift, cmsMask) & 0x1L) == 0x1L;
+   }
+   public long getSize() { return (long)(value() >> sizeShift); }
 }
@@ -805,22 +805,23 @@ size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   // This must be volatile, or else there is a danger that the compiler
   // will compile the code below into a sometimes-infinite loop, by keeping
   // the value read the first time in a register.
-  oop o = (oop)p;
-  volatile oop* second_word_addr = o->klass_addr();
   while (true) {
-    klassOop k = (klassOop)(*second_word_addr);
     // We must do this until we get a consistent view of the object.
-    if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
-      FreeChunk* fc = (FreeChunk*)p;
-      volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
-      size_t res = (*sz_addr);
-      klassOop k2 = (klassOop)(*second_word_addr);  // Read to confirm.
-      if (k == k2) {
+    if (FreeChunk::indicatesFreeChunk(p)) {
+      volatile FreeChunk* fc = (volatile FreeChunk*)p;
+      size_t res = fc->size();
+      // If the object is still a free chunk, return the size, else it
+      // has been allocated so try again.
+      if (FreeChunk::indicatesFreeChunk(p)) {
         assert(res != 0, "Block size should not be 0");
         return res;
       }
-    } else if (k != NULL) {
+    } else {
+      // must read from what 'p' points to in each loop.
+      klassOop k = ((volatile oopDesc*)p)->klass_or_null();
+      if (k != NULL) {
         assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
+        oop o = (oop)p;
         assert(o->is_parsable(), "Should be parsable");
         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
         size_t res = o->size_given_klass(k->klass_part());
@@ -829,6 +830,7 @@ size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
         return res;
       }
     }
+    }
   }
 // A variant of the above that uses the Printezis bits for
@@ -845,24 +847,23 @@ const {
   // This must be volatile, or else there is a danger that the compiler
   // will compile the code below into a sometimes-infinite loop, by keeping
   // the value read the first time in a register.
-  oop o = (oop)p;
-  volatile oop* second_word_addr = o->klass_addr();
   DEBUG_ONLY(uint loops = 0;)
   while (true) {
-    klassOop k = (klassOop)(*second_word_addr);
     // We must do this until we get a consistent view of the object.
-    if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
-      FreeChunk* fc = (FreeChunk*)p;
-      volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
-      size_t res = (*sz_addr);
-      klassOop k2 = (klassOop)(*second_word_addr);  // Read to confirm.
-      if (k == k2) {
+    if (FreeChunk::indicatesFreeChunk(p)) {
+      volatile FreeChunk* fc = (volatile FreeChunk*)p;
+      size_t res = fc->size();
+      if (FreeChunk::indicatesFreeChunk(p)) {
         assert(res != 0, "Block size should not be 0");
         assert(loops == 0, "Should be 0");
         return res;
       }
-    } else if (k != NULL && o->is_parsable()) {
+    } else {
+      // must read from what 'p' points to in each loop.
+      klassOop k = ((volatile oopDesc*)p)->klass_or_null();
+      if (k != NULL && ((oopDesc*)p)->is_parsable()) {
         assert(k->is_oop(), "Should really be klass oop.");
+        oop o = (oop)p;
         assert(o->is_oop(), "Should be an oop");
         size_t res = o->size_given_klass(k->klass_part());
         res = adjustObjectSize(res);
@@ -871,6 +872,7 @@ const {
       } else {
         return c->block_size_if_printezis_bits(p);
       }
+    }
     assert(loops == 0, "Can loop at most once");
     DEBUG_ONLY(loops++;)
   }
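Both functions above follow the same lock-free protocol: read the size, then re-check that the block still looks like a free chunk. If the flag is unchanged, the size read in between was consistent; otherwise a concurrent allocator has claimed the block and the loop retries it as an object. Below is a minimal standalone sketch of the idea (an editorial addition, not part of the changeset; the struct and names are illustrative, not HotSpot's):

#include <cstddef>
#include <cstdint>

// A block's first word can concurrently flip from "free chunk" (low bit set)
// to "object header" while we examine it, so every read must come from
// memory (volatile), and a successful size read must be confirmed.
struct Block {
  volatile uintptr_t header;      // free-chunk flag lives in the low bit
  volatile size_t    size_in_words;
};

static bool looks_free(const Block* b) { return (b->header & 0x1) != 0; }

size_t block_size(const Block* b, size_t (*object_size)(const Block*)) {
  while (true) {
    if (looks_free(b)) {
      size_t res = b->size_in_words;  // may race with a concurrent allocator
      if (looks_free(b)) {            // unchanged? then 'res' was consistent
        return res;
      }                               // else: just allocated, retry below
    } else if (b->header != 0) {
      return object_size(b);          // header installed: a parsable object
    }
    // Header not installed yet: keep looping for a consistent view.
  }
}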
@@ -907,9 +909,8 @@ bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
   // and those objects (if garbage) may have been modified to hold
   // live range information.
   // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
-  klassOop k = oop(p)->klass();
-  intptr_t ki = (intptr_t)k;
-  if (FreeChunk::secondWordIndicatesFreeChunk(ki)) return false;
+  if (FreeChunk::indicatesFreeChunk(p)) return false;
+  klassOop k = oop(p)->klass_or_null();
   if (k != NULL) {
     // Ignore mark word because it may have been used to
     // chain together promoted objects (the last one
@@ -1027,7 +1028,7 @@ HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
     FreeChunk* fc = (FreeChunk*)res;
     fc->markNotFree();
     assert(!fc->isFree(), "shouldn't be marked free");
-    assert(oop(fc)->klass() == NULL, "should look uninitialized");
+    assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
     // Verify that the block offset table shows this to
     // be a single block, but not one which is unallocated.
     _bt.verify_single_block(res, size);
@@ -2593,7 +2594,7 @@ HeapWord* CFLS_LAB::alloc(size_t word_sz) {
   }
   res->markNotFree();
   assert(!res->isFree(), "shouldn't be marked free");
-  assert(oop(res)->klass() == NULL, "should look uninitialized");
+  assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
   // mangle a just allocated object with a distinct pattern.
   debug_only(res->mangleAllocated(word_sz));
   return (HeapWord*)res;
...
@@ -190,7 +190,8 @@ ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
   // depends on this property.
   debug_only(
     FreeChunk* junk = NULL;
-    assert(junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
+    assert(UseCompressedOops ||
+           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
            "Offset of FreeChunk::_prev within FreeChunk must match"
            " that of OopDesc::_klass within OopDesc");
   )
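The relaxed assert above guards the trick the old scheme relied on: without compressed oops, FreeChunk::_prev overlays exactly the word where an object's klass pointer sits, so tagging _prev's low bit is what secondWordIndicatesFreeChunk() used to test. With compressed oops the klass field narrows, the overlay no longer holds, and the free-chunk flag moves into the mark word instead. A toy illustration of the offset identity (hypothetical mock types, not HotSpot's):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Mock layouts: a free chunk reuses the header words of a dead object,
// so its _prev field must line up with the object's klass word.
struct MockOopDesc   { uintptr_t _mark; void* _klass; };
struct MockFreeChunk { size_t _size; MockFreeChunk* _prev; MockFreeChunk* _next; };

int main() {
  // On LP64 without compressed oops both offsets are 8 bytes, so setting
  // the low bit of _prev marks the word a reader would take for the klass.
  printf("_klass offset = %zu, _prev offset = %zu\n",
         offsetof(MockOopDesc, _klass), offsetof(MockFreeChunk, _prev));
  return 0;
}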
@@ -1039,7 +1040,7 @@ void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
     // mark end of object
   }
   // check that oop looks uninitialized
-  assert(oop(start)->klass() == NULL, "_klass should be NULL");
+  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
 }
 void CMSCollector::promoted(bool par, HeapWord* start,
@@ -1309,17 +1310,25 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
     }
   }
   oop obj = oop(obj_ptr);
-  assert(obj->klass() == NULL, "Object should be uninitialized here.");
+  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
   // Otherwise, copy the object.  Here we must be careful to insert the
   // klass pointer last, since this marks the block as an allocated object.
+  // Except with compressed oops it's the mark word.
   HeapWord* old_ptr = (HeapWord*)old;
   if (word_sz > (size_t)oopDesc::header_size()) {
     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                  obj_ptr + oopDesc::header_size(),
                                  word_sz - oopDesc::header_size());
   }
+  if (UseCompressedOops) {
+    // Copy gap missed by (aligned) header size calculation above
+    obj->set_klass_gap(old->klass_gap());
+  }
   // Restore the mark word copied above.
   obj->set_mark(m);
   // Now we can track the promoted object, if necessary.  We take care
   // To delay the transition from uninitialized to full object
   // (i.e., insertion of klass pointer) until after, so that it
@@ -1327,7 +1336,8 @@ ConcurrentMarkSweepGeneration::par_promote(int thread_num,
   if (promoInfo->tracking()) {
     promoInfo->track((PromotedObject*)obj, old->klass());
   }
-  // Finally, install the klass pointer.
+  // Finally, install the klass pointer (this should be volatile).
   obj->set_klass(old->klass());
   assert(old->is_oop(), "Will dereference klass ptr below");
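A note on the set_klass_gap() call added above (editorial, assuming the usual 64-bit layouts): oopDesc::header_size() is measured in whole heap words, so on a 64-bit VM the aligned copy skips the first two words, bytes 0..15. Without compressed oops that is exactly the mark word plus the klass pointer. With compressed oops the klass occupies only bytes 8..11, and bytes 12..15 form a gap into which the VM may already have packed a field; since the word-aligned copy never touches those bytes, they must be carried over explicitly from old->klass_gap().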
@@ -6165,7 +6175,7 @@ size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
   size_t sz = 0;
   oop p = (oop)addr;
-  if (p->klass() != NULL && p->is_parsable()) {
+  if (p->klass_or_null() != NULL && p->is_parsable()) {
     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
   } else {
     sz = block_size_using_printezis_bits(addr);
@@ -6602,7 +6612,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
   }
   if (_bitMap->isMarked(addr)) {
     // it's marked; is it potentially uninitialized?
-    if (p->klass() != NULL) {
+    if (p->klass_or_null() != NULL) {
       if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
         // Signal precleaning to redirty the card since
         // the klass pointer is already installed.
@@ -6615,11 +6625,8 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
       if (p->is_objArray()) {
         // objArrays are precisely marked; restrict scanning
         // to dirty cards only.
-        size = p->oop_iterate(_scanningClosure, mr);
-        assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
-               "adjustObjectSize should be the identity for array sizes, "
-               "which are necessarily larger than minimum object size of "
-               "two heap words");
+        size = CompactibleFreeListSpace::adjustObjectSize(
+                 p->oop_iterate(_scanningClosure, mr));
       } else {
         // A non-array may have been imprecisely marked; we need
         // to scan object in its entirety.
@@ -6653,7 +6660,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
     }
   } else {
     // Either a not yet marked object or an uninitialized object
-    if (p->klass() == NULL || !p->is_parsable()) {
+    if (p->klass_or_null() == NULL || !p->is_parsable()) {
       // An uninitialized object, skip to the next card, since
       // we may not be able to read its P-bits yet.
       assert(size == 0, "Initial value");
@@ -6710,7 +6717,7 @@ size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
   HeapWord* addr = (HeapWord*)p;
   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
   assert(!_span.contains(addr), "we are scanning the survivor spaces");
-  assert(p->klass() != NULL, "object should be initializd");
+  assert(p->klass_or_null() != NULL, "object should be initializd");
   assert(p->is_parsable(), "must be parsable.");
   // an initialized object; ignore mark word in verification below
   // since we are running concurrent with mutators
@@ -6868,7 +6875,7 @@ void MarkFromRootsClosure::do_bit(size_t offset) {
       assert(_skipBits == 0, "tautology");
       _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
       oop p = oop(addr);
-      if (p->klass() == NULL || !p->is_parsable()) {
+      if (p->klass_or_null() == NULL || !p->is_parsable()) {
         DEBUG_ONLY(if (!_verifying) {)
         // We re-dirty the cards on which this object lies and increase
         // the _threshold so that we'll come back to scan this object
@@ -6890,7 +6897,7 @@ void MarkFromRootsClosure::do_bit(size_t offset) {
           if (_threshold < end_card_addr) {
             _threshold = end_card_addr;
           }
-          if (p->klass() != NULL) {
+          if (p->klass_or_null() != NULL) {
             // Redirty the range of cards...
             _mut->mark_range(redirty_range);
           } // ...else the setting of klass will dirty the card anyway.
@@ -7048,7 +7055,7 @@ void Par_MarkFromRootsClosure::do_bit(size_t offset) {
       assert(_skip_bits == 0, "tautology");
       _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
       oop p = oop(addr);
-      if (p->klass() == NULL || !p->is_parsable()) {
+      if (p->klass_or_null() == NULL || !p->is_parsable()) {
         // in the case of Clean-on-Enter optimization, redirty card
         // and avoid clearing card by increasing the threshold.
         return;
@@ -8023,7 +8030,7 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
          "alignment problem");
 #ifdef DEBUG
-  if (oop(addr)->klass() != NULL &&
+  if (oop(addr)->klass_or_null() != NULL &&
       ( !_collector->should_unload_classes()
         || oop(addr)->is_parsable())) {
     // Ignore mark word because we are running concurrent with mutators
@@ -8036,7 +8043,7 @@ size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
   } else {
     // This should be an initialized object that's alive.
-    assert(oop(addr)->klass() != NULL &&
+    assert(oop(addr)->klass_or_null() != NULL &&
            (!_collector->should_unload_classes()
              || oop(addr)->is_parsable()),
            "Should be an initialized object");
...
@@ -22,88 +22,6 @@
  *
  */
-//
-// Free block maintenance for Concurrent Mark Sweep Generation
-//
-// The main data structure for free blocks are
-// . an indexed array of small free blocks, and
-// . a dictionary of large free blocks
-//
-// No virtuals in FreeChunk (don't want any vtables).
-// A FreeChunk is merely a chunk that can be in a doubly linked list
-// and has a size field. NOTE: FreeChunks are distinguished from allocated
-// objects in two ways (by the sweeper). The second word (prev) has the
-// LSB set to indicate a free chunk; allocated objects' klass() pointers
-// don't have their LSB set. The corresponding bit in the CMSBitMap is
-// set when the chunk is allocated. There are also blocks that "look free"
-// but are not part of the free list and should not be coalesced into larger
-// free blocks. These free blocks have their two LSB's set.
-class FreeChunk VALUE_OBJ_CLASS_SPEC {
-  friend class VMStructs;
-  FreeChunk* _next;
-  FreeChunk* _prev;
-  size_t     _size;
- public:
-  NOT_PRODUCT(static const size_t header_size();)
-  // Returns "true" if the "wrd", which is required to be the second word
-  // of a block, indicates that the block represents a free chunk.
-  static bool secondWordIndicatesFreeChunk(intptr_t wrd) {
-    return (wrd & 0x1) == 0x1;
-  }
-  bool isFree() const {
-    return secondWordIndicatesFreeChunk((intptr_t)_prev);
-  }
-  bool cantCoalesce() const { return (((intptr_t)_prev) & 0x3) == 0x3; }
-  FreeChunk* next() const { return _next; }
-  FreeChunk* prev() const { return (FreeChunk*)(((intptr_t)_prev) & ~(0x3)); }
-  debug_only(void* prev_addr() const { return (void*)&_prev; })
-  void linkAfter(FreeChunk* ptr) {
-    linkNext(ptr);
-    if (ptr != NULL) ptr->linkPrev(this);
-  }
-  void linkAfterNonNull(FreeChunk* ptr) {
-    assert(ptr != NULL, "precondition violation");
-    linkNext(ptr);
-    ptr->linkPrev(this);
-  }
-  void linkNext(FreeChunk* ptr) { _next = ptr; }
-  void linkPrev(FreeChunk* ptr) { _prev = (FreeChunk*)((intptr_t)ptr | 0x1); }
-  void clearPrev() { _prev = NULL; }
-  void clearNext() { _next = NULL; }
-  void dontCoalesce() {
-    // the block should be free
-    assert(isFree(), "Should look like a free block");
-    _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
-  }
-  void markFree() { _prev = (FreeChunk*)((intptr_t)_prev | 0x1); }
-  void markNotFree() { _prev = NULL; }
-  size_t size() const { return _size; }
-  void setSize(size_t size) { _size = size; }
-  // For volatile reads:
-  size_t* size_addr() { return &_size; }
-  // Return the address past the end of this chunk
-  HeapWord* end() const { return ((HeapWord*) this) + _size; }
-  // debugging
-  void verify() const PRODUCT_RETURN;
-  void verifyList() const PRODUCT_RETURN;
-  void mangleAllocated(size_t size) PRODUCT_RETURN;
-  void mangleFreed(size_t size) PRODUCT_RETURN;
-};
-// Alignment helpers etc.
-#define numQuanta(x,y) ((x+y-1)/y)
-enum AlignmentConstants {
-  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
-};
 // A FreeBlockDictionary is an abstract superclass that will allow
 // a number of alternative implementations in the future.
...
@@ -47,15 +47,15 @@ void FreeChunk::mangleAllocated(size_t size) {
   Copy::fill_to_words(addr + hdr, size - hdr, baadbabeHeapWord);
 }
-void FreeChunk::mangleFreed(size_t size) {
+void FreeChunk::mangleFreed(size_t sz) {
   assert(baadbabeHeapWord != deadbeefHeapWord, "Need distinct patterns");
   // mangle all but the header of a just-freed block of storage
   // just prior to passing it to the storage dictionary
-  assert(size >= MinChunkSize, "smallest size of object");
-  assert(size == _size, "just checking");
+  assert(sz >= MinChunkSize, "smallest size of object");
+  assert(sz == size(), "just checking");
   HeapWord* addr = (HeapWord*)this;
   size_t hdr = header_size();
-  Copy::fill_to_words(addr + hdr, size - hdr, deadbeefHeapWord);
+  Copy::fill_to_words(addr + hdr, sz - hdr, deadbeefHeapWord);
 }
 void FreeChunk::verifyList() const {
...
+/*
+ * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+//
+// Free block maintenance for Concurrent Mark Sweep Generation
+//
+// The main data structure for free blocks are
+// . an indexed array of small free blocks, and
+// . a dictionary of large free blocks
+//
+// No virtuals in FreeChunk (don't want any vtables).
+// A FreeChunk is merely a chunk that can be in a doubly linked list
+// and has a size field. NOTE: FreeChunks are distinguished from allocated
+// objects in two ways (by the sweeper), depending on whether the VM is 32 or
+// 64 bits.
+// In 32 bits or 64 bits without CompressedOops, the second word (prev) has the
+// LSB set to indicate a free chunk; allocated objects' klass() pointers
+// don't have their LSB set. The corresponding bit in the CMSBitMap is
+// set when the chunk is allocated. There are also blocks that "look free"
+// but are not part of the free list and should not be coalesced into larger
+// free blocks. These free blocks have their two LSB's set.
+class FreeChunk VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+  // For 64 bit compressed oops, the markOop encodes both the size and the
+  // indication that this is a FreeChunk and not an object.
+  volatile size_t _size;
+  FreeChunk* _prev;
+  FreeChunk* _next;
+  markOop mark() const volatile { return (markOop)_size; }
+  void set_mark(markOop m) { _size = (size_t)m; }
+ public:
+  NOT_PRODUCT(static const size_t header_size();)
+  // Returns "true" if the address indicates that the block represents
+  // a free chunk.
+  static bool indicatesFreeChunk(const HeapWord* addr) {
+    // Force volatile read from addr because value might change between
+    // calls.  We really want the read of _mark and _prev from this pointer
+    // to be volatile but making the fields volatile causes all sorts of
+    // compilation errors.
+    return ((volatile FreeChunk*)addr)->isFree();
+  }
+  bool isFree() const volatile {
+    LP64_ONLY(if (UseCompressedOops) return mark()->is_cms_free_chunk(); else)
+    return (((intptr_t)_prev) & 0x1) == 0x1;
+  }
+  bool cantCoalesce() const {
+    assert(isFree(), "can't get coalesce bit on not free");
+    return (((intptr_t)_prev) & 0x2) == 0x2;
+  }
+  void dontCoalesce() {
+    // the block should be free
+    assert(isFree(), "Should look like a free block");
+    _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
+  }
+  FreeChunk* prev() const {
+    return (FreeChunk*)(((intptr_t)_prev) & ~(0x3));
+  }
+  debug_only(void* prev_addr() const { return (void*)&_prev; })
+  size_t size() const volatile {
+    LP64_ONLY(if (UseCompressedOops) return mark()->get_size(); else )
+    return _size;
+  }
+  void setSize(size_t sz) {
+    LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::set_size_and_free(sz)); else )
+    _size = sz;
+  }
+  FreeChunk* next() const { return _next; }
+  void linkAfter(FreeChunk* ptr) {
+    linkNext(ptr);
+    if (ptr != NULL) ptr->linkPrev(this);
+  }
+  void linkAfterNonNull(FreeChunk* ptr) {
+    assert(ptr != NULL, "precondition violation");
+    linkNext(ptr);
+    ptr->linkPrev(this);
+  }
+  void linkNext(FreeChunk* ptr) { _next = ptr; }
+  void linkPrev(FreeChunk* ptr) {
+    LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
+    _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
+  }
+  void clearPrev() { _prev = NULL; }
+  void clearNext() { _next = NULL; }
+  void markNotFree() {
+    LP64_ONLY(if (UseCompressedOops) set_mark(markOopDesc::prototype());)
+    // Also set _prev to null
+    _prev = NULL;
+  }
+  // Return the address past the end of this chunk
+  HeapWord* end() const { return ((HeapWord*) this) + size(); }
+  // debugging
+  void verify() const PRODUCT_RETURN;
+  void verifyList() const PRODUCT_RETURN;
+  void mangleAllocated(size_t size) PRODUCT_RETURN;
+  void mangleFreed(size_t size) PRODUCT_RETURN;
+};
+// Alignment helpers etc.
+#define numQuanta(x,y) ((x+y-1)/y)
+enum AlignmentConstants {
+  MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment
+};
@@ -23,6 +23,7 @@
  */
 #define VM_STRUCTS_CMS(nonstatic_field, \
+                       volatile_nonstatic_field, \
                        static_field) \
   nonstatic_field(CompactibleFreeListSpace, _collector, CMSCollector*) \
   nonstatic_field(CompactibleFreeListSpace, _bt, BlockOffsetArrayNonContigSpace) \
@@ -36,9 +37,9 @@
   nonstatic_field(CMSCollector, _markBitMap, CMSBitMap) \
   nonstatic_field(ConcurrentMarkSweepGeneration, _cmsSpace, CompactibleFreeListSpace*) \
   static_field(ConcurrentMarkSweepThread, _collector, CMSCollector*) \
+  volatile_nonstatic_field(FreeChunk, _size, size_t) \
   nonstatic_field(FreeChunk, _next, FreeChunk*) \
   nonstatic_field(FreeChunk, _prev, FreeChunk*) \
-  nonstatic_field(FreeChunk, _size, size_t) \
   nonstatic_field(LinearAllocBlock, _word_size, size_t) \
   nonstatic_field(FreeList, _size, size_t) \
   nonstatic_field(FreeList, _count, ssize_t) \
...
@@ -206,6 +206,7 @@ freeBlockDictionary.cpp thread_<os_family>.inline.hpp
 freeBlockDictionary.hpp                 allocation.hpp
 freeBlockDictionary.hpp                 debug.hpp
+freeBlockDictionary.hpp                 freeChunk.hpp
 freeBlockDictionary.hpp                 globalDefinitions.hpp
 freeBlockDictionary.hpp                 memRegion.hpp
 freeBlockDictionary.hpp                 mutex.hpp
@@ -214,6 +215,14 @@ freeBlockDictionary.hpp ostream.hpp
 freeChunk.cpp                           copy.hpp
 freeChunk.cpp                           freeBlockDictionary.hpp
+freeChunk.hpp                           allocation.hpp
+freeChunk.hpp                           debug.hpp
+freeChunk.hpp                           globalDefinitions.hpp
+freeChunk.hpp                           markOop.hpp
+freeChunk.hpp                           memRegion.hpp
+freeChunk.hpp                           mutex.hpp
+freeChunk.hpp                           ostream.hpp
 freeList.cpp                            freeBlockDictionary.hpp
 freeList.cpp                            freeList.hpp
 freeList.cpp                            globals.hpp
...
@@ -29,8 +29,10 @@
 //
 // Bit-format of an object header (most significant first):
 //
-//
-//  unused:0/25 hash:25/31 age:4 biased_lock:1 lock:2 = 32/64 bits
+//  32 bits: unused:0  hash:25 age:4 biased_lock:1 lock:2
+//  64 bits: unused:24 hash:31 cms:2 age:4 biased_lock:1 lock:2
+//           unused:20 size:35 cms:2 age:4 biased_lock:1 lock:2 (if cms
+//           free chunk)
 //
 // - hash contains the identity hash value: largest value is
 //   31 bits, see os::random().  Also, 64-bit vm's require
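A quick check of the arithmetic in the new layout comment (editorial note, not part of the changeset): lock(2) + biased_lock(1) + age(4) occupy the 7 low bits, so the cms field starts at bit 7; with the 2-bit cms field shown, the size field starts at bit 9, and its 35 bits span bits 9..43, leaving 64 - 44 = 20 unused bits, which matches the "unused:20 size:35" line. The size is counted in heap words, comfortably covering any object in the 32 GB heaps that compressed oops target.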
@@ -91,6 +93,7 @@ class markOopDesc: public oopDesc {
          biased_lock_bits         = 1,
          max_hash_bits            = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
          hash_bits                = max_hash_bits > 31 ? 31 : max_hash_bits,
+         cms_bits                 = LP64_ONLY(1) NOT_LP64(0),
          epoch_bits               = 2
   };
@@ -106,7 +109,8 @@ class markOopDesc: public oopDesc {
   enum { lock_shift               = 0,
          biased_lock_shift        = lock_bits,
          age_shift                = lock_bits + biased_lock_bits,
-         hash_shift               = lock_bits + biased_lock_bits + age_bits,
+         cms_shift                = age_shift + age_bits,
+         hash_shift               = cms_shift + cms_bits,
          epoch_shift              = hash_shift
   };
@@ -118,7 +122,9 @@ class markOopDesc: public oopDesc {
          age_mask                 = right_n_bits(age_bits),
          age_mask_in_place        = age_mask << age_shift,
          epoch_mask               = right_n_bits(epoch_bits),
-         epoch_mask_in_place      = epoch_mask << epoch_shift
+         epoch_mask_in_place      = epoch_mask << epoch_shift,
+         cms_mask                 = right_n_bits(cms_bits),
+         cms_mask_in_place        = cms_mask << cms_shift
 #ifndef _WIN64
          ,hash_mask               = right_n_bits(hash_bits),
          hash_mask_in_place       = (address_word)hash_mask << hash_shift
@@ -360,4 +366,40 @@ class markOopDesc: public oopDesc {
   // see the definition in markOop.cpp for the gory details
   bool should_not_be_cached() const;
+  // These markOops indicate cms free chunk blocks and not objects.
+  // In 64 bit, the markOop is set to distinguish them from oops.
+  // These are defined in 32 bit mode for vmStructs.
+  const static uintptr_t cms_free_chunk_pattern = 0x1;
+  // Constants for the size field.
+  enum { size_shift = cms_shift + cms_bits,
+         size_bits  = 35    // need for compressed oops 32G
+       };
+  // These values are too big for Win64
+  const static uintptr_t size_mask = LP64_ONLY(right_n_bits(size_bits))
+                                     NOT_LP64(0);
+  const static uintptr_t size_mask_in_place =
+                                     (address_word)size_mask << size_shift;
+#ifdef _LP64
+  static markOop cms_free_prototype() {
+    return markOop(((intptr_t)prototype() & ~cms_mask_in_place) |
+                   ((cms_free_chunk_pattern & cms_mask) << cms_shift));
+  }
+  uintptr_t cms_encoding() const {
+    return mask_bits(value() >> cms_shift, cms_mask);
+  }
+  bool is_cms_free_chunk() const {
+    return is_neutral() &&
+           (cms_encoding() & cms_free_chunk_pattern) == cms_free_chunk_pattern;
+  }
+  size_t get_size() const { return (size_t)(value() >> size_shift); }
+  static markOop set_size_and_free(size_t size) {
+    assert((size & ~size_mask) == 0, "shouldn't overflow size field");
+    return markOop(((intptr_t)cms_free_prototype() & ~size_mask_in_place) |
+                   (((intptr_t)size & size_mask) << size_shift));
+  }
+#endif // _LP64
 };
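To make the encoding above concrete, here is a small standalone sketch (an editorial addition, not part of the changeset) that reproduces the set_size_and_free / is_cms_free_chunk / get_size bit math with plain integers. The shift values are derived from the layout comment (cms field at bit 7, assumed 2 bits wide, so the size starts at bit 9), and a check of the unlocked lock-bit pattern (01) stands in for is_neutral(); all names are illustrative, not HotSpot's.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative constants mirroring markOopDesc (assumed: 2-bit cms field).
const int       lock_bits  = 2, biased_lock_bits = 1, age_bits = 4;
const int       cms_shift  = lock_bits + biased_lock_bits + age_bits;  // 7
const int       cms_bits   = 2;
const int       size_shift = cms_shift + cms_bits;                     // 9
const uintptr_t cms_mask   = (1 << cms_bits) - 1;
const uintptr_t unlocked   = 0x1;  // lock bits 01: neutral/unlocked

uintptr_t set_size_and_free(size_t size) {
  // Neutral mark word + cms free-chunk pattern + size in the high bits.
  return ((uintptr_t)size << size_shift) | (0x1UL << cms_shift) | unlocked;
}

bool is_cms_free_chunk(uintptr_t mark) {
  bool neutral = (mark & 0x3) == unlocked;  // simplified is_neutral()
  return neutral && (((mark >> cms_shift) & cms_mask) & 0x1) == 0x1;
}

size_t get_size(uintptr_t mark) { return (size_t)(mark >> size_shift); }

int main() {
  uintptr_t m = set_size_and_free(1024);  // a 1024-heap-word free chunk
  assert(is_cms_free_chunk(m));
  printf("size = %zu words\n", get_size(m));
  return 0;
}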
@@ -1174,7 +1174,7 @@ void Arguments::set_ergonomics_flags() {
   // field offset to determine free list chunk markers.
   // Check that UseCompressedOops can be set with the max heap size allocated
   // by ergonomics.
-  if (!UseConcMarkSweepGC && MaxHeapSize <= max_heap_for_compressed_oops()) {
+  if (MaxHeapSize <= max_heap_for_compressed_oops()) {
     if (FLAG_IS_DEFAULT(UseCompressedOops)) {
       // Leave compressed oops off by default.  Uncomment
       // the following line to return it to default status.
...
@@ -1695,7 +1695,12 @@ static inline uint64_t cast_uint64_t(size_t x)
   declare_constant(markOopDesc::no_hash)              \
   declare_constant(markOopDesc::no_hash_in_place)     \
   declare_constant(markOopDesc::no_lock_in_place)     \
-  declare_constant(markOopDesc::max_age)
+  declare_constant(markOopDesc::max_age)              \
+                                                      \
+  /* Constants in markOop used by CMS. */             \
+  declare_constant(markOopDesc::cms_shift)            \
+  declare_constant(markOopDesc::cms_mask)             \
+  declare_constant(markOopDesc::size_shift)           \
 /* NOTE that we do not use the last_entry() macro here; it is used  */
 /* in vmStructs_<os>_<cpu>.hpp's VM_LONG_CONSTANTS_OS_CPU macro (and */
@@ -1959,6 +1964,7 @@ VMStructEntry VMStructs::localHotSpotVMStructs[] = {
                  GENERATE_STATIC_VM_STRUCT_ENTRY)
   VM_STRUCTS_CMS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
+                 GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
                  GENERATE_STATIC_VM_STRUCT_ENTRY)
 #endif // SERIALGC
@@ -2100,6 +2106,7 @@ VMStructs::init() {
                  CHECK_STATIC_VM_STRUCT_ENTRY);
   VM_STRUCTS_CMS(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
+                 CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
                  CHECK_STATIC_VM_STRUCT_ENTRY);
 #endif // SERIALGC
@@ -2204,6 +2211,7 @@ VMStructs::init() {
   debug_only(VM_STRUCTS_PARALLELGC(ENSURE_FIELD_TYPE_PRESENT, \
                                    ENSURE_FIELD_TYPE_PRESENT));
   debug_only(VM_STRUCTS_CMS(ENSURE_FIELD_TYPE_PRESENT, \
+                            ENSURE_FIELD_TYPE_PRESENT, \
                             ENSURE_FIELD_TYPE_PRESENT));
 #endif // SERIALGC
   debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT, \
...