Commit 4f7abda8, authored by never

Merge

 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -407,6 +407,11 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   void save_sweep_limit() {
     _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                    unallocated_block() : end();
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
+                             " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
+                             _sweep_limit, bottom(), end());
+    }
   }
   NOT_PRODUCT(
     void clear_sweep_limit() { _sweep_limit = NULL; }
......
@@ -7888,60 +7888,64 @@ SweepClosure::SweepClosure(CMSCollector* collector,
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("\n====================\nStarting new sweep\n");
+    gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
+                           _limit);
   }
 }
-// We need this destructor to reclaim any space at the end
-// of the space, which do_blk below may not yet have added back to
-// the free lists.
+void SweepClosure::print_on(outputStream* st) const {
+  tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
+                _sp->bottom(), _sp->end());
+  tty->print_cr("_limit = " PTR_FORMAT, _limit);
+  tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
+  NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
+  tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
+                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
+}
+
+#ifndef PRODUCT
+// Assertion checking only: no useful work in product mode --
+// however, if any of the flags below become product flags,
+// you may need to review this code to see if it needs to be
+// enabled in product mode.
 SweepClosure::~SweepClosure() {
   assert_lock_strong(_freelistLock);
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
-  // Flush any remaining coterminal free run as a single
-  // coalesced chunk to the appropriate free list.
   if (inFreeRange()) {
-    assert(freeFinger() < _limit, "freeFinger points too high");
-    flush_cur_free_chunk(freeFinger(), pointer_delta(_limit, freeFinger()));
-    if (CMSTraceSweeper) {
-      gclog_or_tty->print("Sweep: last chunk: ");
-      gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") [coalesced:"SIZE_FORMAT"]\n",
-                          freeFinger(), pointer_delta(_limit, freeFinger()), lastFreeRangeCoalesced());
-    }
-  } // else nothing to flush
-  NOT_PRODUCT(
-    if (Verbose && PrintGC) {
-      gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
-                          SIZE_FORMAT " bytes",
-                          _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
-      gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
-                             SIZE_FORMAT" bytes "
-                             "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
-                             _numObjectsLive, _numWordsLive*sizeof(HeapWord),
-                             _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
-      size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
-                          sizeof(HeapWord);
-      gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
-      if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
-        size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
-        size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
-        size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
-        gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
-        gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
-                            indexListReturnedBytes);
-        gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
-                               dictReturnedBytes);
-      }
-    }
-  )
-  // Now, in debug mode, just null out the sweep_limit
-  NOT_PRODUCT(_sp->clear_sweep_limit();)
+    warning("inFreeRange() should have been reset; dumping state of SweepClosure");
+    print();
+    ShouldNotReachHere();
+  }
+  if (Verbose && PrintGC) {
+    gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
+                        _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
+    gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
+                           SIZE_FORMAT" bytes "
+                           "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
+                           _numObjectsLive, _numWordsLive*sizeof(HeapWord),
+                           _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
+    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
+                        * sizeof(HeapWord);
+    gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
+    if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
+      size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
+      size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
+      size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
+      gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
+      gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
+                          indexListReturnedBytes);
+      gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
+                             dictReturnedBytes);
+    }
+  }
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("end of sweep\n================\n");
+    gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
+                           _limit);
   }
 }
+#endif  // PRODUCT
 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
     bool freeRangeInFreeLists) {
@@ -8001,15 +8005,17 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
   // we started the sweep, it may no longer be one because heap expansion
   // may have caused us to coalesce the block ending at the address _limit
   // with a newly expanded chunk (this happens when _limit was set to the
-  // previous _end of the space), so we may have stepped past _limit; see CR 6977970.
+  // previous _end of the space), so we may have stepped past _limit:
+  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
   if (addr >= _limit) { // we have swept up to or past the limit: finish up
     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
            "sweep _limit out of bounds");
     assert(addr < _sp->end(), "addr out of bounds");
-    // Flush any remaining coterminal free run as a single
+    // Flush any free range we might be holding as a single
    // coalesced chunk to the appropriate free list.
    if (inFreeRange()) {
-      assert(freeFinger() < _limit, "finger points too high");
+      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
+             err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
       flush_cur_free_chunk(freeFinger(),
                            pointer_delta(addr, freeFinger()));
       if (CMSTraceSweeper) {
@@ -8033,7 +8039,16 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
     res = fc->size();
     do_already_free_chunk(fc);
     debug_only(_sp->verifyFreeLists());
-    assert(res == fc->size(), "Don't expect the size to change");
+    // If we flush the chunk at hand in lookahead_and_flush()
+    // and it's coalesced with a preceding chunk, then the
+    // process of "mangling" the payload of the coalesced block
+    // will cause erasure of the size information from the
+    // (erstwhile) header of all the coalesced blocks but the
+    // first, so the first disjunct in the assert will not hold
+    // in that specific case (in which case the second disjunct
+    // will hold).
+    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
+           "Otherwise the size info doesn't change at this step");
     NOT_PRODUCT(
       _numObjectsAlreadyFree++;
       _numWordsAlreadyFree += res;
@@ -8103,7 +8118,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
 //
 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
-  size_t size = fc->size();
+  const size_t size = fc->size();
   // Chunks that cannot be coalesced are not in the
   // free lists.
   if (CMSTestInFreeList && !fc->cantCoalesce()) {
@@ -8112,7 +8127,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
   }
   // a chunk that is already free, should not have been
   // marked in the bit map
-  HeapWord* addr = (HeapWord*) fc;
+  HeapWord* const addr = (HeapWord*) fc;
   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
   // Verify that the bit map has no bits marked between
   // addr and purported end of this block.
@@ -8149,7 +8164,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
       }
     } else {
       // the midst of a free range, we are coalescing
-      debug_only(record_free_block_coalesced(fc);)
+      print_free_block_coalesced(fc);
       if (CMSTraceSweeper) {
         gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
       }
@@ -8173,6 +8188,10 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
        }
      }
    }
+    // Note that if the chunk is not coalescable (the else arm
+    // below), we unconditionally flush, without needing to do
+    // a "lookahead," as we do below.
+    if (inFreeRange()) lookahead_and_flush(fc, size);
   } else {
     // Code path common to both original and adaptive free lists.
@@ -8191,8 +8210,8 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
   // This is a chunk of garbage. It is not in any free list.
   // Add it to a free list or let it possibly be coalesced into
   // a larger chunk.
-  HeapWord* addr = (HeapWord*) fc;
-  size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
+  HeapWord* const addr = (HeapWord*) fc;
+  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
   if (_sp->adaptive_freelists()) {
     // Verify that the bit map has no bits marked between
@@ -8205,7 +8224,6 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
       // start of a new free range
       assert(size > 0, "A free range should have a size");
       initialize_free_range(addr, false);
-
     } else {
       // this will be swept up when we hit the end of the
       // free range
@@ -8235,6 +8253,9 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
     // addr and purported end of just dead object.
     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
   }
+  assert(_limit >= addr + size,
+         "A freshly garbage chunk can't possibly straddle over _limit");
+  if (inFreeRange()) lookahead_and_flush(fc, size);
   return size;
 }
@@ -8284,8 +8305,8 @@ size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
          (!_collector->should_unload_classes()
           || oop(addr)->is_parsable()),
          "Should be an initialized object");
-  // Note that there are objects used during class redefinition
-  // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite()
+  // Note that there are objects used during class redefinition,
+  // e.g. merge_cp in VM_RedefineClasses::merge_cp_and_rewrite(),
   // which are discarded with their is_conc_safe state still
   // false. These object may be floating garbage so may be
   // seen here. If they are floating garbage their size
@@ -8307,7 +8328,7 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
                                                  size_t chunkSize) {
   // do_post_free_or_garbage_chunk() should only be called in the case
   // of the adaptive free list allocator.
-  bool fcInFreeLists = fc->isFree();
+  const bool fcInFreeLists = fc->isFree();
   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
   assert((HeapWord*)fc <= _limit, "sweep invariant");
   if (CMSTestInFreeList && fcInFreeLists) {
@@ -8318,11 +8339,11 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
     gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
   }
-  HeapWord* addr = (HeapWord*) fc;
+  HeapWord* const fc_addr = (HeapWord*) fc;
   bool coalesce;
-  size_t left  = pointer_delta(addr, freeFinger());
-  size_t right = chunkSize;
+  const size_t left  = pointer_delta(fc_addr, freeFinger());
+  const size_t right = chunkSize;
   switch (FLSCoalescePolicy) {
     // numeric value forms a coalition aggressiveness metric
     case 0:  { // never coalesce
@@ -8355,15 +8376,15 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
   // If the chunk is in a free range and either we decided to coalesce above
   // or the chunk is near the large block at the end of the heap
   // (isNearLargestChunk() returns true), then coalesce this chunk.
-  bool doCoalesce = inFreeRange() &&
-    (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
+  const bool doCoalesce = inFreeRange()
+                          && (coalesce || _g->isNearLargestChunk(fc_addr));
   if (doCoalesce) {
     // Coalesce the current free range on the left with the new
     // chunk on the right. If either is on a free list,
     // it must be removed from the list and stashed in the closure.
     if (freeRangeInFreeLists()) {
-      FreeChunk* ffc = (FreeChunk*)freeFinger();
-      assert(ffc->size() == pointer_delta(addr, freeFinger()),
+      FreeChunk* const ffc = (FreeChunk*)freeFinger();
+      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
              "Size of free range is inconsistent with chunk size.");
       if (CMSTestInFreeList) {
         assert(_sp->verifyChunkInFreeLists(ffc),
@@ -8380,13 +8401,14 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
       _sp->removeFreeChunkFromFreeLists(fc);
     }
     set_lastFreeRangeCoalesced(true);
+    print_free_block_coalesced(fc);
   } else {  // not in a free range and/or should not coalesce
     // Return the current free range and start a new one.
     if (inFreeRange()) {
       // In a free range but cannot coalesce with the right hand chunk.
       // Put the current free range into the free lists.
       flush_cur_free_chunk(freeFinger(),
-                           pointer_delta(addr, freeFinger()));
+                           pointer_delta(fc_addr, freeFinger()));
     }
     // Set up for new free range. Pass along whether the right hand
     // chunk is in the free lists.
@@ -8394,6 +8416,42 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
   }
 }
+// Lookahead flush:
+// If we are tracking a free range, and this is the last chunk that
+// we'll look at because its end crosses past _limit, we'll preemptively
+// flush it along with any free range we may be holding on to. Note that
+// this can be the case only for an already free or freshly garbage
+// chunk. If this block is an object, it can never straddle
+// over _limit. The "straddling" occurs when _limit is set at
+// the previous end of the space when this cycle started, and
+// a subsequent heap expansion caused the previously co-terminal
+// free block to be coalesced with the newly expanded portion,
+// thus rendering _limit a non-block-boundary making it dangerous
+// for the sweeper to step over and examine.
+void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
+  assert(inFreeRange(), "Should only be called if currently in a free range.");
+  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
+  assert(_sp->used_region().contains(eob - 1),
+         err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
+                 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
+                 _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
+  if (eob >= _limit) {
+    assert(eob == _limit || fc->isFree(), "Only a free chunk should allow us to cross over the limit");
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
+                             "[" PTR_FORMAT "," PTR_FORMAT ") in space "
+                             "[" PTR_FORMAT "," PTR_FORMAT ")",
+                             _limit, fc, eob, _sp->bottom(), _sp->end());
+    }
+    // Return the storage we are tracking back into the free lists.
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr("Flushing ... ");
+    }
+    assert(freeFinger() < eob, "Error");
+    flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
+  }
+}
+
 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
   assert(inFreeRange(), "Should only be called if currently in a free range.");
   assert(size > 0,
@@ -8419,6 +8477,8 @@ void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
     }
     _sp->addChunkAndRepairOffsetTable(chunk, size,
             lastFreeRangeCoalesced());
+  } else if (CMSTraceSweeper) {
+    gclog_or_tty->print_cr("Already in free list: nothing to flush");
   }
   set_inFreeRange(false);
   set_freeRangeInFreeLists(false);
@@ -8477,13 +8537,14 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
   return debug_cms_space->verifyChunkInFreeLists(fc);
 }
-#endif
-void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
+void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
+    gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
+                           fc, fc->size());
   }
 }
+#endif
 // CMSIsAliveClosure
 bool CMSIsAliveClosure::do_object_b(oop obj) {
......
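The new lookahead_and_flush() above boils down to one pointer comparison: if the end of the chunk being examined reaches or crosses the sweep limit that was saved when the cycle started, the tracked free range must be flushed now rather than letting the sweeper step past _limit. A minimal standalone sketch of that check, using simplified stand-in types and hypothetical addresses rather than the real HotSpot ones:

#include <cstddef>
#include <cstdio>

// Simplified stand-ins; in HotSpot these are HeapWord* values with
// word-sized arithmetic. The addresses below are hypothetical.
typedef const char* Addr;

// True when the block [chunk, chunk + size_bytes) reaches or crosses the
// saved sweep limit, i.e. the case in which lookahead_and_flush() flushes
// the free range it is tracking instead of stepping over _limit.
static bool reaches_or_crosses(Addr chunk, size_t size_bytes, Addr limit) {
  Addr eob = chunk + size_bytes;   // end of block
  return eob >= limit;
}

int main() {
  char space[1024];
  Addr limit = space + 512;        // _limit saved when the sweep started
  printf("%d\n", reaches_or_crosses(space + 480, 64, limit));  // 1: flush now
  printf("%d\n", reaches_or_crosses(space + 128, 64, limit));  // 0: keep sweeping
  return 0;
}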
@@ -1701,9 +1701,9 @@ class SweepClosure: public BlkClosureCareful {
   CMSCollector*                  _collector;  // collector doing the work
   ConcurrentMarkSweepGeneration* _g;          // Generation being swept
   CompactibleFreeListSpace*      _sp;         // Space being swept
-  HeapWord*                      _limit;      // the address at which the sweep should stop because
-                                              // we do not expect blocks eligible for sweeping past
-                                              // that address.
+  HeapWord*                      _limit;      // the address at or above which the sweep should stop
+                                              // because we do not expect newly garbage blocks
+                                              // eligible for sweeping past that address.
   Mutex*                         _freelistLock; // Free list lock (in space)
   CMSBitMap*                     _bitMap;     // Marking bit map (in
                                               // generation)
@@ -1750,6 +1750,10 @@ class SweepClosure: public BlkClosureCareful {
   void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
   // Process a free chunk during sweeping.
   void do_already_free_chunk(FreeChunk *fc);
+  // Work method called when processing an already free or a
+  // freshly garbage chunk to do a lookahead and possibly a
+  // premptive flush if crossing over _limit.
+  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
   // Process a garbage chunk during sweeping.
   size_t do_garbage_chunk(FreeChunk *fc);
   // Process a live chunk during sweeping.
@@ -1758,8 +1762,6 @@ class SweepClosure: public BlkClosureCareful {
   // Accessors.
   HeapWord* freeFinger() const     { return _freeFinger; }
   void set_freeFinger(HeapWord* v) { _freeFinger = v; }
-  size_t freeRangeSize() const     { return _freeRangeSize; }
-  void set_freeRangeSize(size_t v) { _freeRangeSize = v; }
   bool inFreeRange()    const      { return _inFreeRange; }
   void set_inFreeRange(bool v)     { _inFreeRange = v; }
   bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; }
@@ -1779,14 +1781,16 @@ class SweepClosure: public BlkClosureCareful {
   void do_yield_work(HeapWord* addr);
   // Debugging/Printing
-  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
+  void print_free_block_coalesced(FreeChunk* fc) const;
  public:
   SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
                CMSBitMap* bitMap, bool should_yield);
-  ~SweepClosure();
+  ~SweepClosure() PRODUCT_RETURN;
   size_t do_blk_careful(HeapWord* addr);
+  void print() const { print_on(tty); }
+  void print_on(outputStream *st) const;
 };
 // Closures related to weak references processing
......
@@ -63,6 +63,15 @@ void Rewriter::compute_index_maps() {
   _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0);
 }
+// Unrewrite the bytecodes if an error occurs.
+void Rewriter::restore_bytecodes() {
+  int len = _methods->length();
+  for (int i = len-1; i >= 0; i--) {
+    methodOop method = (methodOop)_methods->obj_at(i);
+    scan_method(method, true);
+  }
+}
+
 // Creates a constant pool cache given a CPC map
 void Rewriter::make_constant_pool_cache(TRAPS) {
@@ -133,57 +142,94 @@ void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {
 // Rewrite a classfile-order CP index into a native-order CPC index.
-void Rewriter::rewrite_member_reference(address bcp, int offset) {
+void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
   address p = bcp + offset;
-  int cp_index = Bytes::get_Java_u2(p);
-  int cache_index = cp_entry_to_cp_cache(cp_index);
-  Bytes::put_native_u2(p, cache_index);
+  if (!reverse) {
+    int cp_index = Bytes::get_Java_u2(p);
+    int cache_index = cp_entry_to_cp_cache(cp_index);
+    Bytes::put_native_u2(p, cache_index);
+  } else {
+    int cache_index = Bytes::get_native_u2(p);
+    int pool_index = cp_cache_entry_pool_index(cache_index);
+    Bytes::put_Java_u2(p, pool_index);
+  }
 }
-void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
+void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
   address p = bcp + offset;
-  assert(p[-1] == Bytecodes::_invokedynamic, "");
-  int cp_index = Bytes::get_Java_u2(p);
-  int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
-  int cpc2 = add_secondary_cp_cache_entry(cpc);
-
-  // Replace the trailing four bytes with a CPC index for the dynamic
-  // call site.  Unlike other CPC entries, there is one per bytecode,
-  // not just one per distinct CP entry.  In other words, the
-  // CPC-to-CP relation is many-to-one for invokedynamic entries.
-  // This means we must use a larger index size than u2 to address
-  // all these entries.  That is the main reason invokedynamic
-  // must have a five-byte instruction format.  (Of course, other JVM
-  // implementations can use the bytes for other purposes.)
-  Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
-  // Note: We use native_u4 format exclusively for 4-byte indexes.
+  assert(p[-1] == Bytecodes::_invokedynamic, "not invokedynamic bytecode");
+  if (!reverse) {
+    int cp_index = Bytes::get_Java_u2(p);
+    int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
+    int cpc2 = add_secondary_cp_cache_entry(cpc);
+
+    // Replace the trailing four bytes with a CPC index for the dynamic
+    // call site.  Unlike other CPC entries, there is one per bytecode,
+    // not just one per distinct CP entry.  In other words, the
+    // CPC-to-CP relation is many-to-one for invokedynamic entries.
+    // This means we must use a larger index size than u2 to address
+    // all these entries.  That is the main reason invokedynamic
+    // must have a five-byte instruction format.  (Of course, other JVM
+    // implementations can use the bytes for other purposes.)
+    Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
+    // Note: We use native_u4 format exclusively for 4-byte indexes.
+  } else {
+    int cache_index = constantPoolCacheOopDesc::decode_secondary_index(
+                        Bytes::get_native_u4(p));
+    int secondary_index = cp_cache_secondary_entry_main_index(cache_index);
+    int pool_index = cp_cache_entry_pool_index(secondary_index);
+    assert(_pool->tag_at(pool_index).is_invoke_dynamic(), "wrong index");
+    // zero out 4 bytes
+    Bytes::put_Java_u4(p, 0);
+    Bytes::put_Java_u2(p, pool_index);
+  }
 }
 // Rewrite some ldc bytecodes to _fast_aldc
-void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide) {
-  assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "");
-  address p = bcp + offset;
-  int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
-  constantTag tag = _pool->tag_at(cp_index).value();
-  if (tag.is_method_handle() || tag.is_method_type()) {
-    int cache_index = cp_entry_to_cp_cache(cp_index);
-    if (is_wide) {
-      (*bcp) = Bytecodes::_fast_aldc_w;
-      assert(cache_index == (u2)cache_index, "");
-      Bytes::put_native_u2(p, cache_index);
-    } else {
-      (*bcp) = Bytecodes::_fast_aldc;
-      assert(cache_index == (u1)cache_index, "");
-      (*p) = (u1)cache_index;
+void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
+                                 bool reverse) {
+  if (!reverse) {
+    assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "not ldc bytecode");
+    address p = bcp + offset;
+    int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
+    constantTag tag = _pool->tag_at(cp_index).value();
+    if (tag.is_method_handle() || tag.is_method_type()) {
+      int cache_index = cp_entry_to_cp_cache(cp_index);
+      if (is_wide) {
+        (*bcp) = Bytecodes::_fast_aldc_w;
+        assert(cache_index == (u2)cache_index, "index overflow");
+        Bytes::put_native_u2(p, cache_index);
+      } else {
+        (*bcp) = Bytecodes::_fast_aldc;
+        assert(cache_index == (u1)cache_index, "index overflow");
+        (*p) = (u1)cache_index;
+      }
+    }
+  } else {
+    Bytecodes::Code rewritten_bc =
+              (is_wide ? Bytecodes::_fast_aldc_w : Bytecodes::_fast_aldc);
+    if ((*bcp) == rewritten_bc) {
+      address p = bcp + offset;
+      int cache_index = is_wide ? Bytes::get_native_u2(p) : (u1)(*p);
+      int pool_index = cp_cache_entry_pool_index(cache_index);
+      if (is_wide) {
+        (*bcp) = Bytecodes::_ldc_w;
+        assert(pool_index == (u2)pool_index, "index overflow");
+        Bytes::put_Java_u2(p, pool_index);
+      } else {
+        (*bcp) = Bytecodes::_ldc;
+        assert(pool_index == (u1)pool_index, "index overflow");
+        (*p) = (u1)pool_index;
+      }
     }
   }
 }
 // Rewrites a method given the index_map information
-void Rewriter::scan_method(methodOop method) {
+void Rewriter::scan_method(methodOop method, bool reverse) {
   int nof_jsrs = 0;
   bool has_monitor_bytecodes = false;
@@ -233,6 +279,13 @@ void Rewriter::scan_method(methodOop method) {
               ? Bytecodes::_fast_linearswitch
               : Bytecodes::_fast_binaryswitch
           );
+#endif
+          break;
+        }
+        case Bytecodes::_fast_linearswitch:
+        case Bytecodes::_fast_binaryswitch: {
+#ifndef CC_INTERP
+          (*bcp) = Bytecodes::_lookupswitch;
 #endif
           break;
         }
@@ -244,16 +297,18 @@ void Rewriter::scan_method(methodOop method) {
         case Bytecodes::_invokespecial  : // fall through
         case Bytecodes::_invokestatic   :
         case Bytecodes::_invokeinterface:
-          rewrite_member_reference(bcp, prefix_length+1);
+          rewrite_member_reference(bcp, prefix_length+1, reverse);
           break;
         case Bytecodes::_invokedynamic:
-          rewrite_invokedynamic(bcp, prefix_length+1);
+          rewrite_invokedynamic(bcp, prefix_length+1, reverse);
           break;
         case Bytecodes::_ldc:
-          maybe_rewrite_ldc(bcp, prefix_length+1, false);
+        case Bytecodes::_fast_aldc:
+          maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse);
           break;
         case Bytecodes::_ldc_w:
-          maybe_rewrite_ldc(bcp, prefix_length+1, true);
+        case Bytecodes::_fast_aldc_w:
+          maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse);
           break;
         case Bytecodes::_jsr            : // fall through
         case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
@@ -273,12 +328,13 @@ void Rewriter::scan_method(methodOop method) {
   if (nof_jsrs > 0) {
     method->set_has_jsrs();
     // Second pass will revisit this method.
-    assert(method->has_jsrs(), "");
+    assert(method->has_jsrs(), "didn't we just set this?");
   }
 }
 // After constant pool is created, revisit methods containing jsrs.
 methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
+  ResourceMark rm(THREAD);
   ResolveOopMapConflicts romc(method);
   methodHandle original_method = method;
   method = romc.do_potential_rewrite(CHECK_(methodHandle()));
@@ -300,7 +356,6 @@ methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
   return method;
 }
-
 void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
   ResourceMark rm(THREAD);
   Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
@@ -343,34 +398,57 @@ Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArray
   }
   // rewrite methods, in two passes
-  int i, len = _methods->length();
+  int len = _methods->length();
-  for (i = len; --i >= 0; ) {
+  for (int i = len-1; i >= 0; i--) {
     methodOop method = (methodOop)_methods->obj_at(i);
     scan_method(method);
   }
   // allocate constant pool cache, now that we've seen all the bytecodes
-  make_constant_pool_cache(CHECK);
+  make_constant_pool_cache(THREAD);
+
+  // Restore bytecodes to their unrewritten state if there are exceptions
+  // rewriting bytecodes or allocating the cpCache
+  if (HAS_PENDING_EXCEPTION) {
+    restore_bytecodes();
+    return;
+  }
+}
+
+// Relocate jsr/rets in a method.  This can't be done with the rewriter
+// stage because it can throw other exceptions, leaving the bytecodes
+// pointing at constant pool cache entries.
+// Link and check jvmti dependencies while we're iterating over the methods.
+// JSR292 code calls with a different set of methods, so two entry points.
+void Rewriter::relocate_and_link(instanceKlassHandle this_oop, TRAPS) {
+  objArrayHandle methods(THREAD, this_oop->methods());
+  relocate_and_link(this_oop, methods, THREAD);
+}
-  for (i = len; --i >= 0; ) {
-    methodHandle m(THREAD, (methodOop)_methods->obj_at(i));
+void Rewriter::relocate_and_link(instanceKlassHandle this_oop,
+                                 objArrayHandle methods, TRAPS) {
+  int len = methods->length();
+  for (int i = len-1; i >= 0; i--) {
+    methodHandle m(THREAD, (methodOop)methods->obj_at(i));
     if (m->has_jsrs()) {
       m = rewrite_jsrs(m, CHECK);
       // Method might have gotten rewritten.
-      _methods->obj_at_put(i, m());
+      methods->obj_at_put(i, m());
     }
     // Set up method entry points for compiler and interpreter.
     m->link_method(m, CHECK);
+    // This is for JVMTI and unrelated to relocator but the last thing we do
 #ifdef ASSERT
     if (StressMethodComparator) {
       static int nmc = 0;
       for (int j = i; j >= 0 && j >= i-4; j--) {
         if ((++nmc % 1000) == 0)  tty->print_cr("Have run MethodComparator %d times...", nmc);
-        bool z = MethodComparator::methods_EMCP(m(), (methodOop)_methods->obj_at(j));
+        bool z = MethodComparator::methods_EMCP(m(),
+                   (methodOop)methods->obj_at(j));
         if (j == i && !z) {
           tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
           assert(z, "method must compare equal to itself");
......
@@ -85,13 +85,15 @@ class Rewriter: public StackObj {
   void compute_index_maps();
   void make_constant_pool_cache(TRAPS);
-  void scan_method(methodOop m);
+  void scan_method(methodOop m, bool reverse = false);
-  methodHandle rewrite_jsrs(methodHandle m, TRAPS);
   void rewrite_Object_init(methodHandle m, TRAPS);
-  void rewrite_member_reference(address bcp, int offset);
-  void rewrite_invokedynamic(address bcp, int offset);
-  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide);
+  void rewrite_member_reference(address bcp, int offset, bool reverse = false);
+  void rewrite_invokedynamic(address bcp, int offset, bool reverse = false);
+  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false);
+  // Revert bytecodes in case of an exception.
+  void restore_bytecodes();
+
+  static methodHandle rewrite_jsrs(methodHandle m, TRAPS);
  public:
   // Driver routine:
   static void rewrite(instanceKlassHandle klass, TRAPS);
@@ -100,6 +102,13 @@ class Rewriter: public StackObj {
   enum {
     _secondary_entry_tag = nth_bit(30)
   };
+
+  // Second pass, not gated by is_rewritten flag
+  static void relocate_and_link(instanceKlassHandle klass, TRAPS);
+  // JSR292 version to call with it's own methods.
+  static void relocate_and_link(instanceKlassHandle klass,
+                                objArrayHandle methods, TRAPS);
+
 };
 #endif // SHARE_VM_INTERPRETER_REWRITER_HPP
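The reversible rewriting introduced above is easiest to see on the simplest operand kind: the forward pass replaces a big-endian constant-pool index with a native-order cache index, and the reverse pass maps the cache index back and restores the original bytes, so the two transforms undo each other. Below is a minimal, self-contained sketch of that round trip in plain C++; the two index tables are hypothetical stand-ins for the Rewriter's cp_entry_to_cp_cache() and cp_cache_entry_pool_index() maps, not HotSpot code.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical, mutually inverse index maps standing in for the Rewriter's
// cp_entry_to_cp_cache() and cp_cache_entry_pool_index().
static const std::vector<uint16_t> cp_to_cache = {0, 3, 1, 2};
static const std::vector<uint16_t> cache_to_cp = {0, 2, 3, 1};

static uint16_t get_Java_u2(const uint8_t* p)         { return (uint16_t)((p[0] << 8) | p[1]); }
static void     put_Java_u2(uint8_t* p, uint16_t v)   { p[0] = (uint8_t)(v >> 8); p[1] = (uint8_t)v; }
static uint16_t get_native_u2(const uint8_t* p)       { uint16_t v; std::memcpy(&v, p, 2); return v; }
static void     put_native_u2(uint8_t* p, uint16_t v) { std::memcpy(p, &v, 2); }

// Forward: classfile-order CP index -> native-order cache index (the shape of
// rewrite_member_reference above). Reverse: cache index -> CP index, restoring
// the original operand bytes.
static void rewrite_operand(uint8_t* p, bool reverse) {
  if (!reverse) {
    put_native_u2(p, cp_to_cache[get_Java_u2(p)]);
  } else {
    put_Java_u2(p, cache_to_cp[get_native_u2(p)]);
  }
}

int main() {
  uint8_t operand[2] = {0x00, 0x02};   // original bytecode operand: CP index 2
  rewrite_operand(operand, false);     // forward rewrite
  rewrite_operand(operand, true);      // undo after a simulated cpCache failure
  assert(operand[0] == 0x00 && operand[1] == 0x02);  // round-trips to the original
  return 0;
}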
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -566,11 +566,17 @@ HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
     q = n;
     n += _sp->block_size(n);
     assert(n > q,
-           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT " _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
-                   n, last, _sp->bottom(), _sp->end()));
+           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT","
+                   " while querying blk_start(" PTR_FORMAT ")"
+                   " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
+                   n, last, addr, _sp->bottom(), _sp->end()));
   }
-  assert(q <= addr, err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")", q, addr));
-  assert(addr <= n, err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")", addr, n));
+  assert(q <= addr,
+         err_msg("wrong order for current (" INTPTR_FORMAT ")" " <= arg (" INTPTR_FORMAT ")",
+                 q, addr));
+  assert(addr <= n,
+         err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
+                 addr, n));
   return q;
 }
......
@@ -335,6 +335,9 @@ bool instanceKlass::link_class_impl(
           this_oop->rewrite_class(CHECK_false);
         }
+        // relocate jsrs and link methods after they are all rewritten
+        this_oop->relocate_and_link_methods(CHECK_false);
+
         // Initialize the vtable and interface table after
         // methods have been rewritten since rewrite may
         // fabricate new methodOops.
@@ -365,17 +368,8 @@ bool instanceKlass::link_class_impl(
 // Rewrite the byte codes of all of the methods of a class.
-// Three cases:
-//    During the link of a newly loaded class.
-//    During the preloading of classes to be written to the shared spaces.
-//      - Rewrite the methods and update the method entry points.
-//
-//    During the link of a class in the shared spaces.
-//      - The methods were already rewritten, update the metho entry points.
-//
 // The rewriter must be called exactly once. Rewriting must happen after
 // verification but before the first method of the class is executed.
-
 void instanceKlass::rewrite_class(TRAPS) {
   assert(is_loaded(), "must be loaded");
   instanceKlassHandle this_oop(THREAD, this->as_klassOop());
@@ -383,10 +377,19 @@ void instanceKlass::rewrite_class(TRAPS) {
     assert(this_oop()->is_shared(), "rewriting an unshared class?");
     return;
   }
-  Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
+  Rewriter::rewrite(this_oop, CHECK);
   this_oop->set_rewritten();
 }
+// Now relocate and link method entry points after class is rewritten.
+// This is outside is_rewritten flag. In case of an exception, it can be
+// executed more than once.
+void instanceKlass::relocate_and_link_methods(TRAPS) {
+  assert(is_loaded(), "must be loaded");
+  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
+  Rewriter::relocate_and_link(this_oop, CHECK);
+}
+
 void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
   // Make sure klass is linked (verified) before initialization
......
@@ -392,6 +392,7 @@ class instanceKlass: public Klass {
   bool link_class_or_fail(TRAPS); // returns false on failure
   void unlink_class();
   void rewrite_class(TRAPS);
+  void relocate_and_link_methods(TRAPS);
   methodOop class_initializer();
   // set the class to initialized if no static initializer is present
......
@@ -693,7 +693,10 @@ void methodOopDesc::unlink_method() {
 // Called when the method_holder is getting linked. Setup entrypoints so the method
 // is ready to be called from interpreter, compiler, and vtables.
 void methodOopDesc::link_method(methodHandle h_method, TRAPS) {
-  assert(_i2i_entry == NULL, "should only be called once");
+  // If the code cache is full, we may reenter this function for the
+  // leftover methods that weren't linked.
+  if (_i2i_entry != NULL) return;
+
   assert(_adapter == NULL, "init'd to NULL" );
   assert( _code == NULL, "nothing compiled yet" );
......
@@ -992,6 +992,9 @@ jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) {
     }
     Rewriter::rewrite(scratch_class, THREAD);
+    if (!HAS_PENDING_EXCEPTION) {
+      Rewriter::relocate_and_link(scratch_class, THREAD);
+    }
     if (HAS_PENDING_EXCEPTION) {
       Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name();
       CLEAR_PENDING_EXCEPTION;
......
@@ -1604,6 +1604,7 @@ methodHandle MethodHandleCompiler::get_method_oop(TRAPS) const {
   objArrayHandle methods(THREAD, m_array);
   methods->obj_at_put(0, m());
   Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(empty));  // Use fake class.
+  Rewriter::relocate_and_link(_target_klass(), methods, CHECK_(empty));  // Use fake class.
   // Set the invocation counter's count to the invoke count of the
   // original call site.
......