openanolis / dragonwell8_hotspot
Commit 4f7abda8
Authored June 15, 2011 by never
Merge; parents: fc66c70f, 9b9d8cff

Showing 11 changed files with 319 additions and 145 deletions (+319, -145)
Changed files:
  src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp (+6, -1)
  src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp (+127, -66)
  src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp (+11, -7)
  src/share/vm/interpreter/rewriter.cpp (+128, -50)
  src/share/vm/interpreter/rewriter.hpp (+14, -5)
  src/share/vm/memory/blockOffsetTable.cpp (+11, -5)
  src/share/vm/oops/instanceKlass.cpp (+13, -10)
  src/share/vm/oops/instanceKlass.hpp (+1, -0)
  src/share/vm/oops/methodOop.cpp (+4, -1)
  src/share/vm/prims/jvmtiRedefineClasses.cpp (+3, -0)
  src/share/vm/prims/methodHandleWalk.cpp (+1, -0)
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp

 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -407,6 +407,11 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   void save_sweep_limit() {
     _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                    unallocated_block() : end();
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
+                             " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
+                             _sweep_limit, bottom(), end());
+    }
   }
   NOT_PRODUCT(
     void clear_sweep_limit() { _sweep_limit = NULL; }
...
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

...
@@ -7888,60 +7888,64 @@ SweepClosure::SweepClosure(CMSCollector* collector,
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("\n====================\nStarting new sweep\n");
+    gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
+                           _limit);
   }
 }

-// We need this destructor to reclaim any space at the end
-// of the space, which do_blk below may not yet have added back to
-// the free lists.
+void SweepClosure::print_on(outputStream* st) const {
+  tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
+                _sp->bottom(), _sp->end());
+  tty->print_cr("_limit = " PTR_FORMAT, _limit);
+  tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
+  NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
+  tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
+                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
+}
+
+#ifndef PRODUCT
+// Assertion checking only: no useful work in product mode --
+// however, if any of the flags below become product flags,
+// you may need to review this code to see if it needs to be
+// enabled in product mode.
 SweepClosure::~SweepClosure() {
   assert_lock_strong(_freelistLock);
   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
          "sweep _limit out of bounds");
-  // Flush any remaining coterminal free run as a single
-  // coalesced chunk to the appropriate free list.
   if (inFreeRange()) {
-    assert(freeFinger() < _limit, "freeFinger points too high");
-    flush_cur_free_chunk(freeFinger(), pointer_delta(_limit, freeFinger()));
-    if (CMSTraceSweeper) {
-      gclog_or_tty->print("Sweep: last chunk: ");
-      gclog_or_tty->print("put_free_blk 0x%x (" SIZE_FORMAT ") [coalesced:" SIZE_FORMAT "]\n",
-                          freeFinger(), pointer_delta(_limit, freeFinger()),
-                          lastFreeRangeCoalesced());
-    }
-  } // else nothing to flush
-  NOT_PRODUCT(
-    if (Verbose && PrintGC) {
-      gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
-                          _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
-      gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes "
-                             "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
-                             _numObjectsLive, _numWordsLive*sizeof(HeapWord),
-                             _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
-      size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
-                          * sizeof(HeapWord);
-      gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
-      if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
-        size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
-        size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
-        size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
-        gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returnedBytes);
-        gclog_or_tty->print(" Indexed List Returned " SIZE_FORMAT " bytes",
-                            indexListReturnedBytes);
-        gclog_or_tty->print_cr(" Dictionary Returned " SIZE_FORMAT " bytes",
-                               dictReturnedBytes);
-      }
-    }
-  )
-  // Now, in debug mode, just null out the sweep_limit
-  NOT_PRODUCT(_sp->clear_sweep_limit();)
+    warning("inFreeRange() should have been reset; dumping state of SweepClosure");
+    print();
+    ShouldNotReachHere();
+  }
+  if (Verbose && PrintGC) {
+    gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
+                        _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
+    gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes "
+                           "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
+                           _numObjectsLive, _numWordsLive*sizeof(HeapWord),
+                           _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
+    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
+                        * sizeof(HeapWord);
+    gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
+    if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
+      size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
+      size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
+      size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
+      gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returnedBytes);
+      gclog_or_tty->print(" Indexed List Returned " SIZE_FORMAT " bytes",
+                          indexListReturnedBytes);
+      gclog_or_tty->print_cr(" Dictionary Returned " SIZE_FORMAT " bytes",
+                             dictReturnedBytes);
+    }
+  }
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("end of sweep\n================\n");
+    gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
+                           _limit);
   }
 }
+#endif  // PRODUCT

 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
                                          bool freeRangeInFreeLists) {
...
@@ -8001,15 +8005,17 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
   // we started the sweep, it may no longer be one because heap expansion
   // may have caused us to coalesce the block ending at the address _limit
   // with a newly expanded chunk (this happens when _limit was set to the
-  // previous _end of the space), so we may have stepped past _limit; see CR 6977970.
+  // previous _end of the space), so we may have stepped past _limit:
+  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
   if (addr >= _limit) { // we have swept up to or past the limit: finish up
     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
            "sweep _limit out of bounds");
     assert(addr < _sp->end(), "addr out of bounds");
-    // Flush any remaining coterminal free run as a single
+    // Flush any free range we might be holding as a single
     // coalesced chunk to the appropriate free list.
     if (inFreeRange()) {
-      assert(freeFinger() < _limit, "finger points too high");
+      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
+             err_msg("freeFinger() " PTR_FORMAT " is out-of-bounds", freeFinger()));
       flush_cur_free_chunk(freeFinger(),
                            pointer_delta(addr, freeFinger()));
       if (CMSTraceSweeper) {
...
@@ -8033,7 +8039,16 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
     res = fc->size();
     do_already_free_chunk(fc);
     debug_only(_sp->verifyFreeLists());
-    assert(res == fc->size(), "Don't expect the size to change");
+    // If we flush the chunk at hand in lookahead_and_flush()
+    // and it's coalesced with a preceding chunk, then the
+    // process of "mangling" the payload of the coalesced block
+    // will cause erasure of the size information from the
+    // (erstwhile) header of all the coalesced blocks but the
+    // first, so the first disjunct in the assert will not hold
+    // in that specific case (in which case the second disjunct
+    // will hold).
+    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
+           "Otherwise the size info doesn't change at this step");
     NOT_PRODUCT(
       _numObjectsAlreadyFree++;
       _numWordsAlreadyFree += res;
...
@@ -8103,7 +8118,7 @@ size_t SweepClosure::do_blk_careful(HeapWord* addr) {
 //
 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
-  size_t size = fc->size();
+  const size_t size = fc->size();
   // Chunks that cannot be coalesced are not in the
   // free lists.
   if (CMSTestInFreeList && !fc->cantCoalesce()) {
...
@@ -8112,7 +8127,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
   }
   // a chunk that is already free, should not have been
   // marked in the bit map
-  HeapWord* addr = (HeapWord*) fc;
+  HeapWord* const addr = (HeapWord*) fc;
   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
   // Verify that the bit map has no bits marked between
   // addr and purported end of this block.
...
@@ -8149,7 +8164,7 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
       }
     } else {
       // the midst of a free range, we are coalescing
-      debug_only(record_free_block_coalesced(fc);)
+      print_free_block_coalesced(fc);
       if (CMSTraceSweeper) {
        gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
       }
...
@@ -8173,6 +8188,10 @@ void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
       }
     }
   }
+  // Note that if the chunk is not coalescable (the else arm
+  // below), we unconditionally flush, without needing to do
+  // a "lookahead," as we do below.
+  if (inFreeRange()) lookahead_and_flush(fc, size);
 } else {
   // Code path common to both original and adaptive free lists.
...
@@ -8191,8 +8210,8 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
   // This is a chunk of garbage. It is not in any free list.
   // Add it to a free list or let it possibly be coalesced into
   // a larger chunk.
-  HeapWord* addr = (HeapWord*) fc;
-  size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
+  HeapWord* const addr = (HeapWord*) fc;
+  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());

   if (_sp->adaptive_freelists()) {
     // Verify that the bit map has no bits marked between
...
@@ -8205,7 +8224,6 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
       // start of a new free range
       assert(size > 0, "A free range should have a size");
       initialize_free_range(addr, false);
-
     } else {
       // this will be swept up when we hit the end of the
       // free range
...
@@ -8235,6 +8253,9 @@ size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
     // addr and purported end of just dead object.
     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
   }
+  assert(_limit >= addr + size,
+         "A freshly garbage chunk can't possibly straddle over _limit");
+  if (inFreeRange()) lookahead_and_flush(fc, size);
   return size;
 }
...
@@ -8284,8 +8305,8 @@ size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
          (!_collector->should_unload_classes()
           || oop(addr)->is_parsable()),
          "Should be an initialized object");
-  // Note that there are objects used during class redefinition
-  // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite()
+  // Note that there are objects used during class redefinition,
+  // e.g. merge_cp in VM_RedefineClasses::merge_cp_and_rewrite(),
   // which are discarded with their is_conc_safe state still
   // false. These object may be floating garbage so may be
   // seen here. If they are floating garbage their size
...
@@ -8307,7 +8328,7 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
                                                  size_t chunkSize) {
   // do_post_free_or_garbage_chunk() should only be called in the case
   // of the adaptive free list allocator.
-  bool fcInFreeLists = fc->isFree();
+  const bool fcInFreeLists = fc->isFree();
   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
   assert((HeapWord*)fc <= _limit, "sweep invariant");
   if (CMSTestInFreeList && fcInFreeLists) {
...
@@ -8318,11 +8339,11 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
     gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
   }

-  HeapWord* addr = (HeapWord*) fc;
+  HeapWord* const fc_addr = (HeapWord*) fc;

   bool coalesce;
-  size_t left = pointer_delta(addr, freeFinger());
-  size_t right = chunkSize;
+  const size_t left = pointer_delta(fc_addr, freeFinger());
+  const size_t right = chunkSize;
   switch (FLSCoalescePolicy) {
     // numeric value forms a coalition aggressiveness metric
     case 0:  { // never coalesce
...
@@ -8355,15 +8376,15 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
   // If the chunk is in a free range and either we decided to coalesce above
   // or the chunk is near the large block at the end of the heap
   // (isNearLargestChunk() returns true), then coalesce this chunk.
-  bool doCoalesce = inFreeRange() &&
-    (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
+  const bool doCoalesce = inFreeRange()
+                          && (coalesce || _g->isNearLargestChunk(fc_addr));
   if (doCoalesce) {
     // Coalesce the current free range on the left with the new
     // chunk on the right. If either is on a free list,
     // it must be removed from the list and stashed in the closure.
     if (freeRangeInFreeLists()) {
-      FreeChunk* ffc = (FreeChunk*)freeFinger();
-      assert(ffc->size() == pointer_delta(addr, freeFinger()),
+      FreeChunk* const ffc = (FreeChunk*)freeFinger();
+      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
              "Size of free range is inconsistent with chunk size.");
       if (CMSTestInFreeList) {
         assert(_sp->verifyChunkInFreeLists(ffc),
...
@@ -8380,13 +8401,14 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
       _sp->removeFreeChunkFromFreeLists(fc);
     }
     set_lastFreeRangeCoalesced(true);
+    print_free_block_coalesced(fc);
   } else {  // not in a free range and/or should not coalesce
     // Return the current free range and start a new one.
     if (inFreeRange()) {
       // In a free range but cannot coalesce with the right hand chunk.
       // Put the current free range into the free lists.
       flush_cur_free_chunk(freeFinger(),
-                           pointer_delta(addr, freeFinger()));
+                           pointer_delta(fc_addr, freeFinger()));
     }
     // Set up for new free range. Pass along whether the right hand
     // chunk is in the free lists.
...
@@ -8394,6 +8416,42 @@ void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
   }
 }

+// Lookahead flush:
+// If we are tracking a free range, and this is the last chunk that
+// we'll look at because its end crosses past _limit, we'll preemptively
+// flush it along with any free range we may be holding on to. Note that
+// this can be the case only for an already free or freshly garbage
+// chunk. If this block is an object, it can never straddle
+// over _limit. The "straddling" occurs when _limit is set at
+// the previous end of the space when this cycle started, and
+// a subsequent heap expansion caused the previously co-terminal
+// free block to be coalesced with the newly expanded portion,
+// thus rendering _limit a non-block-boundary making it dangerous
+// for the sweeper to step over and examine.
+void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
+  assert(inFreeRange(), "Should only be called if currently in a free range.");
+  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
+  assert(_sp->used_region().contains(eob - 1),
+         err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
+                 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
+                 _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
+  if (eob >= _limit) {
+    assert(eob == _limit || fc->isFree(), "Only a free chunk should allow us to cross over the limit");
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
+                             "[" PTR_FORMAT "," PTR_FORMAT ") in space "
+                             "[" PTR_FORMAT "," PTR_FORMAT ")",
+                             _limit, fc, eob, _sp->bottom(), _sp->end());
+    }
+    // Return the storage we are tracking back into the free lists.
+    if (CMSTraceSweeper) {
+      gclog_or_tty->print_cr("Flushing ... ");
+    }
+    assert(freeFinger() < eob, "Error");
+    flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
+  }
+}
+
 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
   assert(inFreeRange(), "Should only be called if currently in a free range.");
   assert(size > 0,
...
@@ -8419,6 +8477,8 @@ void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
     }
     _sp->addChunkAndRepairOffsetTable(chunk, size,
                                       lastFreeRangeCoalesced());
+  } else if (CMSTraceSweeper) {
+    gclog_or_tty->print_cr("Already in free list: nothing to flush");
   }
   set_inFreeRange(false);
   set_freeRangeInFreeLists(false);
...
@@ -8477,13 +8537,14 @@ void SweepClosure::do_yield_work(HeapWord* addr) {
 bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
   return debug_cms_space->verifyChunkInFreeLists(fc);
 }
+#endif

-void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
+void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
   if (CMSTraceSweeper) {
-    gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
+    gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
+                           fc, fc->size());
  }
 }
-#endif

 // CMSIsAliveClosure
 bool CMSIsAliveClosure::do_object_b(oop obj) {
...
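The comment added with lookahead_and_flush() above describes why a chunk may now reach or cross _limit once the heap expands mid-sweep. The following standalone sketch (not part of this commit) simply replays that scenario with made-up addresses; HotSpot types such as FreeChunk and HeapWord are replaced by plain char pointers.

  #include <cstdio>
  #include <cstddef>

  int main() {
    static char space[0x1000];            // stand-in for the CMS old-generation space
    char*  bottom     = space;
    char*  limit      = space + 0x800;    // end() recorded by save_sweep_limit() at sweep start
    char*  fc         = space + 0x700;    // free chunk that starts below _limit
    size_t chunk_size = 0x300;            // grew past _limit after expansion and coalescing
    char*  eob        = fc + chunk_size;  // "end of block", as computed in lookahead_and_flush()

    if (eob >= limit) {
      // Analogue of the new code path: flush the tracked free range and finish the sweep here
      // instead of stepping over a _limit that is no longer a block boundary.
      std::printf("block [%p,%p) reaches or crosses _limit %p in space starting at %p\n",
                  (void*)fc, (void*)eob, (void*)limit, (void*)bottom);
    }
    return 0;
  }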
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp

...
@@ -1701,9 +1701,9 @@ class SweepClosure: public BlkClosureCareful {
   CMSCollector*                  _collector;  // collector doing the work
   ConcurrentMarkSweepGeneration* _g;          // Generation being swept
   CompactibleFreeListSpace*      _sp;         // Space being swept
-  HeapWord*                      _limit;      // the address at which the sweep should stop because
-                                              // we do not expect blocks eligible for sweeping past
-                                              // that address.
+  HeapWord*                      _limit;      // the address at or above which the sweep should stop
+                                              // because we do not expect newly garbage blocks
+                                              // eligible for sweeping past that address.
   Mutex*                         _freelistLock; // Free list lock (in space)
   CMSBitMap*                     _bitMap;     // Marking bit map (in
                                               // generation)
...
@@ -1750,6 +1750,10 @@ class SweepClosure: public BlkClosureCareful {
   void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
   // Process a free chunk during sweeping.
   void do_already_free_chunk(FreeChunk *fc);
+  // Work method called when processing an already free or a
+  // freshly garbage chunk to do a lookahead and possibly a
+  // premptive flush if crossing over _limit.
+  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
   // Process a garbage chunk during sweeping.
   size_t do_garbage_chunk(FreeChunk *fc);
   // Process a live chunk during sweeping.
...
@@ -1758,8 +1762,6 @@ class SweepClosure: public BlkClosureCareful {
   // Accessors.
   HeapWord* freeFinger() const          { return _freeFinger; }
   void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
-  size_t freeRangeSize() const          { return _freeRangeSize; }
-  void set_freeRangeSize(size_t v)      { _freeRangeSize = v; }
   bool inFreeRange()    const           { return _inFreeRange; }
   void set_inFreeRange(bool v)          { _inFreeRange = v; }
   bool lastFreeRangeCoalesced() const   { return _lastFreeRangeCoalesced; }
...
@@ -1779,14 +1781,16 @@ class SweepClosure: public BlkClosureCareful {
   void do_yield_work(HeapWord* addr);

   // Debugging/Printing
-  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
+  void print_free_block_coalesced(FreeChunk* fc) const;

  public:
   SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
                CMSBitMap* bitMap, bool should_yield);
-  ~SweepClosure();
+  ~SweepClosure() PRODUCT_RETURN;

   size_t do_blk_careful(HeapWord* addr);
+  void print() const { print_on(tty); }
+  void print_on(outputStream* st) const;
 };

 // Closures related to weak references processing
...
src/share/vm/interpreter/rewriter.cpp

...
@@ -63,6 +63,15 @@ void Rewriter::compute_index_maps() {
   _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0);
 }

+// Unrewrite the bytecodes if an error occurs.
+void Rewriter::restore_bytecodes() {
+  int len = _methods->length();
+
+  for (int i = len-1; i >= 0; i--) {
+    methodOop method = (methodOop)_methods->obj_at(i);
+    scan_method(method, true);
+  }
+}

 // Creates a constant pool cache given a CPC map
 void Rewriter::make_constant_pool_cache(TRAPS) {
...
@@ -133,57 +142,94 @@ void Rewriter::rewrite_Object_init(methodHandle method, TRAPS) {
 // Rewrite a classfile-order CP index into a native-order CPC index.
-void Rewriter::rewrite_member_reference(address bcp, int offset) {
+void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
   address p = bcp + offset;
-  int cp_index    = Bytes::get_Java_u2(p);
-  int cache_index = cp_entry_to_cp_cache(cp_index);
-  Bytes::put_native_u2(p, cache_index);
+  if (!reverse) {
+    int cp_index    = Bytes::get_Java_u2(p);
+    int cache_index = cp_entry_to_cp_cache(cp_index);
+    Bytes::put_native_u2(p, cache_index);
+  } else {
+    int cache_index = Bytes::get_native_u2(p);
+    int pool_index  = cp_cache_entry_pool_index(cache_index);
+    Bytes::put_Java_u2(p, pool_index);
+  }
 }

-void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
+void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
   address p = bcp + offset;
-  assert(p[-1] == Bytecodes::_invokedynamic, "");
-  int cp_index = Bytes::get_Java_u2(p);
-  int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
-  int cpc2 = add_secondary_cp_cache_entry(cpc);
-
-  // Replace the trailing four bytes with a CPC index for the dynamic
-  // call site.  Unlike other CPC entries, there is one per bytecode,
-  // not just one per distinct CP entry.  In other words, the
-  // CPC-to-CP relation is many-to-one for invokedynamic entries.
-  // This means we must use a larger index size than u2 to address
-  // all these entries.  That is the main reason invokedynamic
-  // must have a five-byte instruction format.  (Of course, other JVM
-  // implementations can use the bytes for other purposes.)
-  Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
-  // Note: We use native_u4 format exclusively for 4-byte indexes.
+  assert(p[-1] == Bytecodes::_invokedynamic, "not invokedynamic bytecode");
+  if (!reverse) {
+    int cp_index = Bytes::get_Java_u2(p);
+    int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
+    int cpc2 = add_secondary_cp_cache_entry(cpc);
+
+    // Replace the trailing four bytes with a CPC index for the dynamic
+    // call site.  Unlike other CPC entries, there is one per bytecode,
+    // not just one per distinct CP entry.  In other words, the
+    // CPC-to-CP relation is many-to-one for invokedynamic entries.
+    // This means we must use a larger index size than u2 to address
+    // all these entries.  That is the main reason invokedynamic
+    // must have a five-byte instruction format.  (Of course, other JVM
+    // implementations can use the bytes for other purposes.)
+    Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
+    // Note: We use native_u4 format exclusively for 4-byte indexes.
+  } else {
+    int cache_index = constantPoolCacheOopDesc::decode_secondary_index(
+                        Bytes::get_native_u4(p));
+    int secondary_index = cp_cache_secondary_entry_main_index(cache_index);
+    int pool_index = cp_cache_entry_pool_index(secondary_index);
+    assert(_pool->tag_at(pool_index).is_invoke_dynamic(), "wrong index");
+    // zero out 4 bytes
+    Bytes::put_Java_u4(p, 0);
+    Bytes::put_Java_u2(p, pool_index);
+  }
 }

 // Rewrite some ldc bytecodes to _fast_aldc
-void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide) {
-  assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "");
-  address p = bcp + offset;
-  int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
-  constantTag tag = _pool->tag_at(cp_index).value();
-  if (tag.is_method_handle() || tag.is_method_type()) {
-    int cache_index = cp_entry_to_cp_cache(cp_index);
-    if (is_wide) {
-      (*bcp) = Bytecodes::_fast_aldc_w;
-      assert(cache_index == (u2)cache_index, "");
-      Bytes::put_native_u2(p, cache_index);
-    } else {
-      (*bcp) = Bytecodes::_fast_aldc;
-      assert(cache_index == (u1)cache_index, "");
-      (*p) = (u1)cache_index;
+void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
+                                 bool reverse) {
+  if (!reverse) {
+    assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "not ldc bytecode");
+    address p = bcp + offset;
+    int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
+    constantTag tag = _pool->tag_at(cp_index).value();
+    if (tag.is_method_handle() || tag.is_method_type()) {
+      int cache_index = cp_entry_to_cp_cache(cp_index);
+      if (is_wide) {
+        (*bcp) = Bytecodes::_fast_aldc_w;
+        assert(cache_index == (u2)cache_index, "index overflow");
+        Bytes::put_native_u2(p, cache_index);
+      } else {
+        (*bcp) = Bytecodes::_fast_aldc;
+        assert(cache_index == (u1)cache_index, "index overflow");
+        (*p) = (u1)cache_index;
+      }
+    }
+  } else {
+    Bytecodes::Code rewritten_bc =
+              (is_wide ? Bytecodes::_fast_aldc_w : Bytecodes::_fast_aldc);
+    if ((*bcp) == rewritten_bc) {
+      address p = bcp + offset;
+      int cache_index = is_wide ? Bytes::get_native_u2(p) : (u1)(*p);
+      int pool_index = cp_cache_entry_pool_index(cache_index);
+      if (is_wide) {
+        (*bcp) = Bytecodes::_ldc_w;
+        assert(pool_index == (u2)pool_index, "index overflow");
+        Bytes::put_Java_u2(p, pool_index);
+      } else {
+        (*bcp) = Bytecodes::_ldc;
+        assert(pool_index == (u1)pool_index, "index overflow");
+        (*p) = (u1)pool_index;
+      }
     }
   }
 }

 // Rewrites a method given the index_map information
-void Rewriter::scan_method(methodOop method) {
+void Rewriter::scan_method(methodOop method, bool reverse) {

   int nof_jsrs = 0;
   bool has_monitor_bytecodes = false;
...
@@ -233,6 +279,13 @@ void Rewriter::scan_method(methodOop method) {
             ? Bytecodes::_fast_linearswitch
             : Bytecodes::_fast_binaryswitch
         );
 #endif
         break;
       }
+      case Bytecodes::_fast_linearswitch:
+      case Bytecodes::_fast_binaryswitch: {
+#ifndef CC_INTERP
+        (*bcp) = Bytecodes::_lookupswitch;
+#endif
+        break;
+      }
...
@@ -244,16 +297,18 @@ void Rewriter::scan_method(methodOop method) {
       case Bytecodes::_invokespecial  : // fall through
       case Bytecodes::_invokestatic   :
       case Bytecodes::_invokeinterface:
-        rewrite_member_reference(bcp, prefix_length+1);
+        rewrite_member_reference(bcp, prefix_length+1, reverse);
         break;
       case Bytecodes::_invokedynamic:
-        rewrite_invokedynamic(bcp, prefix_length+1);
+        rewrite_invokedynamic(bcp, prefix_length+1, reverse);
         break;
       case Bytecodes::_ldc:
-        maybe_rewrite_ldc(bcp, prefix_length+1, false);
+      case Bytecodes::_fast_aldc:
+        maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse);
         break;
       case Bytecodes::_ldc_w:
-        maybe_rewrite_ldc(bcp, prefix_length+1, true);
+      case Bytecodes::_fast_aldc_w:
+        maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse);
         break;
       case Bytecodes::_jsr            : // fall through
       case Bytecodes::_jsr_w          : nof_jsrs++; break;
...
@@ -273,12 +328,13 @@ void Rewriter::scan_method(methodOop method) {
   if (nof_jsrs > 0) {
     method->set_has_jsrs();
     // Second pass will revisit this method.
-    assert(method->has_jsrs(), "");
+    assert(method->has_jsrs(), "didn't we just set this?");
   }
 }

 // After constant pool is created, revisit methods containing jsrs.
 methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
+  ResourceMark rm(THREAD);
   ResolveOopMapConflicts romc(method);
   methodHandle original_method = method;
   method = romc.do_potential_rewrite(CHECK_(methodHandle()));
...
@@ -300,7 +356,6 @@ methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
   return method;
 }

-
 void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
   ResourceMark rm(THREAD);
   Rewriter rw(klass, klass->constants(), klass->methods(), CHECK);
...
@@ -343,34 +398,57 @@ Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArray
   }

   // rewrite methods, in two passes
-  int i, len = _methods->length();
+  int len = _methods->length();

-  for (i = len; --i >= 0; ) {
+  for (int i = len-1; i >= 0; i--) {
     methodOop method = (methodOop)_methods->obj_at(i);
     scan_method(method);
   }

   // allocate constant pool cache, now that we've seen all the bytecodes
-  make_constant_pool_cache(CHECK);
+  make_constant_pool_cache(THREAD);
+
+  // Restore bytecodes to their unrewritten state if there are exceptions
+  // rewriting bytecodes or allocating the cpCache
+  if (HAS_PENDING_EXCEPTION) {
+    restore_bytecodes();
+    return;
+  }
+}
+
+// Relocate jsr/rets in a method.  This can't be done with the rewriter
+// stage because it can throw other exceptions, leaving the bytecodes
+// pointing at constant pool cache entries.
+// Link and check jvmti dependencies while we're iterating over the methods.
+// JSR292 code calls with a different set of methods, so two entry points.
+void Rewriter::relocate_and_link(instanceKlassHandle this_oop, TRAPS) {
+  objArrayHandle methods(THREAD, this_oop->methods());
+  relocate_and_link(this_oop, methods, THREAD);
+}

-  for (i = len; --i >= 0; ) {
-    methodHandle m(THREAD, (methodOop)_methods->obj_at(i));
+void Rewriter::relocate_and_link(instanceKlassHandle this_oop,
+                                 objArrayHandle methods, TRAPS) {
+  int len = methods->length();
+  for (int i = len-1; i >= 0; i--) {
+    methodHandle m(THREAD, (methodOop)methods->obj_at(i));

     if (m->has_jsrs()) {
       m = rewrite_jsrs(m, CHECK);
       // Method might have gotten rewritten.
-      _methods->obj_at_put(i, m());
+      methods->obj_at_put(i, m());
     }

     // Set up method entry points for compiler and interpreter.
     m->link_method(m, CHECK);

     // This is for JVMTI and unrelated to relocator but the last thing we do
 #ifdef ASSERT
     if (StressMethodComparator) {
       static int nmc = 0;
       for (int j = i; j >= 0 && j >= i-4; j--) {
         if ((++nmc % 1000) == 0)  tty->print_cr("Have run MethodComparator %d times...", nmc);
-        bool z = MethodComparator::methods_EMCP(m(), (methodOop)_methods->obj_at(j));
+        bool z = MethodComparator::methods_EMCP(m(),
+                   (methodOop)methods->obj_at(j));
         if (j == i && !z) {
           tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
           assert(z, "method must compare equal to itself");
...
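The new reverse flag threaded through scan_method(), rewrite_member_reference(), rewrite_invokedynamic() and maybe_rewrite_ldc() makes each rewrite invertible, so restore_bytecodes() can undo the work when cpCache allocation fails. A minimal sketch of the idea follows (not part of the commit, and simplified: it ignores the Java-order versus native-order distinction the real code maintains; all indices and helpers here are made up):

  #include <cstdint>
  #include <cassert>
  #include <cstdio>

  // Two-byte operand helpers standing in for Bytes::put_Java_u2 / get_Java_u2.
  static void put_u2(uint8_t* p, uint16_t v) { p[0] = uint8_t(v >> 8); p[1] = uint8_t(v & 0xff); }
  static uint16_t get_u2(const uint8_t* p)   { return uint16_t((p[0] << 8) | p[1]); }

  int main() {
    uint8_t operand[2];                 // operand bytes of a hypothetical invoke bytecode

    const uint16_t cp_index    = 42;    // classfile-order constant pool index (made up)
    const uint16_t cache_index = 7;     // cpCache slot assigned to cp_index (made up)

    // Forward pass (reverse == false): the operand now names the cache entry.
    put_u2(operand, cache_index);

    // Reverse pass (reverse == true): read the cache index back, map it to the original
    // constant pool index, and restore the operand so the method is unrewritten again.
    assert(get_u2(operand) == cache_index);
    put_u2(operand, cp_index);
    assert(get_u2(operand) == cp_index);

    std::printf("operand restored to cp_index %u\n", get_u2(operand));
    return 0;
  }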
src/share/vm/interpreter/rewriter.hpp

...
@@ -85,13 +85,15 @@ class Rewriter: public StackObj {
   void compute_index_maps();
   void make_constant_pool_cache(TRAPS);
-  void scan_method(methodOop m);
-  methodHandle rewrite_jsrs(methodHandle m, TRAPS);
+  void scan_method(methodOop m, bool reverse = false);
   void rewrite_Object_init(methodHandle m, TRAPS);
-  void rewrite_member_reference(address bcp, int offset);
-  void rewrite_invokedynamic(address bcp, int offset);
-  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide);
+  void rewrite_member_reference(address bcp, int offset, bool reverse = false);
+  void rewrite_invokedynamic(address bcp, int offset, bool reverse = false);
+  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false);
+  // Revert bytecodes in case of an exception.
+  void restore_bytecodes();
+  static methodHandle rewrite_jsrs(methodHandle m, TRAPS);

  public:
   // Driver routine:
   static void rewrite(instanceKlassHandle klass, TRAPS);
...
@@ -100,6 +102,13 @@ class Rewriter: public StackObj {
   enum {
     _secondary_entry_tag = nth_bit(30)
   };
+
+  // Second pass, not gated by is_rewritten flag
+  static void relocate_and_link(instanceKlassHandle klass, TRAPS);
+  // JSR292 version to call with it's own methods.
+  static void relocate_and_link(instanceKlassHandle klass,
+                                objArrayHandle methods, TRAPS);
 };

 #endif // SHARE_VM_INTERPRETER_REWRITER_HPP
src/share/vm/memory/blockOffsetTable.cpp

 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -566,11 +566,17 @@ HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
     q = n;
     n += _sp->block_size(n);
     assert(n > q,
-           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT
-                   " _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
-                   n, last, _sp->bottom(), _sp->end()));
+           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT ","
+                   " while querying blk_start(" PTR_FORMAT ")"
+                   " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
+                   n, last, addr, _sp->bottom(), _sp->end()));
   }
-  assert(q <= addr, err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")", q, addr));
-  assert(addr <= n, err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")", addr, n));
+  assert(q <= addr,
+         err_msg("wrong order for current (" INTPTR_FORMAT ")"
+                 " <= arg (" INTPTR_FORMAT ")", q, addr));
+  assert(addr <= n,
+         err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
+                 addr, n));
   return q;
 }
...
src/share/vm/oops/instanceKlass.cpp

...
@@ -335,6 +335,9 @@ bool instanceKlass::link_class_impl(
         this_oop->rewrite_class(CHECK_false);
       }

+      // relocate jsrs and link methods after they are all rewritten
+      this_oop->relocate_and_link_methods(CHECK_false);
+
       // Initialize the vtable and interface table after
       // methods have been rewritten since rewrite may
       // fabricate new methodOops.
...
@@ -365,17 +368,8 @@ bool instanceKlass::link_class_impl(
 // Rewrite the byte codes of all of the methods of a class.
-// Three cases:
-//    During the link of a newly loaded class.
-//    During the preloading of classes to be written to the shared spaces.
-//      - Rewrite the methods and update the method entry points.
-//
-//    During the link of a class in the shared spaces.
-//      - The methods were already rewritten, update the metho entry points.
-//
 // The rewriter must be called exactly once. Rewriting must happen after
 // verification but before the first method of the class is executed.
-
 void instanceKlass::rewrite_class(TRAPS) {
   assert(is_loaded(), "must be loaded");
   instanceKlassHandle this_oop(THREAD, this->as_klassOop());
...
@@ -383,10 +377,19 @@ void instanceKlass::rewrite_class(TRAPS) {
     assert(this_oop()->is_shared(), "rewriting an unshared class?");
     return;
   }
-  Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
+  Rewriter::rewrite(this_oop, CHECK);
   this_oop->set_rewritten();
 }

+// Now relocate and link method entry points after class is rewritten.
+// This is outside is_rewritten flag. In case of an exception, it can be
+// executed more than once.
+void instanceKlass::relocate_and_link_methods(TRAPS) {
+  assert(is_loaded(), "must be loaded");
+  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
+  Rewriter::relocate_and_link(this_oop, CHECK);
+}
+
 void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
   // Make sure klass is linked (verified) before initialization
...
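The instanceKlass change splits linking into rewrite_class(), which stays behind the is_rewritten flag and so runs exactly once, and relocate_and_link_methods(), which sits outside that flag and may be re-run after an exception. A toy sketch of that control flow (not part of the commit; the names below are stand-ins, not the HotSpot API):

  #include <iostream>

  struct FakeKlass {
    bool rewritten;
    FakeKlass() : rewritten(false) {}

    void rewrite_class() {
      if (rewritten) return;            // analogue of the is_rewritten guard: exactly once
      // ... rewrite bytecodes and build the cpCache; on failure the rewriter would
      //     call restore_bytecodes() and propagate the exception instead ...
      rewritten = true;
    }

    void relocate_and_link_methods() {
      // deliberately outside the guard: safe to execute again after an earlier exception
      // ... relocate jsr/ret bytecodes and link method entry points ...
    }
  };

  int main() {
    FakeKlass k;
    k.rewrite_class();
    k.relocate_and_link_methods();
    k.rewrite_class();                  // second call is a no-op
    std::cout << "rewritten = " << std::boolalpha << k.rewritten << std::endl;
    return 0;
  }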
src/share/vm/oops/instanceKlass.hpp

...
@@ -392,6 +392,7 @@ class instanceKlass: public Klass {
   bool link_class_or_fail(TRAPS); // returns false on failure
   void unlink_class();
   void rewrite_class(TRAPS);
+  void relocate_and_link_methods(TRAPS);
   methodOop class_initializer();

   // set the class to initialized if no static initializer is present
...
src/share/vm/oops/methodOop.cpp

...
@@ -693,7 +693,10 @@ void methodOopDesc::unlink_method() {
 // Called when the method_holder is getting linked. Setup entrypoints so the method
 // is ready to be called from interpreter, compiler, and vtables.
 void methodOopDesc::link_method(methodHandle h_method, TRAPS) {
-  assert(_i2i_entry == NULL, "should only be called once");
+  // If the code cache is full, we may reenter this function for the
+  // leftover methods that weren't linked.
+  if (_i2i_entry != NULL) return;
+
   assert(_adapter == NULL, "init'd to NULL");
   assert( _code == NULL, "nothing compiled yet" );
...
src/share/vm/prims/jvmtiRedefineClasses.cpp

...
@@ -992,6 +992,9 @@ jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) {
     }

     Rewriter::rewrite(scratch_class, THREAD);
+    if (!HAS_PENDING_EXCEPTION) {
+      Rewriter::relocate_and_link(scratch_class, THREAD);
+    }
     if (HAS_PENDING_EXCEPTION) {
       Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name();
       CLEAR_PENDING_EXCEPTION;
...
src/share/vm/prims/methodHandleWalk.cpp

...
@@ -1604,6 +1604,7 @@ methodHandle MethodHandleCompiler::get_method_oop(TRAPS) const {
   objArrayHandle methods(THREAD, m_array);
   methods->obj_at_put(0, m());
   Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(empty));  // Use fake class.
+  Rewriter::relocate_and_link(_target_klass(), methods, CHECK_(empty));  // Use fake class.

   // Set the invocation counter's count to the invoke count of the
   // original call site.
...