openanolis/dragonwell8_hotspot
Commit be5b9106
Authored May 17, 2010 by never

6950075: nmethod sweeper should operate concurrently
Reviewed-by: never, kvn
Contributed-by: eric.caspole@amd.com
Parent: bbc9d454
Showing 9 changed files with 171 additions and 89 deletions (+171, -89):
src/share/vm/code/codeCache.cpp            (+21  -3)
src/share/vm/code/codeCache.hpp            (+2   -0)
src/share/vm/code/nmethod.cpp              (+15  -27)
src/share/vm/code/nmethod.hpp              (+2   -7)
src/share/vm/compiler/compileBroker.cpp    (+14  -1)
src/share/vm/runtime/globals.hpp           (+3   -0)
src/share/vm/runtime/safepoint.cpp         (+1   -1)
src/share/vm/runtime/sweeper.cpp           (+108 -49)
src/share/vm/runtime/sweeper.hpp           (+5   -1)
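The heart of the change: sweeping moves out of the safepoint. scan_stacks() still runs at each safepoint to mark activations and set up a traversal, but the sweep itself is now done by compiler threads, serialized by a compare-and-swap on _sweep_started. Below is a minimal standalone sketch of that handoff, using std::atomic in place of HotSpot's Atomic::cmpxchg; the names mirror the patch, but none of this is HotSpot code.

    #include <atomic>
    #include <cstdio>

    std::atomic<int> sweep_started{0}; // stands in for NMethodSweeper::_sweep_started
    bool do_sweep = false;             // stands in for NMethodSweeper::_do_sweep
    int  invocations = 0;              // sweep passes left in the current traversal

    void sweep_code_cache() { std::puts("sweep one fraction of the code cache"); }

    // Safepoint side: publish that a stack scan happened; never sweeps itself.
    void scan_stacks() {
      do_sweep = true;
      if (invocations == 0) invocations = 4; // NmethodSweepFraction in the patch
    }

    // Compiler-thread side: at most one thread wins the CAS and sweeps.
    void possibly_sweep() {
      if (!do_sweep || invocations <= 0) return;
      int expected = 0;
      if (!sweep_started.compare_exchange_strong(expected, 1)) return; // lost the race
      sweep_code_cache();
      --invocations;
      sweep_started.store(0); // reopen the gate for the next sweep pass
    }

    int main() {
      scan_stacks();
      possibly_sweep(); // in HotSpot this is called from CompileQueue::get()
    }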
src/share/vm/code/codeCache.cpp

@@ -124,6 +124,23 @@ nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
   return (nmethod*)cb;
 }

+nmethod* CodeCache::first_nmethod() {
+  assert_locked_or_safepoint(CodeCache_lock);
+  CodeBlob* cb = first();
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}
+
+nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  cb = next(cb);
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}
+
 CodeBlob* CodeCache::allocate(int size) {
   // Do not seize the CodeCache lock here--if the caller has not

@@ -414,7 +431,7 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
   saved->set_speculatively_disconnected(false);
   saved->set_saved_nmethod_link(NULL);
   if (PrintMethodFlushing) {
-    saved->print_on(tty, " ### nmethod is reconnected");
+    saved->print_on(tty, " ### nmethod is reconnected\n");
   }
   if (LogCompilation && (xtty != NULL)) {
     ttyLocker ttyl;

@@ -432,7 +449,8 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
 }

 void CodeCache::remove_saved_code(nmethod* nm) {
-  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  // For conc swpr this will be called with CodeCache_lock taken by caller
+  assert_locked_or_safepoint(CodeCache_lock);
   assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
   nmethod* saved = _saved_nmethods;
   nmethod* prev = NULL;

@@ -463,7 +481,7 @@ void CodeCache::speculatively_disconnect(nmethod* nm) {
   nm->set_saved_nmethod_link(_saved_nmethods);
   _saved_nmethods = nm;
   if (PrintMethodFlushing) {
-    nm->print_on(tty, " ### nmethod is speculatively disconnected");
+    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
   }
   if (LogCompilation && (xtty != NULL)) {
     ttyLocker ttyl;
src/share/vm/code/codeCache.hpp

@@ -102,6 +102,8 @@ class CodeCache : AllStatic {
   static CodeBlob* next (CodeBlob* cb);
   static CodeBlob* alive(CodeBlob* cb);
   static nmethod* alive_nmethod(CodeBlob* cb);
+  static nmethod* first_nmethod();
+  static nmethod* next_nmethod(CodeBlob* cb);
   static int nof_blobs() { return _number_of_blobs; }

   // GC support
src/share/vm/code/nmethod.cpp

@@ -1014,9 +1014,7 @@ void nmethod::clear_inline_caches() {

 void nmethod::cleanup_inline_caches() {
-  assert(SafepointSynchronize::is_at_safepoint() &&
-        !CompiledIC_lock->is_locked() &&
-        !Patching_lock->is_locked(), "no threads must be updating the inline caches by them selfs");
+  assert_locked_or_safepoint(CompiledIC_lock);

   // If the method is not entrant or zombie then a JMP is plastered over the
   // first few bytes.  If an oop in the old code was there, that oop

@@ -1071,7 +1069,6 @@ void nmethod::mark_as_seen_on_stack() {
 // Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
 bool nmethod::can_not_entrant_be_converted() {
   assert(is_not_entrant(), "must be a non-entrant method");
-  assert(SafepointSynchronize::is_at_safepoint(), "must be called during a safepoint");

   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
   // count can be greater than the stack traversal count before it hits the

@@ -1127,7 +1124,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
     _method = NULL;            // Clear the method of this dead nmethod
   }
   // Make the class unloaded - i.e., change state and notify sweeper
-  check_safepoint();
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   if (is_in_use()) {
     // Transitioning directly from live to unloaded -- so
     // we need to force a cache clean-up; remember this

@@ -1220,17 +1217,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
     assert(NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
   }

-  // When the nmethod becomes zombie it is no longer alive so the
-  // dependencies must be flushed. nmethods in the not_entrant
-  // state will be flushed later when the transition to zombie
-  // happens or they get unloaded.
-  if (state == zombie) {
-    assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
-    flush_dependencies(NULL);
-  } else {
-    assert(state == not_entrant, "other cases may need to be handled differently");
-  }
-
   was_alive = is_in_use(); // Read state under lock

   // Change state

@@ -1241,6 +1227,17 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   }
   // leave critical region under Patching_lock

+  // When the nmethod becomes zombie it is no longer alive so the
+  // dependencies must be flushed. nmethods in the not_entrant
+  // state will be flushed later when the transition to zombie
+  // happens or they get unloaded.
+  if (state == zombie) {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    flush_dependencies(NULL);
+  } else {
+    assert(state == not_entrant, "other cases may need to be handled differently");
+  }
+
   if (state == not_entrant) {
     Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
   } else {

@@ -1310,21 +1307,13 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   return true;
 }

-#ifndef PRODUCT
-void nmethod::check_safepoint() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-}
-#endif
-
-
 void nmethod::flush() {
   // Note that there are no valid oops in the nmethod anymore.
   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");

   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
-  check_safepoint();
+  assert_locked_or_safepoint(CodeCache_lock);

   // completely deallocate this method
   EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");

@@ -1373,7 +1362,7 @@ void nmethod::flush() {
 // notifies instanceKlasses that are reachable

 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
+  assert_locked_or_safepoint(CodeCache_lock);
   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
   "is_alive is non-NULL if and only if we are called during GC");
   if (!has_flushed_dependencies()) {

@@ -2266,7 +2255,6 @@ void nmethod::print() const {
   tty->print(" for method " INTPTR_FORMAT , (address)method());
   tty->print(" { ");
   if (version())        tty->print("v%d ", version());
-  if (level())          tty->print("l%d ", level());
   if (is_in_use())      tty->print("in_use ");
   if (is_not_entrant()) tty->print("not_entrant ");
   if (is_zombie())      tty->print("zombie ");
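With the sweep now running outside safepoints, nmethod.cpp trades its "must be at safepoint" asserts for assert_locked_or_safepoint(CodeCache_lock): flushing is legal either inside a safepoint (VM-operation/GC path) or from a compiler thread holding the lock (concurrent sweeper path). A rough standalone illustration of that weaker precondition, with std::mutex and an atomic flag standing in for HotSpot's Mutex and SafepointSynchronize; all names here are illustrative:

    #include <atomic>
    #include <cassert>
    #include <mutex>
    #include <thread>

    std::atomic<bool> at_safepoint{false}; // set by the VM thread during a stop-the-world pause

    // A mutex that remembers its owner, so callers can assert ownership.
    struct OwnedMutex {
      std::mutex m;
      std::atomic<std::thread::id> owner{};
      void lock()   { m.lock(); owner.store(std::this_thread::get_id()); }
      void unlock() { owner.store(std::thread::id()); m.unlock(); }
      bool owned_by_self() const { return owner.load() == std::this_thread::get_id(); }
    };

    OwnedMutex code_cache_lock; // stands in for CodeCache_lock

    // The precondition the patch moves to: hold the lock, or be at a safepoint.
    void assert_locked_or_safepoint(const OwnedMutex& l) {
      assert(l.owned_by_self() || at_safepoint.load());
    }

    int main() {
      code_cache_lock.lock();
      assert_locked_or_safepoint(code_cache_lock); // ok: we hold the lock
      code_cache_lock.unlock();
    }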
src/share/vm/code/nmethod.hpp

@@ -82,7 +82,6 @@ class PcDescCache VALUE_OBJ_CLASS_SPEC {
 struct nmFlags {
   friend class VMStructs;
   unsigned int version:8;                 // version number (0 = first version)
-  unsigned int level:4;                   // optimization level
   unsigned int age:4;                     // age (in # of sweep steps)

   unsigned int state:2;                   // {alive, zombie, unloaded)

@@ -410,14 +409,13 @@ class nmethod : public CodeBlob {
   void  flush_dependencies(BoolObjectClosure* is_alive);
   bool  has_flushed_dependencies()        { return flags.hasFlushedDependencies; }
   void  set_has_flushed_dependencies()    {
-    check_safepoint();
     assert(!has_flushed_dependencies(), "should only happen once");
     flags.hasFlushedDependencies = 1;
   }

   bool  is_marked_for_reclamation() const { return flags.markedForReclamation; }
-  void  mark_for_reclamation()            { check_safepoint(); flags.markedForReclamation = 1; }
-  void  unmark_for_reclamation()          { check_safepoint(); flags.markedForReclamation = 0; }
+  void  mark_for_reclamation()            { flags.markedForReclamation = 1; }
+  void  unmark_for_reclamation()          { flags.markedForReclamation = 0; }

   bool  has_unsafe_access() const         { return flags.has_unsafe_access; }
   void  set_has_unsafe_access(bool z)     { flags.has_unsafe_access = z; }

@@ -428,9 +426,6 @@ class nmethod : public CodeBlob {
   bool  is_speculatively_disconnected() const { return flags.speculatively_disconnected; }
   void  set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }

-  int   level() const                     { return flags.level; }
-  void  set_level(int newLevel)           { check_safepoint(); flags.level = newLevel; }
-
   int   comp_level() const                { return _comp_level; }

   int   version() const                   { return flags.version; }
src/share/vm/compiler/compileBroker.cpp

@@ -461,13 +461,26 @@ void CompileQueue::add(CompileTask* task) {
 //
 // Get the next CompileTask from a CompileQueue
 CompileTask* CompileQueue::get() {
+  NMethodSweeper::possibly_sweep();
+
   MutexLocker locker(lock());
-
   // Wait for an available CompileTask.
   while (_first == NULL) {
     // There is no work to be done right now.  Wait.
-    lock()->wait();
+    if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
+      // During the emergency sweeping periods, wake up and sweep occasionally
+      bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
+      if (timedout) {
+        MutexUnlocker ul(lock());
+        // When otherwise not busy, run nmethod sweeping
+        NMethodSweeper::possibly_sweep();
+      }
+    } else {
+      // During normal operation no need to wake up on timer
+      lock()->wait();
+    }
   }
   CompileTask* task = _first;
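CompileQueue::get() is where compiler threads donate idle time to the sweeper: they sweep once on entry, and during emergency code-cache flushing they replace the unbounded wait with a timed one so an otherwise-idle thread keeps sweeping every NmethodSweepCheckInterval seconds. Here is a sketch of the same wait loop with std::condition_variable standing in for HotSpot's Monitor; the queue, flag, and helper names are illustrative:

    #include <chrono>
    #include <condition_variable>
    #include <deque>
    #include <mutex>

    std::mutex              queue_lock;   // stands in for the CompileQueue's Monitor
    std::condition_variable queue_cv;
    std::deque<int>         tasks;        // stands in for the CompileTask list
    bool      code_cache_needs_flushing = true;
    const int NmethodSweepCheckInterval = 5; // seconds, matching the new flag's default

    void possibly_sweep() { /* sweeper entry point, as in the sketch above */ }

    int get_task() {
      possibly_sweep(); // sweep once on entry, before taking the queue lock
      std::unique_lock<std::mutex> lk(queue_lock);
      while (tasks.empty()) {
        if (code_cache_needs_flushing) {
          // Emergency period: block with a timeout so the idle compiler thread
          // wakes up every few seconds and helps sweep.
          auto status = queue_cv.wait_for(lk, std::chrono::seconds(NmethodSweepCheckInterval));
          if (status == std::cv_status::timeout) {
            lk.unlock();      // drop the queue lock while sweeping, as the patch does
            possibly_sweep();
            lk.lock();
          }
        } else {
          queue_cv.wait(lk);  // normal operation: no timed wake-ups
        }
      }
      int task = tasks.front();
      tasks.pop_front();
      return task;
    }

    int main() {
      tasks.push_back(42);
      return get_task() == 42 ? 0 : 1;
    }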
src/share/vm/runtime/globals.hpp

@@ -2756,6 +2756,9 @@ class CommandLineFlags {
   product(intx, NmethodSweepFraction, 4,                                    \
           "Number of invocations of sweeper to cover all nmethods")        \
                                                                            \
+  product(intx, NmethodSweepCheckInterval, 5,                              \
+          "Compilers wake up every n seconds to possibly sweep nmethods")  \
+                                                                           \
   notproduct(intx, MemProfilingInterval, 500,                              \
           "Time between each invocation of the MemProfiler")               \
                                                                            \
src/share/vm/runtime/safepoint.cpp

@@ -472,7 +472,7 @@ void SafepointSynchronize::do_cleanup_tasks() {
   }

   TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
-  NMethodSweeper::sweep();
+  NMethodSweeper::scan_stacks();
 }
src/share/vm/runtime/sweeper.cpp

@@ -33,6 +33,8 @@ int NMethodSweeper::_invocations = 0; // No. of invocations left until we
 jint      NMethodSweeper::_locked_seen = 0;
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 bool      NMethodSweeper::_rescan = false;
+bool      NMethodSweeper::_do_sweep = false;
+jint      NMethodSweeper::_sweep_started = 0;
 bool      NMethodSweeper::_was_full = false;
 jint      NMethodSweeper::_advise_to_sweep = 0;
 jlong     NMethodSweeper::_last_was_full = 0;

@@ -50,14 +52,20 @@ public:
 };
 static MarkActivationClosure mark_activation_closure;

-void NMethodSweeper::sweep() {
+void NMethodSweeper::scan_stacks() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   if (!MethodFlushing) return;
+  _do_sweep = true;

   // No need to synchronize access, since this is always executed at a
   // safepoint.  If we aren't in the middle of scan and a rescan
-  // hasn't been requested then just return.
-  if (_current == NULL && !_rescan) return;
+  // hasn't been requested then just return. If UseCodeCacheFlushing is on and
+  // code cache flushing is in progress, don't skip sweeping to help make progress
+  // clearing space in the code cache.
+  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
+    _do_sweep = false;
+    return;
+  }

   // Make sure CompiledIC_lock in unlocked, since we might update some
   // inline caches. If it is, we just bail-out and try later.

@@ -68,7 +76,7 @@ void NMethodSweeper::scan_stacks() {
   if (_current == NULL) {
     _seen        = 0;
     _invocations = NmethodSweepFraction;
-    _current     = CodeCache::first();
+    _current     = CodeCache::first_nmethod();
     _traversals  += 1;
     if (PrintMethodFlushing) {
       tty->print_cr("### Sweep: stack traversal %d", _traversals);

@@ -81,48 +89,9 @@ void NMethodSweeper::scan_stacks() {
     _not_entrant_seen_on_stack = 0;
   }

-  if (PrintMethodFlushing && Verbose) {
-    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
-  }
-
-  // We want to visit all nmethods after NmethodSweepFraction invocations.
-  // If invocation is 1 we do the rest
-  int todo = CodeCache::nof_blobs();
-  if (_invocations != 1) {
-    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
-    _invocations--;
-  }
-
-  for(int i = 0; i < todo && _current != NULL; i++) {
-    CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current
-    if (_current->is_nmethod()) {
-      process_nmethod((nmethod *)_current);
-    }
-    _seen++;
-    _current = next;
-  }
-  // Because we could stop on a codeBlob other than an nmethod we skip forward
-  // to the next nmethod (if any). codeBlobs other than nmethods can be freed
-  // async to us and make _current invalid while we sleep.
-  while (_current != NULL && !_current->is_nmethod()) {
-    _current = CodeCache::next(_current);
-  }
-
-  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
-    // we've completed a scan without making progress but there were
-    // nmethods we were unable to process either because they were
-    // locked or were still on stack.  We don't have to aggresively
-    // clean them up so just stop scanning.  We could scan once more
-    // but that complicates the control logic and it's unlikely to
-    // matter much.
-    if (PrintMethodFlushing) {
-      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
-    }
-  }
-
   if (UseCodeCacheFlushing) {
     if (!CodeCache::needs_flushing()) {
-      // In a safepoint, no race with setters
+      // scan_stacks() runs during a safepoint, no race with setters
       _advise_to_sweep = 0;
     }

@@ -155,13 +124,99 @@ void NMethodSweeper::scan_stacks() {
   }
 }

+void NMethodSweeper::possibly_sweep() {
+  if ((!MethodFlushing) || (!_do_sweep)) return;
+
+  if (_invocations > 0) {
+    // Only one thread at a time will sweep
+    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
+    if (old != 0) {
+      return;
+    }
+    sweep_code_cache();
+  }
+  _sweep_started = 0;
+}
+
+void NMethodSweeper::sweep_code_cache() {
+#ifdef ASSERT
+  jlong sweep_start;
+  if (PrintMethodFlushing) {
+    sweep_start = os::javaTimeMillis();
+  }
+#endif
+  if (PrintMethodFlushing && Verbose) {
+    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
+  }
+
+  // We want to visit all nmethods after NmethodSweepFraction invocations.
+  // If invocation is 1 we do the rest
+  int todo = CodeCache::nof_blobs();
+  if (_invocations > 1) {
+    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
+  }
+
+  // Compilers may check to sweep more often than stack scans happen,
+  // don't keep trying once it is all scanned
+  _invocations--;
+
+  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+
+    for(int i = 0; i < todo && _current != NULL; i++) {
+
+      // Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
+      // Other blobs can be deleted by other threads
+      // Read next before we potentially delete current
+      CodeBlob* next = CodeCache::next_nmethod(_current);
+
+      // Now ready to process nmethod and give up CodeCache_lock
+      {
+        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+        process_nmethod((nmethod *)_current);
+      }
+      _seen++;
+      _current = next;
+    }
+
+    // Skip forward to the next nmethod (if any). Code blobs other than nmethods
+    // can be freed async to us and make _current invalid while we sleep.
+    _current = CodeCache::next_nmethod(_current);
+  }
+
+  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+    // we've completed a scan without making progress but there were
+    // nmethods we were unable to process either because they were
+    // locked or were still on stack.  We don't have to aggresively
+    // clean them up so just stop scanning.  We could scan once more
+    // but that complicates the control logic and it's unlikely to
+    // matter much.
+    if (PrintMethodFlushing) {
+      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
+    }
+  }
+
+#ifdef ASSERT
+  if (PrintMethodFlushing) {
+    jlong sweep_end = os::javaTimeMillis();
+    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
+  }
+#endif
+}

 void NMethodSweeper::process_nmethod(nmethod *nm) {
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
   // Skip methods that are currently referenced by the VM
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
     if (nm->is_alive()) {
       // Clean-up all inline caches that points to zombie/non-reentrant methods
+      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
     } else {
       _locked_seen++;

@@ -178,6 +233,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
     if (PrintMethodFlushing && Verbose) {
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
     }
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     nm->flush();
   } else {
     if (PrintMethodFlushing && Verbose) {

@@ -197,10 +253,11 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
       _rescan = true;
     } else {
       // Still alive, clean up its inline caches
+      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
       // we coudn't transition this nmethod so don't immediately
       // request a rescan.  If this method stays on the stack for a
-      // long time we don't want to keep rescanning at every safepoint.
+      // long time we don't want to keep rescanning the code cache.
       _not_entrant_seen_on_stack++;
     }
   } else if (nm->is_unloaded()) {

@@ -209,6 +266,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
     if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nm->flush();
     } else {
       nm->make_zombie();

@@ -227,6 +285,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
     }

     // Clean-up all inline caches that points to zombie/non-reentrant methods
+    MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
   }
 }

@@ -235,8 +294,8 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
 // they will call a vm op that comes here. This code attempts to speculatively
 // unload the oldest half of the nmethods (based on the compile job id) by
 // saving the old code in a list in the CodeCache. Then
-// execution resumes. If a method so marked is not called by the second
-// safepoint from the current one, the nmethod will be marked non-entrant and
+// execution resumes. If a method so marked is not called by the second sweeper
+// stack traversal after the current one, the nmethod will be marked non-entrant and
 // got rid of by normal sweeping. If the method is called, the methodOop's
 // _code field is restored and the methodOop/nmethod
 // go back to their normal state.

@@ -364,8 +423,8 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
       xtty->end_elem();
     }

-    // Shut off compiler. Sweeper will run exiting from this safepoint
-    // and turn it back on if it clears enough space
+    // Shut off compiler. Sweeper will start over with a new stack scan and
+    // traversal cycle and turn it back on if it clears enough space.
     if (was_full()) {
       _last_was_full = os::javaTimeMillis();
       CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
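The new sweep_code_cache() visits roughly (nof_blobs - _seen) / _invocations nmethods per pass and drops CodeCache_lock around every process_nmethod() call, relying on next_nmethod() to step over blobs that other threads may free in the meantime. The single-threaded sketch below reproduces only that control flow over a std::list; a bare list iterator would not survive concurrent erasure, which is exactly the hazard next_nmethod() and the read-next-under-lock pattern guard against in the real code. Names are illustrative.

    #include <iterator>
    #include <list>
    #include <mutex>

    std::mutex     cache_lock; // stands in for CodeCache_lock
    std::list<int> cache;      // stands in for the code cache's nmethod list
    std::list<int>::iterator current;

    void process_nmethod(int) { /* may take CompiledIC_lock, flush code, block */ }

    // One sweep pass over about 1/invocations of the remaining cache. The lock
    // is held only to advance the cursor and is released around each
    // process_nmethod() call, the same shape as the patched sweep_code_cache().
    void sweep_fraction(int seen, int invocations) {
      std::unique_lock<std::mutex> lk(cache_lock);
      int todo = (int)cache.size();
      if (invocations > 1) {
        todo = ((int)cache.size() - seen) / invocations; // the patch's chunk math
      }
      for (int i = 0; i < todo && current != cache.end(); i++) {
        auto next = std::next(current); // read next before processing current
        lk.unlock();                    // give up the lock while processing
        process_nmethod(*current);
        lk.lock();
        current = next;
      }
    }

    int main() {
      cache = {1, 2, 3, 4, 5, 6, 7, 8};
      current = cache.begin();
      sweep_fraction(/*seen=*/0, /*invocations=*/4); // first of four passes
    }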
src/share/vm/runtime/sweeper.hpp

@@ -35,6 +35,8 @@ class NMethodSweeper : public AllStatic {
   static bool      _rescan;          // Indicates that we should do a full rescan of the
                                      // of the code cache looking for work to do.
+  static bool      _do_sweep;        // Flag to skip the conc sweep if no stack scan happened
+  static jint      _sweep_started;   // Flag to control conc sweeper
   static int       _locked_seen;     // Number of locked nmethods encountered during the scan
   static int       _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack

@@ -48,7 +50,9 @@ class NMethodSweeper : public AllStatic {
  public:
   static long traversal_count() { return _traversals; }

-  static void sweep();  // Invoked at the end of each safepoint
+  static void scan_stacks();      // Invoked at the end of each safepoint
+  static void sweep_code_cache(); // Concurrent part of sweep job
+  static void possibly_sweep();   // Compiler threads call this to sweep

   static void notify(nmethod* nm) {
     // Perform a full scan of the code cache from the beginning.  No