openanolis / dragonwell8_hotspot

Commit 0f0ccf63
Merge of parents 44a04f60 and ea49eda6
Authored May 20, 2010 by jrose

Showing 19 changed files with 277 additions and 192 deletions (+277 -192)
Changed files:

  src/os/linux/vm/os_linux.cpp                +1    -1
  src/share/vm/adlc/formssel.cpp              +5    -3
  src/share/vm/adlc/formssel.hpp              +3    -3
  src/share/vm/adlc/output_c.cpp              +6    -6
  src/share/vm/adlc/output_h.cpp              +3    -8
  src/share/vm/c1/c1_GraphBuilder.cpp         +5    -1
  src/share/vm/code/codeCache.cpp             +21   -3
  src/share/vm/code/codeCache.hpp             +2    -0
  src/share/vm/code/nmethod.cpp               +15   -27
  src/share/vm/code/nmethod.hpp               +2    -7
  src/share/vm/compiler/compileBroker.cpp     +14   -1
  src/share/vm/opto/addnode.cpp               +0    -65
  src/share/vm/opto/addnode.hpp               +0    -1
  src/share/vm/opto/cfgnode.cpp               +58   -0
  src/share/vm/opto/escape.cpp                +25   -15
  src/share/vm/runtime/globals.hpp            +3    -0
  src/share/vm/runtime/safepoint.cpp          +1    -1
  src/share/vm/runtime/sweeper.cpp            +108  -49
  src/share/vm/runtime/sweeper.hpp            +5    -1
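Reading the diffs below together: the core of this merge restructures the nmethod sweeper, splitting it into a safepoint-time stack scan (NMethodSweeper::scan_stacks) and a concurrent sweep phase (possibly_sweep / sweep_code_cache) that compiler threads drive between compile jobs, with the safepoint-only assertions in nmethod.cpp relaxed accordingly. The remaining files carry ADLC bottom-type plumbing (captures_bottom_type / mach_base_class now take a FormDict), removal of AddPNode::mach_bottom_type, a new Phi-of-AddP transform in C2's cfgnode.cpp, and escape-analysis hardening for StressReflectiveCode, merged in alongside.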
src/os/linux/vm/os_linux.cpp

@@ -2788,7 +2788,7 @@ char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
   }

   // attach to the region
-  addr = (char*)shmat(shmid, NULL, 0);
+  addr = (char*)shmat(shmid, req_addr, 0);
   int err = errno;

   // Remove shmid. If shmat() is successful, the actual shared memory segment
src/share/vm/adlc/formssel.cpp

@@ -735,7 +735,7 @@ int InstructForm::memory_operand(FormDict &globals) const {
 // This instruction captures the machine-independent bottom_type
 // Expected use is for pointer vs oop determination for LoadP
-bool InstructForm::captures_bottom_type() const {
+bool InstructForm::captures_bottom_type(FormDict &globals) const {
   if( _matrule && _matrule->_rChild &&
       (!strcmp(_matrule->_rChild->_opType,"CastPP")  || // new result type
        !strcmp(_matrule->_rChild->_opType,"CastX2P") || // new result type

@@ -748,6 +748,8 @@ bool InstructForm::captures_bottom_type() const {
   else if ( is_ideal_load()  == Form::idealP )  return true;
   else if ( is_ideal_store() != Form::none   )  return true;

+  if (needs_base_oop_edge(globals)) return true;
+
   return false;
 }

@@ -1061,7 +1063,7 @@ const char *InstructForm::reduce_left(FormDict &globals) const {
 // Base class for this instruction, MachNode except for calls
-const char *InstructForm::mach_base_class() const {
+const char *InstructForm::mach_base_class(FormDict &globals) const {
   if( is_ideal_call() == Form::JAVA_STATIC ) {
     return "MachCallStaticJavaNode";
   }

@@ -1092,7 +1094,7 @@ const char *InstructForm::mach_base_class() const {
   else if (is_ideal_nop()) {
     return "MachNopNode";
   }
-  else if (captures_bottom_type()) {
+  else if (captures_bottom_type(globals)) {
     return "MachTypeNode";
   } else {
     return "MachNode";
src/share/vm/adlc/formssel.hpp

@@ -188,7 +188,7 @@ public:
   // This instruction captures the machine-independent bottom_type
   // Expected use is for pointer vs oop determination for LoadP
-  virtual bool        captures_bottom_type() const;
+  virtual bool        captures_bottom_type(FormDict &globals) const;

   virtual const char *cost();      // Access ins_cost attribute
   virtual uint        num_opnds(); // Count of num_opnds for MachNode class

@@ -229,7 +229,7 @@ public:
   const char         *reduce_left(FormDict &globals) const;
   // Base class for this instruction, MachNode except for calls
-  virtual const char *mach_base_class() const;
+  virtual const char *mach_base_class(FormDict &globals) const;
   // Check if this instruction can cisc-spill to 'alternate'
   bool                cisc_spills_to(ArchDesc &AD, InstructForm *alternate);

@@ -252,7 +252,7 @@ public:
   bool                has_short_branch_form() { return _short_branch_form != NULL; }
   // Output short branch prototypes and method bodies
   void                declare_short_branch_methods(FILE *fp_cpp);
-  bool                define_short_branch_methods(FILE *fp_cpp);
+  bool                define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp);

   uint                alignment() { return _alignment; }
   void                set_alignment(uint val) { _alignment = val; }
src/share/vm/adlc/output_c.cpp

@@ -1382,7 +1382,7 @@ static void generate_peepreplace( FILE *fp, FormDict &globals, PeepMatch *pmatch
             inst_num, unmatched_edge);
   }
   // If new instruction captures bottom type
-  if( root_form->captures_bottom_type() ) {
+  if( root_form->captures_bottom_type(globals) ) {
     // Get bottom type from instruction whose result we are replacing
     fprintf(fp, "        root->_bottom_type = inst%d->bottom_type();\n", inst_num);
   }

@@ -2963,7 +2963,7 @@ void ArchDesc::defineClasses(FILE *fp) {
     used |= instr->define_cisc_version(*this, fp);
     // Output code to convert to the short branch version, if applicable
-    used |= instr->define_short_branch_methods(fp);
+    used |= instr->define_short_branch_methods(*this, fp);
   }

   // Construct the method called by cisc_version() to copy inputs and operands.

@@ -3708,7 +3708,7 @@ void ArchDesc::buildMachNode(FILE *fp_cpp, InstructForm *inst, const char *inden
   }

   // Fill in the bottom_type where requested
-  if ( inst->captures_bottom_type() ) {
+  if ( inst->captures_bottom_type(_globalNames) ) {
     fprintf(fp_cpp, "%s node->_bottom_type = _leaf->bottom_type();\n", indent);
   }
   if( inst->is_ideal_if() ) {

@@ -3762,7 +3762,7 @@ bool InstructForm::define_cisc_version(ArchDesc &AD, FILE *fp_cpp) {
   // Create the MachNode object
   fprintf(fp_cpp, "  %sNode *node = new (C) %sNode();\n", name, name);
   // Fill in the bottom_type where requested
-  if ( this->captures_bottom_type() ) {
+  if ( this->captures_bottom_type(AD.globalNames()) ) {
     fprintf(fp_cpp, "  node->_bottom_type = bottom_type();\n");
   }

@@ -3798,7 +3798,7 @@ void InstructForm::declare_short_branch_methods(FILE *fp_hpp) {
 //---------------------------define_short_branch_methods-----------------------
 // Build definitions for short branch methods
-bool InstructForm::define_short_branch_methods(FILE *fp_cpp) {
+bool InstructForm::define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp) {
   if (has_short_branch_form()) {
     InstructForm *short_branch = short_branch_form();
     const char   *name         = short_branch->_ident;

@@ -3813,7 +3813,7 @@ bool InstructForm::define_short_branch_methods(FILE *fp_cpp) {
       fprintf(fp_cpp, "    node->_fcnt = _fcnt;\n");
     }
     // Fill in the bottom_type where requested
-    if ( this->captures_bottom_type() ) {
+    if ( this->captures_bottom_type(AD.globalNames()) ) {
       fprintf(fp_cpp, "    node->_bottom_type = bottom_type();\n");
     }
src/share/vm/adlc/output_h.cpp

@@ -1493,7 +1493,7 @@ void ArchDesc::declareClasses(FILE *fp) {
     // Build class definition for this instruction
     fprintf(fp,"\n");
     fprintf(fp,"class %sNode : public %s { \n",
-            instr->_ident, instr->mach_base_class() );
+            instr->_ident, instr->mach_base_class(_globalNames) );
     fprintf(fp,"private:\n");
     fprintf(fp,"  MachOper *_opnd_array[%d];\n", instr->num_opnds() );
     if ( instr->is_ideal_jump() ) {

@@ -1566,7 +1566,7 @@ void ArchDesc::declareClasses(FILE *fp) {
     // Use MachNode::ideal_Opcode() for nodes based on MachNode class
     // if the ideal_Opcode == Op_Node.
     if ( strcmp("Node", instr->ideal_Opcode(_globalNames)) != 0 ||
-         strcmp("MachNode", instr->mach_base_class()) != 0 ) {
+         strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
       fprintf(fp,"  virtual int ideal_Opcode() const { return Op_%s; }\n",
             instr->ideal_Opcode(_globalNames) );
     }

@@ -1631,7 +1631,7 @@ void ArchDesc::declareClasses(FILE *fp) {
     // Use MachNode::oper_input_base() for nodes based on MachNode class
     // if the base == 1.
     if ( instr->oper_input_base(_globalNames) != 1 ||
-         strcmp("MachNode", instr->mach_base_class()) != 0 ) {
+         strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
       fprintf(fp,"  virtual uint oper_input_base() const { return %d; }\n",
             instr->oper_input_base(_globalNames));
     }

@@ -1906,11 +1906,6 @@ void ArchDesc::declareClasses(FILE *fp) {
       fprintf(fp, "  const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveN\n",
         offset, offset+1, offset+1);
     }
-    else if( instr->needs_base_oop_edge(_globalNames) ) {
-      // Special hack for ideal AddP.  Bottom type is an oop IFF it has a
-      // legal base-pointer input.  Otherwise it is NOT an oop.
-      fprintf(fp,"  const Type *bottom_type() const { return AddPNode::mach_bottom_type(this); } // AddP\n");
-    }
     else if (instr->is_tls_instruction()) {
       // Special hack for tlsLoadP
       fprintf(fp,"  const Type *bottom_type() const { return TypeRawPtr::BOTTOM; } // tlsLoadP\n");
src/share/vm/c1/c1_GraphBuilder.cpp

@@ -2978,7 +2978,11 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
 bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
   if (!InlineNatives           ) INLINE_BAILOUT("intrinsic method inlining disabled");
-  if (callee->is_synchronized()) INLINE_BAILOUT("intrinsic method is synchronized");
+  if (callee->is_synchronized()) {
+    // We don't currently support any synchronized intrinsics
+    return false;
+  }
+
   // callee seems like a good candidate
   // determine id
   bool preserves_state = false;
src/share/vm/code/codeCache.cpp

@@ -124,6 +124,23 @@ nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
   return (nmethod*)cb;
 }

+nmethod* CodeCache::first_nmethod() {
+  assert_locked_or_safepoint(CodeCache_lock);
+  CodeBlob* cb = first();
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}
+
+nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  cb = next(cb);
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}
+
 CodeBlob* CodeCache::allocate(int size) {
   // Do not seize the CodeCache lock here--if the caller has not

@@ -414,7 +431,7 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
       saved->set_speculatively_disconnected(false);
       saved->set_saved_nmethod_link(NULL);
       if (PrintMethodFlushing) {
-        saved->print_on(tty, " ### nmethod is reconnected");
+        saved->print_on(tty, " ### nmethod is reconnected\n");
       }
       if (LogCompilation && (xtty != NULL)) {
         ttyLocker ttyl;

@@ -432,7 +449,8 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
 }

 void CodeCache::remove_saved_code(nmethod* nm) {
-  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  // For conc swpr this will be called with CodeCache_lock taken by caller
+  assert_locked_or_safepoint(CodeCache_lock);
   assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
   nmethod* saved = _saved_nmethods;
   nmethod* prev = NULL;

@@ -463,7 +481,7 @@ void CodeCache::speculatively_disconnect(nmethod* nm) {
   nm->set_saved_nmethod_link(_saved_nmethods);
   _saved_nmethods = nm;
   if (PrintMethodFlushing) {
-    nm->print_on(tty, " ### nmethod is speculatively disconnected");
+    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
   }
   if (LogCompilation && (xtty != NULL)) {
     ttyLocker ttyl;
src/share/vm/code/codeCache.hpp

@@ -102,6 +102,8 @@ class CodeCache : AllStatic {
   static CodeBlob* next (CodeBlob* cb);
   static CodeBlob* alive(CodeBlob *cb);
   static nmethod*  alive_nmethod(CodeBlob *cb);
+  static nmethod*  first_nmethod();
+  static nmethod*  next_nmethod (CodeBlob* cb);
   static int       nof_blobs()                 { return _number_of_blobs; }

   // GC support
src/share/vm/code/nmethod.cpp

@@ -1014,9 +1014,7 @@ void nmethod::clear_inline_caches() {
 void nmethod::cleanup_inline_caches() {

-  assert(SafepointSynchronize::is_at_safepoint() &&
-        !CompiledIC_lock->is_locked() &&
-        !Patching_lock->is_locked(), "no threads must be updating the inline caches by them selfs");
+  assert_locked_or_safepoint(CompiledIC_lock);

   // If the method is not entrant or zombie then a JMP is plastered over the
   // first few bytes.  If an oop in the old code was there, that oop

@@ -1071,7 +1069,6 @@ void nmethod::mark_as_seen_on_stack() {
 // Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
 bool nmethod::can_not_entrant_be_converted() {
   assert(is_not_entrant(), "must be a non-entrant method");
-  assert(SafepointSynchronize::is_at_safepoint(), "must be called during a safepoint");

   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
   // count can be greater than the stack traversal count before it hits the

@@ -1127,7 +1124,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
     _method = NULL;            // Clear the method of this dead nmethod
   }
   // Make the class unloaded - i.e., change state and notify sweeper
-  check_safepoint();
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   if (is_in_use()) {
     // Transitioning directly from live to unloaded -- so
     // we need to force a cache clean-up; remember this

@@ -1220,17 +1217,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
     assert(NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
   }

-  // When the nmethod becomes zombie it is no longer alive so the
-  // dependencies must be flushed.  nmethods in the not_entrant
-  // state will be flushed later when the transition to zombie
-  // happens or they get unloaded.
-  if (state == zombie) {
-    assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
-    flush_dependencies(NULL);
-  } else {
-    assert(state == not_entrant, "other cases may need to be handled differently");
-  }
-
   was_alive = is_in_use(); // Read state under lock

   // Change state

@@ -1241,6 +1227,17 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   } // leave critical region under Patching_lock

+  // When the nmethod becomes zombie it is no longer alive so the
+  // dependencies must be flushed.  nmethods in the not_entrant
+  // state will be flushed later when the transition to zombie
+  // happens or they get unloaded.
+  if (state == zombie) {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    flush_dependencies(NULL);
+  } else {
+    assert(state == not_entrant, "other cases may need to be handled differently");
+  }
+
   if (state == not_entrant) {
     Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
   } else {

@@ -1310,21 +1307,13 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   return true;
 }

-#ifndef PRODUCT
-void nmethod::check_safepoint() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-}
-#endif
-
 void nmethod::flush() {
   // Note that there are no valid oops in the nmethod anymore.
   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");

   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
-  check_safepoint();
+  assert_locked_or_safepoint(CodeCache_lock);

   // completely deallocate this method
   EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");

@@ -1373,7 +1362,7 @@ void nmethod::flush() {
 // notifies instanceKlasses that are reachable
 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
+  assert_locked_or_safepoint(CodeCache_lock);
   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
   "is_alive is non-NULL if and only if we are called during GC");
   if (!has_flushed_dependencies()) {

@@ -2266,7 +2255,6 @@ void nmethod::print() const {
   tty->print(" for method " INTPTR_FORMAT , (address)method());
   tty->print(" { ");
   if (version()) tty->print("v%d ", version());
-  if (level()) tty->print("l%d ", level());
   if (is_in_use()) tty->print("in_use ");
   if (is_not_entrant()) tty->print("not_entrant ");
   if (is_zombie()) tty->print("zombie ");
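A recurring move in nmethod.cpp above is replacing hard "must be at safepoint" assertions with assert_locked_or_safepoint, so the concurrent sweeper can do the same work while merely holding CodeCache_lock. A minimal standalone sketch of the invariant behind that check (illustrative stubs only, not HotSpot's actual types or implementation):

```cpp
#include <cassert>

// Stand-ins for HotSpot's Mutex and SafepointSynchronize (illustrative only).
struct Lock { bool owned_by_self() const { return _owned; } bool _owned = false; };
static bool at_safepoint = false;

// Safe if either the current thread holds the lock, or all Java threads are
// stopped at a safepoint: in both cases no one else can mutate the protected state.
static void assert_locked_or_safepoint(const Lock& l) {
  assert(l.owned_by_self() || at_safepoint);
}

int main() {
  Lock code_cache_lock;
  code_cache_lock._owned = true;   // concurrent sweeper path: take the lock
  assert_locked_or_safepoint(code_cache_lock);

  code_cache_lock._owned = false;
  at_safepoint = true;             // safepoint path: no lock needed
  assert_locked_or_safepoint(code_cache_lock);
}
```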
src/share/vm/code/nmethod.hpp

@@ -82,7 +82,6 @@ class PcDescCache VALUE_OBJ_CLASS_SPEC {
 struct nmFlags {
   friend class VMStructs;
   unsigned int version:8;           // version number (0 = first version)
-  unsigned int level:4;             // optimization level
   unsigned int age:4;               // age (in # of sweep steps)

   unsigned int state:2;             // {alive, zombie, unloaded)

@@ -410,14 +409,13 @@ class nmethod : public CodeBlob {
   void  flush_dependencies(BoolObjectClosure* is_alive);
   bool  has_flushed_dependencies()        { return flags.hasFlushedDependencies; }
   void  set_has_flushed_dependencies()    {
-    check_safepoint();
     assert(!has_flushed_dependencies(), "should only happen once");
     flags.hasFlushedDependencies = 1;
   }

   bool  is_marked_for_reclamation() const { return flags.markedForReclamation; }
-  void  mark_for_reclamation()            { check_safepoint(); flags.markedForReclamation = 1; }
-  void  unmark_for_reclamation()          { check_safepoint(); flags.markedForReclamation = 0; }
+  void  mark_for_reclamation()            { flags.markedForReclamation = 1; }
+  void  unmark_for_reclamation()          { flags.markedForReclamation = 0; }

   bool  has_unsafe_access() const         { return flags.has_unsafe_access; }
   void  set_has_unsafe_access(bool z)     { flags.has_unsafe_access = z; }

@@ -428,9 +426,6 @@ class nmethod : public CodeBlob {
   bool  is_speculatively_disconnected() const  { return flags.speculatively_disconnected; }
   void  set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }

-  int   level() const                     { return flags.level; }
-  void  set_level(int newLevel)           { check_safepoint(); flags.level = newLevel; }
-
   int   comp_level() const                { return _comp_level; }
   int   version() const                   { return flags.version; }
src/share/vm/compiler/compileBroker.cpp

@@ -461,12 +461,25 @@ void CompileQueue::add(CompileTask* task) {
 //
 // Get the next CompileTask from a CompileQueue
 CompileTask* CompileQueue::get() {
+  NMethodSweeper::possibly_sweep();
+
   MutexLocker locker(lock());
   // Wait for an available CompileTask.
   while (_first == NULL) {
     // There is no work to be done right now.  Wait.
-    lock()->wait();
+    if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
+      // During the emergency sweeping periods, wake up and sweep occasionally
+      bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
+      if (timedout) {
+        MutexUnlocker ul(lock());
+        // When otherwise not busy, run nmethod sweeping
+        NMethodSweeper::possibly_sweep();
+      }
+    } else {
+      // During normal operation no need to wake up on timer
+      lock()->wait();
+    }
   }
   CompileTask* task = _first;
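The new CompileQueue::get() turns idle compiler threads into sweepers: they sweep once before blocking, and during emergency code-cache flushing they wake on a timer and sweep again, always with the queue lock released. A self-contained sketch of the same timed-wait shape using std::condition_variable (names like WorkQueue and maybe_sweep are illustrative, not HotSpot API):

```cpp
#include <chrono>
#include <condition_variable>
#include <deque>
#include <mutex>

static void maybe_sweep() { /* stand-in for NMethodSweeper::possibly_sweep() */ }

class WorkQueue {
  std::mutex              _lock;
  std::condition_variable _cond;
  std::deque<int>         _tasks;
  bool                    _emergency = false;  // stand-in for "code cache needs flushing"

public:
  int get() {
    maybe_sweep();                             // sweep a little before blocking
    std::unique_lock<std::mutex> l(_lock);
    while (_tasks.empty()) {
      if (_emergency) {
        // Emergency period: wake on a timer so idle threads keep sweeping.
        if (_cond.wait_for(l, std::chrono::seconds(5)) == std::cv_status::timeout) {
          l.unlock();                          // never sweep while holding the queue lock
          maybe_sweep();
          l.lock();
        }
      } else {
        _cond.wait(l);                         // normal operation: no timer wake-ups
      }
    }
    int task = _tasks.front();
    _tasks.pop_front();
    return task;
  }
};
```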
src/share/vm/opto/addnode.cpp

@@ -714,71 +714,6 @@ uint AddPNode::match_edge(uint idx) const {
   return idx > Base;
 }

-//---------------------------mach_bottom_type----------------------------------
-// Utility function for use by ADLC.  Implements bottom_type for matched AddP.
-const Type *AddPNode::mach_bottom_type( const MachNode* n) {
-  Node* base = n->in(Base);
-  const Type *t = base->bottom_type();
-  if ( t == Type::TOP ) {
-    // an untyped pointer
-    return TypeRawPtr::BOTTOM;
-  }
-  const TypePtr* tp = t->isa_oopptr();
-  if ( tp == NULL )  return t;
-  if ( tp->_offset == TypePtr::OffsetBot )  return tp;
-
-  // We must carefully add up the various offsets...
-  intptr_t offset = 0;
-  const TypePtr* tptr = NULL;
-
-  uint numopnds = n->num_opnds();
-  uint index = n->oper_input_base();
-  for ( uint i = 1; i < numopnds; i++ ) {
-    MachOper *opnd = n->_opnds[i];
-    // Check for any interesting operand info.
-    // In particular, check for both memory and non-memory operands.
-    // %%%%% Clean this up: use xadd_offset
-    intptr_t con = opnd->constant();
-    if ( con == TypePtr::OffsetBot )  goto bottom_out;
-    offset += con;
-    con = opnd->constant_disp();
-    if ( con == TypePtr::OffsetBot )  goto bottom_out;
-    offset += con;
-    if ( opnd->scale() != 0 )  goto bottom_out;
-
-    // Check each operand input edge.  Find the 1 allowed pointer
-    // edge.  Other edges must be index edges; track exact constant
-    // inputs and otherwise assume the worst.
-    for ( uint j = opnd->num_edges(); j > 0; j-- ) {
-      Node* edge = n->in(index++);
-      const Type* et = edge->bottom_type();
-      const TypeX* eti = et->isa_intptr_t();
-      if ( eti == NULL ) {
-        // there must be one pointer among the operands
-        guarantee(tptr == NULL, "must be only one pointer operand");
-        if (UseCompressedOops && Universe::narrow_oop_shift() == 0) {
-          // 32-bits narrow oop can be the base of address expressions
-          tptr = et->make_ptr()->isa_oopptr();
-        } else {
-          // only regular oops are expected here
-          tptr = et->isa_oopptr();
-        }
-        guarantee(tptr != NULL, "non-int operand must be pointer");
-        if (tptr->higher_equal(tp->add_offset(tptr->offset())))
-          tp = tptr; // Set more precise type for bailout
-        continue;
-      }
-      if ( eti->_hi != eti->_lo )  goto bottom_out;
-      offset += eti->_lo;
-    }
-  }
-  guarantee(tptr != NULL, "must be exactly one pointer operand");
-  return tptr->add_offset(offset);
-
- bottom_out:
-  return tp->add_offset(TypePtr::OffsetBot);
-}
-
 //=============================================================================
 //------------------------------Identity---------------------------------------
 Node *OrINode::Identity( PhaseTransform *phase ) {
src/share/vm/opto/addnode.hpp

@@ -151,7 +151,6 @@ public:
   // Do not match base-ptr edge
   virtual uint match_edge(uint idx) const;
-  static const Type *mach_bottom_type(const MachNode* n);  // used by ad_<arch>.hpp
 };

 //------------------------------OrINode----------------------------------------
src/share/vm/opto/cfgnode.cpp

@@ -1654,6 +1654,64 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     if (opt != NULL)  return opt;
   }

+  if (in(1) != NULL && in(1)->Opcode() == Op_AddP && can_reshape) {
+    // Try to undo Phi of AddP:
+    // (Phi (AddP base base y) (AddP base2 base2 y))
+    // becomes:
+    // newbase := (Phi base base2)
+    // (AddP newbase newbase y)
+    //
+    // This occurs as a result of unsuccessful split_thru_phi and
+    // interferes with taking advantage of addressing modes. See the
+    // clone_shift_expressions code in matcher.cpp
+    Node* addp = in(1);
+    const Type* type = addp->in(AddPNode::Base)->bottom_type();
+    Node* y = addp->in(AddPNode::Offset);
+    if (y != NULL && addp->in(AddPNode::Base) == addp->in(AddPNode::Address)) {
+      // make sure that all the inputs are similar to the first one,
+      // i.e. AddP with base == address and same offset as first AddP
+      bool doit = true;
+      for (uint i = 2; i < req(); i++) {
+        if (in(i) == NULL ||
+            in(i)->Opcode() != Op_AddP ||
+            in(i)->in(AddPNode::Base) != in(i)->in(AddPNode::Address) ||
+            in(i)->in(AddPNode::Offset) != y) {
+          doit = false;
+          break;
+        }
+        // Accumulate type for resulting Phi
+        type = type->meet(in(i)->in(AddPNode::Base)->bottom_type());
+      }
+      Node* base = NULL;
+      if (doit) {
+        // Check for neighboring AddP nodes in a tree.
+        // If they have a base, use that it.
+        for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) {
+          Node* u = this->fast_out(k);
+          if (u->is_AddP()) {
+            Node* base2 = u->in(AddPNode::Base);
+            if (base2 != NULL && !base2->is_top()) {
+              if (base == NULL)
+                base = base2;
+              else if (base != base2) {
+                doit = false;
+                break;
+              }
+            }
+          }
+        }
+      }
+      if (doit) {
+        if (base == NULL) {
+          base = new (phase->C, in(0)->req()) PhiNode(in(0), type, NULL);
+          for (uint i = 1; i < req(); i++) {
+            base->init_req(i, in(i)->in(AddPNode::Base));
+          }
+          phase->is_IterGVN()->register_new_node_with_optimizer(base);
+        }
+        return new (phase->C, 4) AddPNode(base, base, y);
+      }
+    }
+  }
+
   // Split phis through memory merges, so that the memory merges will go away.
   // Piggy-back this transformation on the search for a unique input....
   // It will be as if the merged memory is the unique value of the phi.
src/share/vm/opto/escape.cpp

@@ -1989,20 +1989,15 @@ void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *pha
     case Op_Allocate:
     {
       Node *k = call->in(AllocateNode::KlassNode);
-      const TypeKlassPtr *kt;
-      if (k->Opcode() == Op_LoadKlass) {
-        kt = k->as_Load()->type()->isa_klassptr();
-      } else {
-        // Also works for DecodeN(LoadNKlass).
-        kt = k->as_Type()->type()->isa_klassptr();
-      }
+      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
       assert(kt != NULL, "TypeKlassPtr required.");
       ciKlass* cik = kt->klass();
-      ciInstanceKlass* ciik = cik->as_instance_klass();

       PointsToNode::EscapeState es;
       uint edge_to;
-      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
-          ciik->has_finalizer()) {
+      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
+         !cik->is_instance_klass() || // StressReflectiveCode
+          cik->as_instance_klass()->has_finalizer()) {
         es = PointsToNode::GlobalEscape;
         edge_to = _phantom_object; // Could not be worse
       } else {

@@ -2017,13 +2012,28 @@ void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *pha
     case Op_AllocateArray:
     {
-      int length = call->in(AllocateNode::ALength)->find_int_con(-1);
-      if (length < 0 || length > EliminateAllocationArraySizeLimit) {
-        // Not scalar replaceable if the length is not constant or too big.
-        ptnode_adr(call_idx)->_scalar_replaceable = false;
+      Node *k = call->in(AllocateNode::KlassNode);
+      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
+      assert(kt != NULL, "TypeKlassPtr required.");
+      ciKlass* cik = kt->klass();
+
+      PointsToNode::EscapeState es;
+      uint edge_to;
+      if (!cik->is_array_klass()) { // StressReflectiveCode
+        es = PointsToNode::GlobalEscape;
+        edge_to = _phantom_object;
+      } else {
+        es = PointsToNode::NoEscape;
+        edge_to = call_idx;
+        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
+        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
+          // Not scalar replaceable if the length is not constant or too big.
+          ptnode_adr(call_idx)->_scalar_replaceable = false;
+        }
       }
-      set_escape_state(call_idx, PointsToNode::NoEscape);
-      add_pointsto_edge(resproj_idx, call_idx);
+      set_escape_state(call_idx, es);
+      add_pointsto_edge(resproj_idx, edge_to);
       _processed.set(resproj_idx);
       break;
     }
src/share/vm/runtime/globals.hpp

@@ -2764,6 +2764,9 @@ class CommandLineFlags {
   product(intx, NmethodSweepFraction, 4,                                    \
           "Number of invocations of sweeper to cover all nmethods")        \
                                                                            \
+  product(intx, NmethodSweepCheckInterval, 5,                              \
+          "Compilers wake up every n seconds to possibly sweep nmethods")  \
+                                                                           \
   notproduct(intx, MemProfilingInterval, 500,                              \
           "Time between each invocation of the MemProfiler")               \
                                                                            \
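Being declared as a product flag, the new interval is tunable on the command line in the usual -XX form, e.g. -XX:NmethodSweepCheckInterval=10 (in seconds); the default of 5 comes from the declaration above.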
src/share/vm/runtime/safepoint.cpp

@@ -472,7 +472,7 @@ void SafepointSynchronize::do_cleanup_tasks() {
   }

   TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
-  NMethodSweeper::sweep();
+  NMethodSweeper::scan_stacks();
 }
src/share/vm/runtime/sweeper.cpp

@@ -33,6 +33,8 @@ int NMethodSweeper::_invocations = 0;   // No. of invocations left until we
 jint      NMethodSweeper::_locked_seen = 0;
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 bool      NMethodSweeper::_rescan = false;
+bool      NMethodSweeper::_do_sweep = false;
+jint      NMethodSweeper::_sweep_started = 0;
 bool      NMethodSweeper::_was_full = false;
 jint      NMethodSweeper::_advise_to_sweep = 0;
 jlong     NMethodSweeper::_last_was_full = 0;

@@ -50,14 +52,20 @@ public:
 };
 static MarkActivationClosure mark_activation_closure;

-void NMethodSweeper::sweep() {
+void NMethodSweeper::scan_stacks() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   if (!MethodFlushing) return;
+  _do_sweep = true;

   // No need to synchronize access, since this is always executed at a
   // safepoint.  If we aren't in the middle of scan and a rescan
-  // hasn't been requested then just return.
-  if (_current == NULL && !_rescan) return;
+  // hasn't been requested then just return. If UseCodeCacheFlushing is on and
+  // code cache flushing is in progress, don't skip sweeping to help make progress
+  // clearing space in the code cache.
+  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing &&
+      !CompileBroker::should_compile_new_jobs())) {
+    _do_sweep = false;
+    return;
+  }

   // Make sure CompiledIC_lock in unlocked, since we might update some
   // inline caches. If it is, we just bail-out and try later.

@@ -68,7 +76,7 @@ void NMethodSweeper::sweep() {
   if (_current == NULL) {
     _seen        = 0;
     _invocations = NmethodSweepFraction;
-    _current     = CodeCache::first();
+    _current     = CodeCache::first_nmethod();
     _traversals  += 1;
     if (PrintMethodFlushing) {
       tty->print_cr("### Sweep: stack traversal %d", _traversals);

@@ -81,48 +89,9 @@ void NMethodSweeper::sweep() {
     _not_entrant_seen_on_stack = 0;
   }

-  if (PrintMethodFlushing && Verbose) {
-    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
-  }
-
-  // We want to visit all nmethods after NmethodSweepFraction invocations.
-  // If invocation is 1 we do the rest
-  int todo = CodeCache::nof_blobs();
-  if (_invocations != 1) {
-    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
-    _invocations--;
-  }
-
-  for(int i = 0; i < todo && _current != NULL; i++) {
-    CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current
-    if (_current->is_nmethod()) {
-      process_nmethod((nmethod *)_current);
-    }
-    _seen++;
-    _current = next;
-  }
-
-  // Because we could stop on a codeBlob other than an nmethod we skip forward
-  // to the next nmethod (if any). codeBlobs other than nmethods can be freed
-  // async to us and make _current invalid while we sleep.
-  while (_current != NULL && !_current->is_nmethod()) {
-    _current = CodeCache::next(_current);
-  }
-
-  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
-    // we've completed a scan without making progress but there were
-    // nmethods we were unable to process either because they were
-    // locked or were still on stack.  We don't have to aggresively
-    // clean them up so just stop scanning.  We could scan once more
-    // but that complicates the control logic and it's unlikely to
-    // matter much.
-    if (PrintMethodFlushing) {
-      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
-    }
-  }
-
   if (UseCodeCacheFlushing) {
     if (!CodeCache::needs_flushing()) {
-      // In a safepoint, no race with setters
+      // scan_stacks() runs during a safepoint, no race with setters
       _advise_to_sweep = 0;
     }

@@ -155,13 +124,99 @@ void NMethodSweeper::sweep() {
   }
 }

+void NMethodSweeper::possibly_sweep() {
+  if ((!MethodFlushing) || (!_do_sweep)) return;
+
+  if (_invocations > 0) {
+    // Only one thread at a time will sweep
+    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
+    if (old != 0) {
+      return;
+    }
+    sweep_code_cache();
+  }
+  _sweep_started = 0;
+}
+
+void NMethodSweeper::sweep_code_cache() {
+#ifdef ASSERT
+  jlong sweep_start;
+  if (PrintMethodFlushing) {
+    sweep_start = os::javaTimeMillis();
+  }
+#endif
+  if (PrintMethodFlushing && Verbose) {
+    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
+  }
+
+  // We want to visit all nmethods after NmethodSweepFraction invocations.
+  // If invocation is 1 we do the rest
+  int todo = CodeCache::nof_blobs();
+  if (_invocations > 1) {
+    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
+  }
+
+  // Compilers may check to sweep more often than stack scans happen,
+  // don't keep trying once it is all scanned
+  _invocations--;
+
+  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+
+    for(int i = 0; i < todo && _current != NULL; i++) {
+
+      // Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
+      // Other blobs can be deleted by other threads
+      // Read next before we potentially delete current
+      CodeBlob* next = CodeCache::next_nmethod(_current);
+
+      // Now ready to process nmethod and give up CodeCache_lock
+      {
+        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+        process_nmethod((nmethod *)_current);
+      }
+      _seen++;
+      _current = next;
+    }
+
+    // Skip forward to the next nmethod (if any). Code blobs other than nmethods
+    // can be freed async to us and make _current invalid while we sleep.
+    _current = CodeCache::next_nmethod(_current);
+  }
+
+  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+    // we've completed a scan without making progress but there were
+    // nmethods we were unable to process either because they were
+    // locked or were still on stack.  We don't have to aggresively
+    // clean them up so just stop scanning.  We could scan once more
+    // but that complicates the control logic and it's unlikely to
+    // matter much.
+    if (PrintMethodFlushing) {
+      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
+    }
+  }
+
+#ifdef ASSERT
+  if (PrintMethodFlushing) {
+    jlong sweep_end = os::javaTimeMillis();
+    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
+  }
+#endif
+}
+
 void NMethodSweeper::process_nmethod(nmethod *nm) {
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
   // Skip methods that are currently referenced by the VM
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
     if (nm->is_alive()) {
       // Clean-up all inline caches that points to zombie/non-reentrant methods
       MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
     } else {
       _locked_seen++;

@@ -178,6 +233,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
     if (PrintMethodFlushing && Verbose) {
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
     }
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     nm->flush();
   } else {
     if (PrintMethodFlushing && Verbose) {

@@ -197,10 +253,11 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
       _rescan = true;
     } else {
       // Still alive, clean up its inline caches
+      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
       // we coudn't transition this nmethod so don't immediately
       // request a rescan.  If this method stays on the stack for a
-      // long time we don't want to keep rescanning at every safepoint.
+      // long time we don't want to keep rescanning the code cache.
       _not_entrant_seen_on_stack++;
     }
   } else if (nm->is_unloaded()) {

@@ -209,6 +266,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
     if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nm->flush();
     } else {
       nm->make_zombie();

@@ -227,6 +285,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
     }

     // Clean-up all inline caches that points to zombie/non-reentrant methods
+    MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
   }
 }

@@ -235,8 +294,8 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
 // they will call a vm op that comes here. This code attempts to speculatively
 // unload the oldest half of the nmethods (based on the compile job id) by
 // saving the old code in a list in the CodeCache. Then
-// execution resumes. If a method so marked is not called by the second
-// safepoint from the current one, the nmethod will be marked non-entrant and
+// execution resumes. If a method so marked is not called by the second sweeper
+// stack traversal after the current one, the nmethod will be marked non-entrant and
 // got rid of by normal sweeping. If the method is called, the methodOop's
 // _code field is restored and the methodOop/nmethod
 // go back to their normal state.

@@ -364,8 +423,8 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
       xtty->end_elem();
     }

-    // Shut off compiler. Sweeper will run exiting from this safepoint
-    // and turn it back on if it clears enough space
+    // Shut off compiler. Sweeper will start over with a new stack scan and
+    // traversal cycle and turn it back on if it clears enough space.
    if (was_full()) {
      _last_was_full = os::javaTimeMillis();
      CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
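possibly_sweep() above elects a single sweeper with a bare compare-and-swap on _sweep_started rather than a lock, so losing threads return to compilation immediately instead of blocking. The same gate expressed with std::atomic (a sketch of the pattern, not HotSpot's Atomic API):

```cpp
#include <atomic>

static std::atomic<int> sweep_started{0};

static void sweep_code_cache() { /* the actual sweeping work would go here */ }

void possibly_sweep() {
  int expected = 0;
  // Equivalent of Atomic::cmpxchg(1, &_sweep_started, 0): only the thread that
  // flips 0 -> 1 proceeds; every concurrent caller sees 1 and returns at once.
  if (!sweep_started.compare_exchange_strong(expected, 1)) {
    return;
  }
  sweep_code_cache();
  sweep_started.store(0);  // reopen the gate for the next sweep
}
```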
src/share/vm/runtime/sweeper.hpp

@@ -35,6 +35,8 @@ class NMethodSweeper : public AllStatic {
   static bool      _rescan;          // Indicates that we should do a full rescan of the
                                      // of the code cache looking for work to do.
+  static bool      _do_sweep;        // Flag to skip the conc sweep if no stack scan happened
+  static jint      _sweep_started;   // Flag to control conc sweeper
   static int       _locked_seen;     // Number of locked nmethods encountered during the scan
   static int       _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack

@@ -48,7 +50,9 @@ class NMethodSweeper : public AllStatic {
  public:
   static long traversal_count() { return _traversals; }

-  static void sweep();            // Invoked at the end of each safepoint
+  static void scan_stacks();      // Invoked at the end of each safepoint
+  static void sweep_code_cache(); // Concurrent part of sweep job
+  static void possibly_sweep();   // Compiler threads call this to sweep

   static void notify(nmethod* nm) {
     // Perform a full scan of the code cache from the beginning.  No