openanolis / dragonwell8_hotspot

Commit ce070a6e
Authored on Apr 26, 2013 by dcubed
Merge commit; parents: e01799dc, 9210c12a

Showing 15 changed files with 491 additions and 110 deletions (+491 -110)
src/share/vm/ci/ciEnv.cpp                       +2    -1
src/share/vm/classfile/classFileParser.hpp      +14   -2
src/share/vm/classfile/classLoaderData.cpp      +6    -0
src/share/vm/classfile/dictionary.cpp           +0    -13
src/share/vm/oops/constantPool.cpp              +150  -31
src/share/vm/oops/constantPool.hpp              +52   -3
src/share/vm/oops/cpCache.cpp                   +2    -1
src/share/vm/oops/instanceKlass.cpp             +28   -26
src/share/vm/oops/instanceKlass.hpp             +14   -7
src/share/vm/prims/jvmtiEnv.cpp                 +4    -2
src/share/vm/prims/jvmtiRedefineClasses.cpp     +167  -11
src/share/vm/prims/jvmtiRedefineClasses.hpp     +15   -1
src/share/vm/services/memBaseline.cpp           +29   -7
src/share/vm/services/memBaseline.hpp           +4    -1
src/share/vm/services/memTracker.cpp            +4    -4
src/share/vm/ci/ciEnv.cpp
...
...
@@ -483,7 +483,8 @@ ciKlass* ciEnv::get_klass_by_index_impl(constantPoolHandle cpool,
   {
     // We have to lock the cpool to keep the oop from being resolved
     // while we are accessing it.
-    MonitorLockerEx ml(cpool->lock());
+    oop cplock = cpool->lock();
+    ObjectLocker ol(cplock, THREAD, cplock != NULL);
     constantTag tag = cpool->tag_at(index);
     if (tag.is_klass()) {
       // The klass has been inserted into the constant pool
...
...
src/share/vm/classfile/classFileParser.hpp
...
...
@@ -304,7 +304,19 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
   inline void assert_property(bool b, const char* msg, TRAPS) {
 #ifdef ASSERT
-    if (!b) { fatal(msg); }
+    if (!b) {
+      ResourceMark rm(THREAD);
+      fatal(err_msg(msg, _class_name->as_C_string()));
+    }
 #endif
   }
 
+  inline void assert_property(bool b, const char* msg, int index, TRAPS) {
+#ifdef ASSERT
+    if (!b) {
+      ResourceMark rm(THREAD);
+      fatal(err_msg(msg, index, _class_name->as_C_string()));
+    }
+#endif
+  }
...
...
@@ -312,7 +324,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
     if (_need_verify) {
       guarantee_property(property, msg, index, CHECK);
     } else {
-      assert_property(property, msg, CHECK);
+      assert_property(property, msg, index, CHECK);
     }
   }
...
...
src/share/vm/classfile/classLoaderData.cpp
...
...
@@ -277,6 +277,9 @@ void ClassLoaderData::remove_class(Klass* scratch_class) {
 void ClassLoaderData::unload() {
   _unloading = true;
 
+  // Tell serviceability tools these classes are unloading
+  classes_do(InstanceKlass::notify_unload_class);
+
   if (TraceClassLoaderData) {
     ResourceMark rm;
     tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, this);
...
...
@@ -300,6 +303,9 @@ bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const {
 ClassLoaderData::~ClassLoaderData() {
+  // Release C heap structures for all the classes.
+  classes_do(InstanceKlass::release_C_heap_structures);
+
   Metaspace *m = _metaspace;
   if (m != NULL) {
     _metaspace = NULL;
...
...
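The two hunks above hang the unload-time bookkeeping on ClassLoaderData, which applies a per-class callback via classes_do(); the dictionary.cpp hunk further down shows the matching removal of that work from Dictionary::do_unloading(). Below is a minimal standalone sketch of the same callback-per-class shape; FakeKlass, FakeLoaderData, and the printf bodies are stand-ins for illustration, not HotSpot types.

```cpp
#include <cstdio>
#include <vector>

// Stand-ins for the VM types; only the shape of the callback matters.
struct FakeKlass { const char* name; };

struct FakeLoaderData {
  std::vector<FakeKlass*> klasses;

  // Apply a static callback to every class owned by this loader data,
  // mirroring the classes_do(InstanceKlass::notify_unload_class) calls above.
  void classes_do(void (*f)(FakeKlass*)) {
    for (FakeKlass* k : klasses) f(k);
  }
};

static void notify_unload_class(FakeKlass* k) {
  std::printf("notify unload: %s\n", k->name);   // debugger / ClassLoadingService hook
}

static void release_C_heap_structures(FakeKlass* k) {
  std::printf("release C heap: %s\n", k->name);  // free native side structures
}

int main() {
  FakeKlass a{"Foo"}, b{"Bar"};
  FakeLoaderData cld{{&a, &b}};
  cld.classes_do(notify_unload_class);       // at unload(): classes are still walkable
  cld.classes_do(release_C_heap_structures); // at ~ClassLoaderData(): final cleanup
  return 0;
}
```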
src/share/vm/classfile/dictionary.cpp
...
...
@@ -27,7 +27,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
-#include "services/classLoadingService.hpp"
 #include "utilities/hashtable.inline.hpp"
...
...
@@ -156,19 +155,7 @@ bool Dictionary::do_unloading() {
         if (k_def_class_loader_data == loader_data) {
           // This is the defining entry, so the referred class is about
           // to be unloaded.
-          // Notify the debugger and clean up the class.
           class_was_unloaded = true;
-
-          // notify the debugger
-          if (JvmtiExport::should_post_class_unload()) {
-            JvmtiExport::post_class_unload(ik);
-          }
-
-          // notify ClassLoadingService of class unload
-          ClassLoadingService::notify_class_unloaded(ik);
-
-          // Clean up C heap
-          ik->release_C_heap_structures();
-          ik->constants()->release_C_heap_structures();
         }
         // Also remove this system dictionary entry.
         purge_entry = true;
...
...
src/share/vm/oops/constantPool.cpp
...
...
@@ -40,6 +40,7 @@
 #include "runtime/init.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/signature.hpp"
+#include "runtime/synchronizer.hpp"
 #include "runtime/vframe.hpp"
 
 ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
...
...
@@ -69,7 +70,6 @@ ConstantPool::ConstantPool(Array<u1>* tags) {
   // only set to non-zero if constant pool is merged by RedefineClasses
   set_version(0);
-  set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock"));
 
   // initialize tag array
   int length = tags->length();
...
...
@@ -95,9 +95,6 @@ void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
 void ConstantPool::release_C_heap_structures() {
   // walk constant pool and decrement symbol reference counts
   unreference_symbols();
-
-  delete _lock;
-  set_lock(NULL);
 }
 
 objArrayOop ConstantPool::resolved_references() const {
...
...
@@ -154,9 +151,6 @@ void ConstantPool::restore_unshareable_info(TRAPS) {
     ClassLoaderData* loader_data = pool_holder()->class_loader_data();
     set_resolved_references(loader_data->add_handle(refs_handle));
-
-    // Also need to recreate the mutex. Make sure this matches the constructor
-    set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock"));
   }
 }
...
...
@@ -167,7 +161,23 @@ void ConstantPool::remove_unshareable_info() {
   set_resolved_reference_length(
     resolved_references() != NULL ? resolved_references()->length() : 0);
   set_resolved_references(NULL);
-  set_lock(NULL);
 }
 
+oop ConstantPool::lock() {
+  if (_pool_holder) {
+    // We re-use the _pool_holder's init_lock to reduce footprint.
+    // Notes on deadlocks:
+    // [1] This lock is a Java oop, so it can be recursively locked by
+    //     the same thread without self-deadlocks.
+    // [2] Deadlock will happen if there is circular dependency between
+    //     the <clinit> of two Java classes. However, in this case,
+    //     the deadlock would have happened long before we reach
+    //     ConstantPool::lock(), so reusing init_lock does not
+    //     increase the possibility of deadlock.
+    return _pool_holder->init_lock();
+  } else {
+    return NULL;
+  }
+}
+
 int ConstantPool::cp_to_object_index(int cp_index) {
...
@@ -208,7 +218,9 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS
   Symbol* name = NULL;
   Handle       loader;
-  { MonitorLockerEx ml(this_oop->lock());
+  {
+    oop cplock = this_oop->lock();
+    ObjectLocker ol(cplock, THREAD, cplock != NULL);
 
     if (this_oop->tag_at(which).is_unresolved_klass()) {
       if (this_oop->tag_at(which).is_unresolved_klass_in_error()) {
...
...
@@ -255,7 +267,8 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS
   bool throw_orig_error = false;
   {
-    MonitorLockerEx ml(this_oop->lock());
+    oop cplock = this_oop->lock();
+    ObjectLocker ol(cplock, THREAD, cplock != NULL);
 
     // some other thread has beaten us and has resolved the class.
     if (this_oop->tag_at(which).is_klass()) {
...
...
@@ -323,7 +336,8 @@ Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS
     }
     return k();
   } else {
-    MonitorLockerEx ml(this_oop->lock());
+    oop cplock = this_oop->lock();
+    ObjectLocker ol(cplock, THREAD, cplock != NULL);
     // Only updated constant pool - if it is resolved.
     do_resolve = this_oop->tag_at(which).is_unresolved_klass();
     if (do_resolve) {
...
...
@@ -619,7 +633,8 @@ void ConstantPool::save_and_throw_exception(constantPoolHandle this_oop, int whi
                                             int tag, TRAPS) {
   ResourceMark rm;
   Symbol* error = PENDING_EXCEPTION->klass()->name();
-  MonitorLockerEx ml(this_oop->lock());  // lock cpool to change tag.
+  oop cplock = this_oop->lock();
+  ObjectLocker ol(cplock, THREAD, cplock != NULL);  // lock cpool to change tag.
 
   int error_tag = (tag == JVM_CONSTANT_MethodHandle) ?
                   JVM_CONSTANT_MethodHandleInError : JVM_CONSTANT_MethodTypeInError;
...
...
@@ -780,7 +795,8 @@ oop ConstantPool::resolve_constant_at_impl(constantPoolHandle this_oop, int inde
   if (cache_index >= 0) {
     // Cache the oop here also.
     Handle result_handle(THREAD, result_oop);
-    MonitorLockerEx ml(this_oop->lock());  // don't know if we really need this
+    oop cplock = this_oop->lock();
+    ObjectLocker ol(cplock, THREAD, cplock != NULL);  // don't know if we really need this
     oop result = this_oop->resolved_references()->obj_at(cache_index);
     // Benign race condition:  resolved_references may already be filled in while we were trying to lock.
     // The important thing here is that all threads pick up the same result.
...
...
@@ -1043,24 +1059,13 @@ bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2,
   case JVM_CONSTANT_InvokeDynamic:
   {
-    int k1 = invoke_dynamic_bootstrap_method_ref_index_at(index1);
-    int k2 = cp2->invoke_dynamic_bootstrap_method_ref_index_at(index2);
-    bool match = compare_entry_to(k1, cp2, k2, CHECK_false);
-    if (!match)  return false;
-    k1 = invoke_dynamic_name_and_type_ref_index_at(index1);
-    k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
-    match = compare_entry_to(k1, cp2, k2, CHECK_false);
-    if (!match)  return false;
-    int argc = invoke_dynamic_argument_count_at(index1);
-    if (argc == cp2->invoke_dynamic_argument_count_at(index2)) {
-      for (int j = 0; j < argc; j++) {
-        k1 = invoke_dynamic_argument_index_at(index1, j);
-        k2 = cp2->invoke_dynamic_argument_index_at(index2, j);
-        match = compare_entry_to(k1, cp2, k2, CHECK_false);
-        if (!match)  return false;
-      }
-      return true;           // got through loop; all elements equal
-    }
+    int k1 = invoke_dynamic_name_and_type_ref_index_at(index1);
+    int k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
+    int i1 = invoke_dynamic_bootstrap_specifier_index(index1);
+    int i2 = cp2->invoke_dynamic_bootstrap_specifier_index(index2);
+    bool match = compare_entry_to(k1, cp2, k2, CHECK_false) &&
+                 compare_operand_to(i1, cp2, i2, CHECK_false);
+    return match;
   } break;
 
   case JVM_CONSTANT_String:
...
...
@@ -1095,6 +1100,80 @@ bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2,
 } // end compare_entry_to()
 
 
+// Resize the operands array with delta_len and delta_size.
+// Used in RedefineClasses for CP merge.
+void ConstantPool::resize_operands(int delta_len, int delta_size, TRAPS) {
+  int old_len  = operand_array_length(operands());
+  int new_len  = old_len + delta_len;
+  int min_len  = (delta_len > 0) ? old_len : new_len;
+
+  int old_size = operands()->length();
+  int new_size = old_size + delta_size;
+  int min_size = (delta_size > 0) ? old_size : new_size;
+
+  ClassLoaderData* loader_data = pool_holder()->class_loader_data();
+  Array<u2>* new_ops = MetadataFactory::new_array<u2>(loader_data, new_size, CHECK);
+
+  // Set index in the resized array for existing elements only
+  for (int idx = 0; idx < min_len; idx++) {
+    int offset = operand_offset_at(idx);                       // offset in original array
+    operand_offset_at_put(new_ops, idx, offset + 2*delta_len); // offset in resized array
+  }
+  // Copy the bootstrap specifiers only
+  Copy::conjoint_memory_atomic(operands()->adr_at(2*old_len),
+                               new_ops->adr_at(2*new_len),
+                               (min_size - 2*min_len) * sizeof(u2));
+  // Explicitly deallocate old operands array.
+  // Note, it is not needed for 7u backport.
+  if ( operands() != NULL) { // the safety check
+    MetadataFactory::free_array<u2>(loader_data, operands());
+  }
+  set_operands(new_ops);
+} // end resize_operands()
+
+
+// Extend the operands array with the length and size of the ext_cp operands.
+// Used in RedefineClasses for CP merge.
+void ConstantPool::extend_operands(constantPoolHandle ext_cp, TRAPS) {
+  int delta_len = operand_array_length(ext_cp->operands());
+  if (delta_len == 0) {
+    return; // nothing to do
+  }
+  int delta_size = ext_cp->operands()->length();
+
+  assert(delta_len  > 0 && delta_size > 0, "extended operands array must be bigger");
+
+  if (operand_array_length(operands()) == 0) {
+    ClassLoaderData* loader_data = pool_holder()->class_loader_data();
+    Array<u2>* new_ops = MetadataFactory::new_array<u2>(loader_data, delta_size, CHECK);
+    // The first element index defines the offset of second part
+    operand_offset_at_put(new_ops, 0, 2*delta_len); // offset in new array
+    set_operands(new_ops);
+  } else {
+    resize_operands(delta_len, delta_size, CHECK);
+  }
+} // end extend_operands()
+
+
+// Shrink the operands array to a smaller array with new_len length.
+// Used in RedefineClasses for CP merge.
+void ConstantPool::shrink_operands(int new_len, TRAPS) {
+  int old_len = operand_array_length(operands());
+  if (new_len == old_len) {
+    return; // nothing to do
+  }
+  assert(new_len < old_len, "shrunken operands array must be smaller");
+
+  int free_base  = operand_next_offset_at(new_len - 1);
+  int delta_len  = new_len - old_len;
+  int delta_size = 2*delta_len + free_base - operands()->length();
+
+  resize_operands(delta_len, delta_size, CHECK);
+} // end shrink_operands()
+
+
 void ConstantPool::copy_operands(constantPoolHandle from_cp, constantPoolHandle to_cp, TRAPS) {
...
...
@@ -1357,6 +1436,46 @@ int ConstantPool::find_matching_entry(int pattern_i,
 } // end find_matching_entry()
 
 
+// Compare this constant pool's bootstrap specifier at idx1 to the constant pool
+// cp2's bootstrap specifier at idx2.
+bool ConstantPool::compare_operand_to(int idx1, constantPoolHandle cp2, int idx2, TRAPS) {
+  int k1 = operand_bootstrap_method_ref_index_at(idx1);
+  int k2 = cp2->operand_bootstrap_method_ref_index_at(idx2);
+  bool match = compare_entry_to(k1, cp2, k2, CHECK_false);
+
+  if (!match) {
+    return false;
+  }
+  int argc = operand_argument_count_at(idx1);
+  if (argc == cp2->operand_argument_count_at(idx2)) {
+    for (int j = 0; j < argc; j++) {
+      k1 = operand_argument_index_at(idx1, j);
+      k2 = cp2->operand_argument_index_at(idx2, j);
+      match = compare_entry_to(k1, cp2, k2, CHECK_false);
+      if (!match) {
+        return false;
+      }
+    }
+    return true;           // got through loop; all elements equal
+  }
+  return false;
+} // end compare_operand_to()
+
+// Search constant pool search_cp for a bootstrap specifier that matches
+// this constant pool's bootstrap specifier at pattern_i index.
+// Return the index of a matching bootstrap specifier or (-1) if there is no match.
+int ConstantPool::find_matching_operand(int pattern_i,
+                    constantPoolHandle search_cp, int search_len, TRAPS) {
+
+  for (int i = 0; i < search_len; i++) {
+    bool found = compare_operand_to(pattern_i, search_cp, i, CHECK_(-1));
+    if (found) {
+      return i;
+    }
+  }
+  return -1;  // bootstrap specifier not found; return unused index (-1)
+} // end find_matching_operand()
+
+
 #ifndef PRODUCT
 
 const char* ConstantPool::printable_name_at(int which) {
...
...
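The new resize/extend/shrink_operands routines all manipulate one flat Array<u2>: a header of per-specifier offsets followed by the bootstrap specifiers themselves (bsm ref index, argc, argv[argc]), which is why resize_operands() shifts every stored offset by 2*delta_len when the header grows or shrinks. The sketch below restates that layout with plain C++ containers; the two-u2 offset packing and all names are illustrative assumptions, not the HotSpot implementation.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative layout of the operands array (all u2 slots):
//   [ 2*N offset slots | specifier 0 | specifier 1 | ... | specifier N-1 ]
// Each specifier is: bsm_ref_index, argc, argv[argc]; the header stores, per
// specifier, the slot where it starts.
struct OperandsSketch {
  std::vector<uint16_t> data;

  // Assumed packing: a 32-bit offset kept as two u2 slots (low, high).
  int offset_at(int i) const { return data[2 * i] | (data[2 * i + 1] << 16); }
  int bsm_ref_index_at(int i) const  { return data[offset_at(i) + 0]; } // _indy_bsm_offset  == 0
  int argument_count_at(int i) const { return data[offset_at(i) + 1]; } // _indy_argc_offset == 1
  int argument_index_at(int i, int j) const { return data[offset_at(i) + 2 + j]; } // _indy_argv_offset == 2
  int next_offset_at(int i) const { return offset_at(i) + 2 + argument_count_at(i); }
};

int main() {
  // One bootstrap specifier: bsm ref #7 with two static arguments (#9 and #11).
  OperandsSketch ops;
  ops.data = { 2, 0,          // offset header: specifier 0 starts at slot 2
               7, 2, 9, 11 }; // bsm_ref, argc, argv[0], argv[1]
  std::printf("bsm=%d argc=%d arg0=%d arg1=%d next=%d\n",
              ops.bsm_ref_index_at(0), ops.argument_count_at(0),
              ops.argument_index_at(0, 0), ops.argument_index_at(0, 1),
              ops.next_offset_at(0));
  return 0;
}
```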
src/share/vm/oops/constantPool.hpp
...
...
@@ -111,7 +111,6 @@ class ConstantPool : public Metadata {
     int             _version;
   } _saved;
 
-  Monitor*          _lock;
 
   void set_tags(Array<u1>* tags)               { _tags = tags; }
   void tag_at_put(int which, jbyte t)          { tags()->at_put(which, t); }
...
...
@@ -567,6 +566,47 @@ class ConstantPool : public Metadata {
     _indy_argc_offset = 1,  // u2 argc
     _indy_argv_offset = 2   // u2 argv[argc]
   };
 
+  // These functions are used in RedefineClasses for CP merge
+  int operand_offset_at(int bootstrap_specifier_index) {
+    assert(0 <= bootstrap_specifier_index &&
+           bootstrap_specifier_index < operand_array_length(operands()),
+           "Corrupted CP operands");
+    return operand_offset_at(operands(), bootstrap_specifier_index);
+  }
+  int operand_bootstrap_method_ref_index_at(int bootstrap_specifier_index) {
+    int offset = operand_offset_at(bootstrap_specifier_index);
+    return operands()->at(offset + _indy_bsm_offset);
+  }
+  int operand_argument_count_at(int bootstrap_specifier_index) {
+    int offset = operand_offset_at(bootstrap_specifier_index);
+    int argc = operands()->at(offset + _indy_argc_offset);
+    return argc;
+  }
+  int operand_argument_index_at(int bootstrap_specifier_index, int j) {
+    int offset = operand_offset_at(bootstrap_specifier_index);
+    return operands()->at(offset + _indy_argv_offset + j);
+  }
+  int operand_next_offset_at(int bootstrap_specifier_index) {
+    int offset = operand_offset_at(bootstrap_specifier_index) + _indy_argv_offset
+                   + operand_argument_count_at(bootstrap_specifier_index);
+    return offset;
+  }
+  // Compare a bootsrap specifier in the operands arrays
+  bool compare_operand_to(int bootstrap_specifier_index1, constantPoolHandle cp2,
+                          int bootstrap_specifier_index2, TRAPS);
+  // Find a bootsrap specifier in the operands array
+  int find_matching_operand(int bootstrap_specifier_index, constantPoolHandle search_cp,
+                            int operands_cur_len, TRAPS);
+  // Resize the operands array with delta_len and delta_size
+  void resize_operands(int delta_len, int delta_size, TRAPS);
+  // Extend the operands array with the length and size of the ext_cp operands
+  void extend_operands(constantPoolHandle ext_cp, TRAPS);
+  // Shrink the operands array to a smaller array with new_len length
+  void shrink_operands(int new_len, TRAPS);
+
+
   int invoke_dynamic_bootstrap_method_ref_index_at(int which) {
     assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
     int op_base = invoke_dynamic_operand_base(which);
...
...
@@ -782,8 +822,17 @@ class ConstantPool : public Metadata {
   void set_resolved_reference_length(int length) { _saved._resolved_reference_length = length; }
   int  resolved_reference_length() const  { return _saved._resolved_reference_length; }
-  void set_lock(Monitor* lock)            { _lock = lock; }
-  Monitor* lock()                         { return _lock; }
+
+  // lock() may return null -- constant pool updates may happen before this lock is
+  // initialized, because the _pool_holder has not been fully initialized and
+  // has not been registered into the system dictionary. In this case, no other
+  // thread can be modifying this constantpool, so no synchronization is
+  // necessary.
+  //
+  // Use cplock() like this:
+  //    oop cplock = cp->lock();
+  //    ObjectLocker ol(cplock, THREAD, cplock != NULL);
+  oop lock();
 
   // Decrease ref counts of symbols that are in the constant pool
   // when the holder class is unloaded
...
...
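The header comment above fixes the calling convention for the new oop-based lock. A small self-contained sketch of the same idiom follows, using a stand-in RAII locker; ObjectLockerStub, pool_lock, and FakeOop are invented for illustration and are not the HotSpot API. The third constructor argument is what lets callers tolerate a NULL lock while the holder class is not yet registered.

```cpp
#include <cstdio>

struct FakeOop { const char* who; };

// Stand-in for ObjectLocker: lock on construction, unlock on scope exit,
// and do nothing at all when told not to lock (the cplock != NULL case).
struct ObjectLockerStub {
  FakeOop* _obj;
  ObjectLockerStub(FakeOop* obj, bool do_lock) : _obj(do_lock ? obj : nullptr) {
    if (_obj != nullptr) std::printf("locked %s\n", _obj->who);
  }
  ~ObjectLockerStub() {
    if (_obj != nullptr) std::printf("unlocked %s\n", _obj->who);
  }
};

// Like ConstantPool::lock(): hand out the holder's init_lock, or NULL while
// the holder class is not fully set up (no other thread can touch the pool then).
static FakeOop* pool_lock(bool holder_registered) {
  static FakeOop init_lock = { "init_lock" };
  return holder_registered ? &init_lock : nullptr;
}

static void touch_pool(bool holder_registered) {
  FakeOop* cplock = pool_lock(holder_registered);
  ObjectLockerStub ol(cplock, cplock != nullptr); // same shape as: ObjectLocker ol(cplock, THREAD, cplock != NULL);
  std::printf("updating constant pool (registered=%d)\n", (int)holder_registered);
}

int main() {
  touch_pool(false); // early, un-registered holder: no locking needed
  touch_pool(true);  // normal case: guarded by the holder's init_lock
  return 0;
}
```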
src/share/vm/oops/cpCache.cpp
...
...
@@ -266,7 +266,8 @@ void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
   // the lock, so that when the losing writer returns, he can use the linked
   // cache entry.
-  MonitorLockerEx ml(cpool->lock());
+  oop cplock = cpool->lock();
+  ObjectLocker ol(cplock, Thread::current(), cplock != NULL);
   if (!is_f1_null()) {
     return;
   }
...
...
src/share/vm/oops/instanceKlass.cpp
...
...
@@ -54,6 +54,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.inline.hpp"
+#include "services/classLoadingService.hpp"
 #include "services/threadService.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
...
...
@@ -418,25 +419,6 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
   set_annotations(NULL);
 }
 
-volatile oop InstanceKlass::init_lock() const {
-  volatile oop lock = _init_lock;  // read once
-  assert((oop)lock != NULL || !is_not_initialized(),  // initialized or in_error state
-         "only fully initialized state can have a null lock");
-  return lock;
-}
-
-// Set the initialization lock to null so the object can be GC'ed.  Any racing
-// threads to get this lock will see a null lock and will not lock.
-// That's okay because they all check for initialized state after getting
-// the lock and return.
-void InstanceKlass::fence_and_clear_init_lock() {
-  // make sure previous stores are all done, notably the init_state.
-  OrderAccess::storestore();
-  klass_oop_store(&_init_lock, NULL);
-  assert(!is_not_initialized(), "class must be initialized now");
-}
-
 bool InstanceKlass::should_be_initialized() const {
   return !is_initialized();
...
...
@@ -473,7 +455,7 @@ void InstanceKlass::eager_initialize(Thread *thread) {
 void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
   EXCEPTION_MARK;
   volatile oop init_lock = this_oop->init_lock();
-  ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
+  ObjectLocker ol(init_lock, THREAD);
 
   // abort if someone beat us to the initialization
   if (!this_oop->is_not_initialized()) return;  // note: not equivalent to is_initialized()
...
...
@@ -492,7 +474,6 @@ void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
   } else {
     // linking successfull, mark class as initialized
     this_oop->set_init_state(fully_initialized);
-    this_oop->fence_and_clear_init_lock();
     // trace
     if (TraceClassInitialization) {
       ResourceMark rm(THREAD);
...
...
@@ -619,7 +600,7 @@ bool InstanceKlass::link_class_impl(
   // verification & rewriting
   {
     volatile oop init_lock = this_oop->init_lock();
-    ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
+    ObjectLocker ol(init_lock, THREAD);
     // rewritten will have been set if loader constraint error found
     // on an earlier link attempt
     // don't verify or rewrite if already rewritten
...
...
@@ -742,7 +723,7 @@ void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
   // Step 1
   {
     volatile oop init_lock = this_oop->init_lock();
-    ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
+    ObjectLocker ol(init_lock, THREAD);
 
     Thread *self = THREAD; // it's passed the current thread
...
...
@@ -890,9 +871,8 @@ void InstanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS)
 void InstanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
   volatile oop init_lock = this_oop->init_lock();
-  ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
+  ObjectLocker ol(init_lock, THREAD);
   this_oop->set_init_state(state);
-  this_oop->fence_and_clear_init_lock();
   ol.notify_all(CHECK);
 }
...
...
@@ -2312,7 +2292,29 @@ static void clear_all_breakpoints(Method* m) {
   m->clear_all_breakpoints();
 }
 
+
+void InstanceKlass::notify_unload_class(InstanceKlass* ik) {
+  // notify the debugger
+  if (JvmtiExport::should_post_class_unload()) {
+    JvmtiExport::post_class_unload(ik);
+  }
+
+  // notify ClassLoadingService of class unload
+  ClassLoadingService::notify_class_unloaded(ik);
+}
+
+void InstanceKlass::release_C_heap_structures(InstanceKlass* ik) {
+  // Clean up C heap
+  ik->release_C_heap_structures();
+  ik->constants()->release_C_heap_structures();
+}
+
 void InstanceKlass::release_C_heap_structures() {
+  // Can't release the constant pool here because the constant pool can be
+  // deallocated separately from the InstanceKlass for default methods and
+  // redefine classes.
+
   // Deallocate oop map cache
   if (_oop_map_cache != NULL) {
     delete _oop_map_cache;
...
...
src/share/vm/oops/instanceKlass.hpp
...
...
@@ -184,8 +184,9 @@ class InstanceKlass: public Klass {
   oop             _protection_domain;
   // Class signers.
   objArrayOop     _signers;
-  // Initialization lock.  Must be one per class and it has to be a VM internal
-  // object so java code cannot lock it (like the mirror)
+  // Lock for (1) initialization; (2) access to the ConstantPool of this class.
+  // Must be one per class and it has to be a VM internal object so java code
+  // cannot lock it (like the mirror).
   // It has to be an object not a Mutex because it's held through java calls.
   volatile oop    _init_lock;
...
...
@@ -236,7 +237,7 @@ class InstanceKlass: public Klass {
     _misc_rewritten            = 1 << 0, // methods rewritten.
     _misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops
     _misc_should_verify_class  = 1 << 2, // allow caching of preverification
-    _misc_is_anonymous         = 1 << 3, // has embedded _inner_classes field
+    _misc_is_anonymous         = 1 << 3, // has embedded _host_klass field
     _misc_is_contended         = 1 << 4, // marked with contended annotation
     _misc_has_default_methods  = 1 << 5  // class/superclass/implemented interfaces has default methods
   };
...
...
@@ -934,7 +935,9 @@ class InstanceKlass: public Klass {
   // referenced by handles.
   bool on_stack() const { return _constants->on_stack(); }
 
-  void release_C_heap_structures();
+  // callbacks for actions during class unloading
+  static void notify_unload_class(InstanceKlass* ik);
+  static void release_C_heap_structures(InstanceKlass* ik);
 
   // Parallel Scavenge and Parallel Old
   PARALLEL_GC_DECLS
...
...
@@ -968,6 +971,7 @@ class InstanceKlass: public Klass {
 #endif // INCLUDE_ALL_GCS
 
   u2 idnum_allocated_count() const      { return _idnum_allocated_count; }
+private:
   // initialization state
 #ifdef ASSERT
...
...
@@ -994,9 +998,10 @@ private:
     { OrderAccess::release_store_ptr(&_methods_cached_itable_indices, indices); }
 
   // Lock during initialization
-  volatile oop init_lock() const;
+public:
+  volatile oop init_lock() const     { return _init_lock; }
+private:
   void set_init_lock(oop value)      { klass_oop_store(&_init_lock, value); }
-  void fence_and_clear_init_lock();  // after fully_initialized
 
   // Offsets for memory management
   oop* adr_protection_domain() const { return (oop*)&this->_protection_domain;}
...
...
@@ -1022,6 +1027,8 @@ private:
   // Returns the array class with this class as element type
   Klass* array_klass_impl(bool or_null, TRAPS);
 
+  // Free CHeap allocated fields.
+  void release_C_heap_structures();
 public:
   // CDS support - remove and restore oops from metadata. Oops are not shared.
   virtual void remove_unshareable_info();
...
...
src/share/vm/prims/jvmtiEnv.cpp
...
...
@@ -259,7 +259,8 @@ JvmtiEnv::RetransformClasses(jint class_count, const jclass* classes) {
     // bytes to the InstanceKlass here because they have not been
     // validated and we're not at a safepoint.
     constantPoolHandle  constants(current_thread, ikh->constants());
-    MonitorLockerEx ml(constants->lock());    // lock constant pool while we query it
+    oop cplock = constants->lock();
+    ObjectLocker ol(cplock, current_thread, cplock != NULL);    // lock constant pool while we query it
 
     JvmtiClassFileReconstituter reconstituter(ikh);
     if (reconstituter.get_error() != JVMTI_ERROR_NONE) {
...
@@ -2417,7 +2418,8 @@ JvmtiEnv::GetConstantPool(oop k_mirror, jint* constant_pool_count_ptr, jint* con
instanceKlassHandle
ikh
(
thread
,
k_oop
);
constantPoolHandle
constants
(
thread
,
ikh
->
constants
());
MonitorLockerEx
ml
(
constants
->
lock
());
// lock constant pool while we query it
oop
cplock
=
constants
->
lock
();
ObjectLocker
ol
(
cplock
,
thread
,
cplock
!=
NULL
);
// lock constant pool while we query it
JvmtiConstantPoolReconstituter
reconstituter
(
ikh
);
if
(
reconstituter
.
get_error
()
!=
JVMTI_ERROR_NONE
)
{
...
...
src/share/vm/prims/jvmtiRedefineClasses.cpp
...
...
@@ -415,20 +415,26 @@ void VM_RedefineClasses::append_entry(constantPoolHandle scratch_cp,
     // this is an indirect CP entry so it needs special handling
     case JVM_CONSTANT_InvokeDynamic:
     {
-      // TBD: cross-checks and possible extra appends into CP and bsm operands
-      // are needed as well. This issue is tracked by a separate bug 8007037.
-      int bss_idx = scratch_cp->invoke_dynamic_bootstrap_specifier_index(scratch_i);
-      int ref_i = scratch_cp->invoke_dynamic_name_and_type_ref_index_at(scratch_i);
-      int new_ref_i = find_or_append_indirect_entry(scratch_cp, ref_i, merge_cp_p,
+      // Index of the bootstrap specifier in the operands array
+      int old_bs_i = scratch_cp->invoke_dynamic_bootstrap_specifier_index(scratch_i);
+      int new_bs_i = find_or_append_operand(scratch_cp, old_bs_i, merge_cp_p,
                                             merge_cp_length_p, THREAD);
-      if (new_ref_i != ref_i) {
+      // The bootstrap method NameAndType_info index
+      int old_ref_i = scratch_cp->invoke_dynamic_name_and_type_ref_index_at(scratch_i);
+      int new_ref_i = find_or_append_indirect_entry(scratch_cp, old_ref_i, merge_cp_p,
+                                                    merge_cp_length_p, THREAD);
+      if (new_bs_i != old_bs_i) {
         RC_TRACE(0x00080000,
-                 ("InvokeDynamic entry@%d name_and_type ref_index change: %d to %d",
-                  *merge_cp_length_p, ref_i, new_ref_i));
+                 ("InvokeDynamic entry@%d bootstrap_method_attr_index change: %d to %d",
+                  *merge_cp_length_p, old_bs_i, new_bs_i));
       }
-      (*merge_cp_p)->invoke_dynamic_at_put(*merge_cp_length_p, bss_idx, new_ref_i);
+      if (new_ref_i != old_ref_i) {
+        RC_TRACE(0x00080000,
+                 ("InvokeDynamic entry@%d name_and_type_index change: %d to %d",
+                  *merge_cp_length_p, old_ref_i, new_ref_i));
+      }
+      (*merge_cp_p)->invoke_dynamic_at_put(*merge_cp_length_p, new_bs_i, new_ref_i);
 
       if (scratch_i != *merge_cp_length_p) {
         // The new entry in *merge_cp_p is at a different index than
         // the new entry in scratch_cp so we need to map the index values.
...
...
@@ -492,6 +498,105 @@ int VM_RedefineClasses::find_or_append_indirect_entry(constantPoolHandle scratch
 } // end find_or_append_indirect_entry()
 
 
+// Append a bootstrap specifier into the merge_cp operands that is semantically equal
+// to the scratch_cp operands bootstrap specifier passed by the old_bs_i index.
+// Recursively append new merge_cp entries referenced by the new bootstrap specifier.
+void VM_RedefineClasses::append_operand(constantPoolHandle scratch_cp, int old_bs_i,
+       constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS) {
+
+  int old_ref_i = scratch_cp->operand_bootstrap_method_ref_index_at(old_bs_i);
+  int new_ref_i = find_or_append_indirect_entry(scratch_cp, old_ref_i, merge_cp_p,
+                                                merge_cp_length_p, THREAD);
+  if (new_ref_i != old_ref_i) {
+    RC_TRACE(0x00080000,
+             ("operands entry@%d bootstrap method ref_index change: %d to %d",
+              _operands_cur_length, old_ref_i, new_ref_i));
+  }
+
+  Array<u2>* merge_ops = (*merge_cp_p)->operands();
+  int new_bs_i = _operands_cur_length;
+
+  // We have _operands_cur_length == 0 when the merge_cp operands is empty yet.
+  // However, the operand_offset_at(0) was set in the extend_operands() call.
+  int new_base = (new_bs_i == 0) ? (*merge_cp_p)->operand_offset_at(0)
+                                 : (*merge_cp_p)->operand_next_offset_at(new_bs_i - 1);
+  int argc     = scratch_cp->operand_argument_count_at(old_bs_i);
+
+  ConstantPool::operand_offset_at_put(merge_ops, _operands_cur_length, new_base);
+  merge_ops->at_put(new_base++, new_ref_i);
+  merge_ops->at_put(new_base++, argc);
+
+  for (int i = 0; i < argc; i++) {
+    int old_arg_ref_i = scratch_cp->operand_argument_index_at(old_bs_i, i);
+    int new_arg_ref_i = find_or_append_indirect_entry(scratch_cp, old_arg_ref_i, merge_cp_p,
+                                                      merge_cp_length_p, THREAD);
+    merge_ops->at_put(new_base++, new_arg_ref_i);
+    if (new_arg_ref_i != old_arg_ref_i) {
+      RC_TRACE(0x00080000,
+               ("operands entry@%d bootstrap method argument ref_index change: %d to %d",
+                _operands_cur_length, old_arg_ref_i, new_arg_ref_i));
+    }
+  }
+  if (old_bs_i != _operands_cur_length) {
+    // The bootstrap specifier in *merge_cp_p is at a different index than
+    // that in scratch_cp so we need to map the index values.
+    map_operand_index(old_bs_i, new_bs_i);
+  }
+  _operands_cur_length++;
+} // end append_operand()
+
+
+int VM_RedefineClasses::find_or_append_operand(constantPoolHandle scratch_cp,
+      int old_bs_i, constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS) {
+
+  int new_bs_i = old_bs_i; // bootstrap specifier index
+  bool match = (old_bs_i < _operands_cur_length) &&
+               scratch_cp->compare_operand_to(old_bs_i, *merge_cp_p, old_bs_i, THREAD);
+
+  if (!match) {
+    // forward reference in *merge_cp_p or not a direct match
+    int found_i = scratch_cp->find_matching_operand(old_bs_i, *merge_cp_p,
+                                                    _operands_cur_length, THREAD);
+    if (found_i != -1) {
+      guarantee(found_i != old_bs_i, "compare_operand_to() and find_matching_operand() disagree");
+      // found a matching operand somewhere else in *merge_cp_p so just need a mapping
+      new_bs_i = found_i;
+      map_operand_index(old_bs_i, found_i);
+    } else {
+      // no match found so we have to append this bootstrap specifier to *merge_cp_p
+      append_operand(scratch_cp, old_bs_i, merge_cp_p, merge_cp_length_p, THREAD);
+      new_bs_i = _operands_cur_length - 1;
+    }
+  }
+  return new_bs_i;
+} // end find_or_append_operand()
+
+
+void VM_RedefineClasses::finalize_operands_merge(constantPoolHandle merge_cp, TRAPS) {
+  if (merge_cp->operands() == NULL) {
+    return;
+  }
+  // Shrink the merge_cp operands
+  merge_cp->shrink_operands(_operands_cur_length, CHECK);
+
+  if (RC_TRACE_ENABLED(0x00040000)) {
+    // don't want to loop unless we are tracing
+    int count = 0;
+    for (int i = 1; i < _operands_index_map_p->length(); i++) {
+      int value = _operands_index_map_p->at(i);
+      if (value != -1) {
+        RC_TRACE_WITH_THREAD(0x00040000, THREAD,
+          ("operands_index_map[%d]: old=%d new=%d", count, i, value));
+        count++;
+      }
+    }
+  }
+  // Clean-up
+  _operands_index_map_p = NULL;
+  _operands_cur_length = 0;
+  _operands_index_map_count = 0;
+} // end finalize_operands_merge()
+
+
 jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions(
              instanceKlassHandle the_class,
              instanceKlassHandle scratch_class) {
...
...
@@ -765,6 +870,31 @@ int VM_RedefineClasses::find_new_index(int old_index) {
 } // end find_new_index()
 
 
+// Find new bootstrap specifier index value for old bootstrap specifier index
+// value by seaching the index map. Returns unused index (-1) if there is
+// no mapped value for the old bootstrap specifier index.
+int VM_RedefineClasses::find_new_operand_index(int old_index) {
+  if (_operands_index_map_count == 0) {
+    // map is empty so nothing can be found
+    return -1;
+  }
+
+  if (old_index == -1 || old_index >= _operands_index_map_p->length()) {
+    // The old_index is out of range so it is not mapped.
+    // This should not happen in regular constant pool merging use.
+    return -1;
+  }
+
+  int value = _operands_index_map_p->at(old_index);
+  if (value == -1) {
+    // the old_index is not mapped
+    return -1;
+  }
+
+  return value;
+} // end find_new_operand_index()
+
+
 // Returns true if the current mismatch is due to a resolved/unresolved
 // class pair. Otherwise, returns false.
 bool VM_RedefineClasses::is_unresolved_class_mismatch(constantPoolHandle cp1,
...
...
@@ -1014,6 +1144,25 @@ void VM_RedefineClasses::map_index(constantPoolHandle scratch_cp,
 } // end map_index()
 
 
+// Map old_index to new_index as needed.
+void VM_RedefineClasses::map_operand_index(int old_index, int new_index) {
+  if (find_new_operand_index(old_index) != -1) {
+    // old_index is already mapped
+    return;
+  }
+
+  if (old_index == new_index) {
+    // no mapping is needed
+    return;
+  }
+
+  _operands_index_map_p->at_put(old_index, new_index);
+  _operands_index_map_count++;
+
+  RC_TRACE(0x00040000, ("mapped bootstrap specifier at index %d to %d", old_index, new_index));
+} // end map_index()
+
+
 // Merge old_cp and scratch_cp and return the results of the merge via
 // merge_cp_p. The number of entries in *merge_cp_p is returned via
 // merge_cp_length_p. The entries in old_cp occupy the same locations
...
...
@@ -1086,6 +1235,7 @@ bool VM_RedefineClasses::merge_constant_pools(constantPoolHandle old_cp,
   } // end for each old_cp entry
 
   ConstantPool::copy_operands(old_cp, *merge_cp_p, CHECK_0);
+  (*merge_cp_p)->extend_operands(scratch_cp, CHECK_0);
 
   // We don't need to sanity check that *merge_cp_length_p is within
   // *merge_cp_p bounds since we have the minimum on-entry check above.
...
...
@@ -1198,6 +1348,8 @@ bool VM_RedefineClasses::merge_constant_pools(constantPoolHandle old_cp,
         CHECK_0);
     }
 
+    finalize_operands_merge(*merge_cp_p, THREAD);
+
     RC_TRACE_WITH_THREAD(0x00020000, THREAD,
       ("after pass 1b: merge_cp_len=%d, scratch_i=%d, index_map_len=%d",
       *merge_cp_length_p, scratch_i, _index_map_count));
...
...
@@ -1270,6 +1422,11 @@ jvmtiError VM_RedefineClasses::merge_cp_and_rewrite(
   _index_map_count = 0;
   _index_map_p = new intArray(scratch_cp->length(), -1);
 
+  _operands_cur_length = ConstantPool::operand_array_length(old_cp->operands());
+  _operands_index_map_count = 0;
+  _operands_index_map_p = new intArray(ConstantPool::operand_array_length(scratch_cp->operands()), -1);
+
+  // reference to the cp holder is needed for copy_operands()
+  merge_cp->set_pool_holder(scratch_class());
   bool result = merge_constant_pools(old_cp, scratch_cp, &merge_cp,
...
...
@@ -1400,7 +1557,6 @@ bool VM_RedefineClasses::rewrite_cp_refs(instanceKlassHandle scratch_class,
   return true;
 } // end rewrite_cp_refs()
 
-
 // Rewrite constant pool references in the methods.
 bool VM_RedefineClasses::rewrite_cp_refs_in_methods(
        instanceKlassHandle scratch_class, TRAPS) {
...
...
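Taken together, the new routines give the constant-pool merge a second find-or-append pass for bootstrap specifiers: extend_operands() reserves worst-case space, find_or_append_operand() and map_operand_index() dedupe and remap, and finalize_operands_merge() shrinks the result. The sketch below shows only that dedupe-then-remap shape with ordinary containers; Spec, find_or_append, and the vectors are stand-ins, not the VM data structures.

```cpp
#include <cstdio>
#include <utility>
#include <vector>

// Minimal stand-in for a bootstrap specifier: bsm ref index plus static arguments.
struct Spec {
  int bsm;
  std::vector<int> args;
  bool operator==(const Spec& other) const { return bsm == other.bsm && args == other.args; }
};

// Reuse an equal specifier already present in `merged`, else append it, and
// record any old->new index mapping -- the shape of find_or_append_operand()
// plus map_operand_index() above.
static int find_or_append(std::vector<Spec>& merged, const Spec& s,
                          std::vector<std::pair<int, int> >& index_map, int old_i) {
  for (int i = 0; i < (int)merged.size(); i++) {
    if (merged[i] == s) {
      if (i != old_i) index_map.push_back(std::make_pair(old_i, i));
      return i;
    }
  }
  merged.push_back(s);
  int new_i = (int)merged.size() - 1;
  if (new_i != old_i) index_map.push_back(std::make_pair(old_i, new_i));
  return new_i;
}

int main() {
  std::vector<Spec> old_ops = { {7, {9, 11}} };            // operands already in the old pool
  std::vector<Spec> scratch = { {7, {9, 11}}, {8, {13}} }; // operands from the redefined class

  std::vector<Spec> merged = old_ops;                      // "extend" step: start from the old operands
  std::vector<std::pair<int, int> > index_map;

  for (int i = 0; i < (int)scratch.size(); i++) {
    find_or_append(merged, scratch[i], index_map, i);
  }
  // "finalize" step: merged now holds only the specifiers actually needed.
  std::printf("merged %d specifiers, %d remapped\n", (int)merged.size(), (int)index_map.size());
  return 0;
}
```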
src/share/vm/prims/jvmtiRedefineClasses.hpp
...
...
@@ -359,6 +359,13 @@ class VM_RedefineClasses: public VM_Operation {
   // _index_map_p contains any entries.
   int _index_map_count;
   intArray* _index_map_p;
+
+  // _operands_index_map_count is just an optimization for knowing if
+  // _operands_index_map_p contains any entries.
+  int _operands_cur_length;
+  int _operands_index_map_count;
+  intArray* _operands_index_map_p;
+
   // ptr to _class_count scratch_classes
   Klass** _scratch_classes;
   jvmtiError _res;
...
...
@@ -422,12 +429,19 @@ class VM_RedefineClasses: public VM_Operation {
   // Support for constant pool merging (these routines are in alpha order):
   void append_entry(constantPoolHandle scratch_cp, int scratch_i,
     constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS);
+  void append_operand(constantPoolHandle scratch_cp, int scratch_bootstrap_spec_index,
+    constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS);
+  void finalize_operands_merge(constantPoolHandle merge_cp, TRAPS);
   int find_or_append_indirect_entry(constantPoolHandle scratch_cp, int scratch_i,
     constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS);
+  int find_or_append_operand(constantPoolHandle scratch_cp, int scratch_bootstrap_spec_index,
+    constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS);
   int find_new_index(int old_index);
+  int find_new_operand_index(int old_bootstrap_spec_index);
   bool is_unresolved_class_mismatch(constantPoolHandle cp1, int index1,
     constantPoolHandle cp2, int index2);
   void map_index(constantPoolHandle scratch_cp, int old_index, int new_index);
+  void map_operand_index(int old_bootstrap_spec_index, int new_bootstrap_spec_index);
   bool merge_constant_pools(constantPoolHandle old_cp, constantPoolHandle scratch_cp,
     constantPoolHandle *merge_cp_p, int *merge_cp_length_p, TRAPS);
...
...
src/share/vm/services/memBaseline.cpp
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
...
@@ -23,9 +23,12 @@
  */
 #include "precompiled.hpp"
 #include "memory/allocation.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
 #include "services/memBaseline.hpp"
 #include "services/memTracker.hpp"
 
 MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
   {mtJavaHeap,   "Java Heap"},
   {mtClass,      "Class"},
...
...
@@ -149,6 +152,14 @@ bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records)
   return true;
 }
 
+// check if there is a safepoint in progress, if so, block the thread
+// for the safepoint
+void MemBaseline::check_safepoint(JavaThread* thr) {
+  if (SafepointSynchronize::is_synchronizing()) {
+    SafepointSynchronize::block(thr);
+  }
+}
+
 // baseline mmap'd memory records, generate overall summary and summaries by
 // memory types
 bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
...
...
@@ -344,16 +355,27 @@ bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
 // baseline a snapshot. If summary_only = false, memory usages aggregated by
 // callsites are also baselined.
+// The method call can be lengthy, especially when detail tracking info is
+// requested. So the method checks for safepoint explicitly.
 bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
-  MutexLockerEx snapshot_locker(snapshot._lock, true);
+  Thread* THREAD = Thread::current();
+  assert(THREAD->is_Java_thread(), "must be a JavaThread");
+  MutexLocker snapshot_locker(snapshot._lock);
   reset();
-  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
-               baseline_vm_summary(snapshot._vm_ptrs);
+
+  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
+  if (_baselined) {
+    check_safepoint((JavaThread*)THREAD);
+    _baselined = baseline_vm_summary(snapshot._vm_ptrs);
+  }
+
   _number_of_classes = snapshot.number_of_classes();
 
   if (!summary_only && MemTracker::track_callsite() && _baselined) {
-    _baselined = baseline_malloc_details(snapshot._alloc_ptrs) &&
-                 baseline_vm_details(snapshot._vm_ptrs);
+    check_safepoint((JavaThread*)THREAD);
+    _baselined = baseline_malloc_details(snapshot._alloc_ptrs);
+    if (_baselined) {
+      check_safepoint((JavaThread*)THREAD);
+      _baselined = baseline_vm_details(snapshot._vm_ptrs);
+    }
   }
   return _baselined;
 }
...
...
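The reworked MemBaseline::baseline() splits the summary and detail passes so a pending safepoint can be honored between the lengthy phases instead of being blocked for the whole walk. Below is a minimal sketch of that check-between-phases pattern; the atomic flag and the stub phase functions are assumptions standing in for SafepointSynchronize and the real baseline steps.

```cpp
#include <atomic>
#include <cstdio>

// Stand-ins: in the VM, the flag is SafepointSynchronize::is_synchronizing()
// and "block" parks the JavaThread until the safepoint operation finishes.
static std::atomic<bool> safepoint_requested(false);

static void check_safepoint() {
  if (safepoint_requested.load()) {
    std::printf("yielding to a pending safepoint between baseline phases\n");
    // SafepointSynchronize::block(thread) would park here in the VM.
  }
}

static bool baseline_malloc_summary() { return true; } // phase 1 (potentially long walk)
static bool baseline_vm_summary()     { return true; } // phase 2 (potentially long walk)

// Same shape as the reworked MemBaseline::baseline(): run one phase, give a
// pending safepoint a chance, then run the next, instead of chaining both with &&.
static bool baseline() {
  bool ok = baseline_malloc_summary();
  if (ok) {
    check_safepoint();
    ok = baseline_vm_summary();
  }
  return ok;
}

int main() {
  safepoint_requested = true;
  std::printf("baselined=%d\n", (int)baseline());
  return 0;
}
```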
src/share/vm/services/memBaseline.hpp
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
...
@@ -330,6 +330,9 @@ class MemBaseline VALUE_OBJ_CLASS_SPEC {
   // should not use copy constructor
   MemBaseline(MemBaseline& copy) { ShouldNotReachHere(); }
 
+  // check and block at a safepoint
+  static inline void check_safepoint(JavaThread* thr);
+
  public:
   // create a memory baseline
   MemBaseline();
...
...
src/share/vm/services/memTracker.cpp
...
...
@@ -573,7 +573,7 @@ void MemTracker::thread_exiting(JavaThread* thread) {
 // baseline current memory snapshot
 bool MemTracker::baseline() {
-  MutexLockerEx lock(_query_lock, true);
+  MutexLocker lock(_query_lock);
   MemSnapshot* snapshot = get_snapshot();
   if (snapshot != NULL) {
     return _baseline.baseline(*snapshot, false);
...
...
@@ -584,7 +584,7 @@ bool MemTracker::baseline() {
 // print memory usage from current snapshot
 bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
   MemBaseline  baseline;
-  MutexLockerEx lock(_query_lock, true);
+  MutexLocker  lock(_query_lock);
   MemSnapshot* snapshot = get_snapshot();
   if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
     BaselineReporter reporter(out, unit);
...
...
@@ -597,7 +597,7 @@ bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool sum
 // Whitebox API for blocking until the current generation of NMT data has been merged
 bool MemTracker::wbtest_wait_for_data_merge() {
   // NMT can't be shutdown while we're holding _query_lock
-  MutexLockerEx lock(_query_lock, true);
+  MutexLocker lock(_query_lock);
   assert(_worker_thread != NULL, "Invalid query");
   // the generation at query time, so NMT will spin till this generation is processed
   unsigned long generation_at_query_time = SequenceGenerator::current_generation();
...
...
@@ -641,7 +641,7 @@ bool MemTracker::wbtest_wait_for_data_merge() {
 // compare memory usage between current snapshot and baseline
 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
-  MutexLockerEx lock(_query_lock, true);
+  MutexLocker lock(_query_lock);
   if (_baseline.baselined()) {
     MemBaseline baseline;
     MemSnapshot* snapshot = get_snapshot();
...
...