openanolis / dragonwell8_hotspot
Commit 0873bab8, authored Feb 08, 2010 by johnc
Merge commit. Parents: 6a213a49, f65fe92c
Showing 11 changed files with 96 additions and 31 deletions (+96, -31)
src/share/vm/c1/c1_Runtime1.cpp                              +16  -8
src/share/vm/classfile/javaClasses.cpp                       +14  -3
src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp     +17  -1
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp        +2   -0
src/share/vm/includeDB_core                                  +1   -0
src/share/vm/memory/barrierSet.hpp                           +0   -2
src/share/vm/memory/barrierSet.inline.hpp                    +4   -14
src/share/vm/memory/referenceProcessor.hpp                   +5   -2
src/share/vm/runtime/arguments.cpp                           +24  -0
src/share/vm/runtime/arguments.hpp                           +2   -0
src/share/vm/runtime/stubRoutines.cpp                        +11  -1
src/share/vm/c1/c1_Runtime1.cpp
@@ -1075,6 +1075,7 @@ enum {
 };
 
+// Below length is the # elements copied.
 template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                           oopDesc* dst, T* dst_addr,
                                           int length) {
@@ -1083,22 +1084,22 @@ template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
   // barrier. The assert will fail if this is not the case.
   // Note that we use the non-virtual inlineable variant of write_ref_array.
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_opt(),
-         "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
   if (src == dst) {
     // same object, no check
+    bs->write_ref_array_pre(dst_addr, length);
     Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-    bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                  (HeapWord*)(dst_addr + length)));
+    bs->write_ref_array((HeapWord*)dst_addr, length);
     return ac_ok;
   } else {
     klassOop bound = objArrayKlass::cast(dst->klass())->element_klass();
     klassOop stype = objArrayKlass::cast(src->klass())->element_klass();
     if (stype == bound || Klass::cast(stype)->is_subtype_of(bound)) {
       // Elements are guaranteed to be subtypes, so no check necessary
+      bs->write_ref_array_pre(dst_addr, length);
       Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
-      bs->write_ref_array(MemRegion((HeapWord*)dst_addr,
-                                    (HeapWord*)(dst_addr + length)));
+      bs->write_ref_array((HeapWord*)dst_addr, length);
       return ac_ok;
     }
   }
@@ -1162,9 +1163,16 @@ JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
 #endif
   if (num == 0) return;
-  Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
   BarrierSet* bs = Universe::heap()->barrier_set();
-  bs->write_ref_array(MemRegion(dst, dst + num));
+  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
+  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
+  if (UseCompressedOops) {
+    bs->write_ref_array_pre((narrowOop*)dst, num);
+  } else {
+    bs->write_ref_array_pre((oop*)dst, num);
+  }
+  Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
+  bs->write_ref_array(dst, num);
 JRT_END
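Both branches of obj_arraycopy_work, and the rewritten Runtime1::oop_arraycopy above, now follow the same shape: issue the pre-barrier over the destination slots, perform the element-wise copy, then issue the post-barrier by start address and element count rather than by MemRegion. Below is a minimal standalone sketch of that ordering; the SketchBarrierSet type, the printing, and the main driver are illustrative stand-ins, not the HotSpot BarrierSet API.

// Standalone sketch (not HotSpot code) of the pre-barrier / copy / post-barrier
// ordering used after this change. A SATB-style pre-barrier must see the old
// destination values, so it runs before the copy; the card-marking post-barrier
// runs after, addressed by start pointer and element count.
#include <cstdio>

struct SketchBarrierSet {
  void write_ref_array_pre(void** dst, int length) {   // hypothetical stand-in
    std::printf("pre-barrier: %d slots at %p\n", length, (void*)dst);
  }
  void write_ref_array(void** dst, int length) {        // hypothetical stand-in
    std::printf("post-barrier: %d slots at %p\n", length, (void*)dst);
  }
};

static void sketch_oop_arraycopy(SketchBarrierSet* bs,
                                 void** src, void** dst, int length) {
  bs->write_ref_array_pre(dst, length);   // 1. pre-barrier sees old dst values
  for (int i = 0; i < length; i++) {      // 2. element-wise copy
    dst[i] = src[i];
  }
  bs->write_ref_array(dst, length);       // 3. post-barrier over the same slots
}

int main() {
  void* a[4] = {};
  void* b[4] = {};
  SketchBarrierSet bs;
  sketch_oop_arraycopy(&bs, a, b, 4);
  return 0;
}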
src/share/vm/classfile/javaClasses.cpp
@@ -1121,10 +1121,23 @@ class BacktraceBuilder: public StackObj {
   }
 
   void flush() {
+    // The following appears to have been an optimization to save from
+    // doing a barrier for each individual store into the _methods array,
+    // but rather to do it for the entire array after the series of writes.
+    // That optimization seems to have been lost when compressed oops was
+    // implemented. However, the extra card-marks below was left in place,
+    // but is now redundant because the individual stores into the
+    // _methods array already execute the barrier code. CR 6918185 has
+    // been filed so the original code may be restored by deferring the
+    // barriers until after the entire sequence of stores, thus re-enabling
+    // the intent of the original optimization. In the meantime the redundant
+    // card mark below is now disabled.
     if (_dirty && _methods != NULL) {
+#if 0
       BarrierSet* bs = Universe::heap()->barrier_set();
       assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
       bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
+#endif
       _dirty = false;
     }
   }
@@ -1168,9 +1181,7 @@ class BacktraceBuilder: public StackObj {
       method = mhandle();
     }
 
-    _methods->obj_at_put(_index, method);
-    // bad for UseCompressedOops
-    // *_methods->obj_at_addr(_index) = method;
+    _methods->obj_at_put(_index, method);
     _bcis->ushort_at_put(_index, bci);
     _index++;
     _dirty = true;
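The long comment added to flush() contrasts two card-marking strategies for filling the backtrace _methods array: pay a barrier on every obj_at_put, or store raw values and issue a single array-wide barrier afterwards (the intent CR 6918185 would restore). The following rough standalone sketch illustrates the two shapes; the types and helpers are hypothetical, not the BacktraceBuilder code.

// Sketch (illustrative stand-ins only) of the two strategies described in the
// flush() comment above.
#include <cstddef>
#include <vector>

struct Obj {};

// Hypothetical card-marking helpers.
static void mark_card_for(Obj** slot) { (void)slot; /* dirty the card covering slot */ }
static void mark_cards_for_range(Obj** base, std::size_t len) {
  for (std::size_t i = 0; i < len; i++) mark_card_for(base + i);
}

// Strategy 1: one barrier per store, which is what obj_at_put does today.
static void per_store_barrier(Obj** methods, Obj** values, std::size_t n) {
  for (std::size_t i = 0; i < n; i++) {
    methods[i] = values[i];
    mark_card_for(&methods[i]);
  }
}

// Strategy 2: raw stores, one deferred array-wide barrier at flush time; this
// is the original optimization whose write_ref_array call is now under #if 0.
static void deferred_barrier(Obj** methods, Obj** values, std::size_t n) {
  for (std::size_t i = 0; i < n; i++) {
    methods[i] = values[i];
  }
  mark_cards_for_range(methods, n);
}

int main() {
  std::vector<Obj*> methods(8, nullptr), values(8, nullptr);
  per_store_barrier(methods.data(), values.data(), methods.size());
  deferred_barrier(methods.data(), values.data(), methods.size());
  return 0;
}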
src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
@@ -300,7 +300,23 @@ jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
   int count;
   jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
   assert(cached_ptr != NULL, "bad cached card ptr");
-  assert(!is_young_card(cached_ptr), "shouldn't get a card in young region");
+
+  if (is_young_card(cached_ptr)) {
+    // The region containing cached_ptr has been freed during a clean up
+    // pause, reallocated, and tagged as young.
+    assert(cached_ptr != card_ptr, "shouldn't be");
+
+    // We've just inserted a new old-gen card pointer into the card count
+    // cache and evicted the previous contents of that count slot.
+    // The evicted card pointer has been determined to be in a young region
+    // and so cannot be the newly inserted card pointer (that will be
+    // in an old region).
+    // The count for newly inserted card will be set to zero during the
+    // insertion, so we don't want to defer the cleaning of the newly
+    // inserted card pointer.
+    assert(*defer == false, "deferring non-hot card");
+    return NULL;
+  }
 
   // The card pointer we obtained from card count cache is not hot
   // so do not store it in the cache; return it for immediate
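The new branch handles a card pointer evicted from the count cache whose region was freed during a cleanup pause and reallocated as young: such a card is dropped (NULL) rather than deferred or handed back for refinement. A toy single-slot model of that flow, with entirely hypothetical types, is sketched below.

// Toy model (not the real ConcurrentG1Refine code) of the eviction guard:
// inserting a card may evict a previously cached card; if the evicted card's
// region has since been re-used as a young region, it is dropped instead of
// being deferred for later cleaning.
#include <cstdio>

struct Card { bool in_young_region; };

struct ToyCountCache {
  Card* slot = nullptr;
  // Insert new_card; return what was evicted, or new_card itself if the slot
  // was empty (mirroring the shape of add_card_count()).
  Card* insert(Card* new_card) {
    Card* evicted = (slot != nullptr) ? slot : new_card;
    slot = new_card;
    return evicted;
  }
};

static Card* cache_insert_sketch(ToyCountCache& cache, Card* card, bool* defer) {
  *defer = false;
  Card* cached = cache.insert(card);
  if (cached->in_young_region) {
    // The evicted card's region was freed and re-tagged young; it cannot be
    // the card just inserted, and it must not be deferred.
    return nullptr;
  }
  return cached;  // not hot: hand it back for immediate refinement
}

int main() {
  ToyCountCache cache;
  bool defer = false;
  Card old_card = { true };   // region re-used as young after a cleanup pause
  Card new_card = { false };
  cache.insert(&old_card);
  Card* result = cache_insert_sketch(cache, &new_card, &defer);
  std::printf("%s\n", result != nullptr ? "card returned for refinement"
                                        : "evicted young card dropped (NULL)");
  return 0;
}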
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -2505,6 +2505,7 @@ G1CollectedHeap* G1CollectedHeap::heap() {
 }
 
 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
+  // always_do_update_barrier = false;
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
   // Call allocation profiler
   AllocationProfiler::iterate_since_last_gc();
@@ -2518,6 +2519,7 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
   // is set.
   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "derived pointer present"));
+  // always_do_update_barrier = true;
 }
 
 void G1CollectedHeap::do_collection_pause() {
src/share/vm/includeDB_core
@@ -175,6 +175,7 @@ arguments.cpp                           jvmtiExport.hpp
 arguments.cpp                           management.hpp
 arguments.cpp                           oop.inline.hpp
 arguments.cpp                           os_<os_family>.inline.hpp
+arguments.cpp                           referenceProcessor.hpp
 arguments.cpp                           universe.inline.hpp
 arguments.cpp                           vm_version_<arch>.hpp
src/share/vm/memory/barrierSet.hpp
@@ -124,8 +124,6 @@ public:
   // Below length is the # array elements being written
   virtual void write_ref_array_pre(oop* dst, int length)       {}
   virtual void write_ref_array_pre(narrowOop* dst, int length) {}
-  // Below MemRegion mr is expected to be HeapWord-aligned
-  inline void write_ref_array(MemRegion mr);
   // Below count is the # array elements being written, starting
   // at the address "start", which may not necessarily be HeapWord-aligned
   inline void write_ref_array(HeapWord* start, size_t count);
src/share/vm/memory/barrierSet.inline.hpp
@@ -42,16 +42,6 @@ void BarrierSet::write_ref_field(void* field, oop new_val) {
   }
 }
 
-void BarrierSet::write_ref_array(MemRegion mr) {
-  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
-  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end");
-  if (kind() == CardTableModRef) {
-    ((CardTableModRefBS*)this)->inline_write_ref_array(mr);
-  } else {
-    write_ref_array_work(mr);
-  }
-}
-
 // count is number of array elements being written
 void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
   assert(count <= (size_t)max_intx, "count too large");
@@ -61,12 +51,12 @@ void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
   // strictly necessary for current uses, but a case of good hygiene and,
   // if you will, aesthetics) and the second upward (this is essential for
   // current uses) to a HeapWord boundary, so we mark all cards overlapping
-  // this write. In the event that this evolves in the future to calling a
+  // this write. If this evolves in the future to calling a
   // logging barrier of narrow oop granularity, like the pre-barrier for G1
   // (mentioned here merely by way of example), we will need to change this
-  // interface, much like the pre-barrier one above, so it is "exactly precise"
-  // (if i may be allowed the adverbial redundancy for emphasis) and does not
-  // include narrow oop slots not included in the original write interval.
+  // interface, so it is "exactly precise" (if i may be allowed the adverbial
+  // redundancy for emphasis) and does not include narrow oop slots not
+  // included in the original write interval.
   HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
   HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
   // If compressed oops were not being used, these should already be aligned
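The surviving write_ref_array(HeapWord* start, size_t count) overload exists because, with compressed oops, an interval of narrowOop slots need not begin or end on a HeapWord boundary; the code therefore widens the interval outward before marking cards, as the retained comment explains. Here is a small standalone sketch of that widening arithmetic, using an assumed word size and local helpers rather than HotSpot's align_size_down/align_size_up.

// Sketch of the interval widening performed before card marking: round the
// start down and the end up to the word size so that every card overlapping
// the written narrowOop slots is covered. kWordSize and the helpers are
// assumptions for illustration, not HotSpot definitions.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static const std::uintptr_t kWordSize = 8;   // assumed 64-bit HeapWord

static std::uintptr_t align_down(std::uintptr_t p, std::uintptr_t a) {
  return p & ~(a - 1);
}
static std::uintptr_t align_up(std::uintptr_t p, std::uintptr_t a) {
  return (p + a - 1) & ~(a - 1);
}

int main() {
  std::uintptr_t start = 0x1004;            // 4-byte narrowOop slot, mid-word
  std::size_t    count = 3;                 // three narrowOop slots written
  std::uintptr_t end   = start + count * 4;

  std::uintptr_t aligned_start = align_down(start, kWordSize);  // 0x1000
  std::uintptr_t aligned_end   = align_up(end, kWordSize);      // 0x1010

  // Cards are then dirtied for [aligned_start, aligned_end), which covers the
  // whole write and possibly a few neighbouring slots, but never misses one.
  std::printf("write [%#lx, %#lx) -> mark [%#lx, %#lx)\n",
              (unsigned long)start, (unsigned long)end,
              (unsigned long)aligned_start, (unsigned long)aligned_end);
  return 0;
}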
src/share/vm/memory/referenceProcessor.hpp
@@ -263,10 +263,13 @@ class ReferenceProcessor : public CHeapObj {
                      int parallel_gc_threads = 1,
                      bool mt_processing = false,
                      bool discovered_list_needs_barrier = false);
 
   // RefDiscoveryPolicy values
-  enum {
+  enum DiscoveryPolicy {
     ReferenceBasedDiscovery = 0,
-    ReferentBasedDiscovery  = 1
+    ReferentBasedDiscovery  = 1,
+    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
+    DiscoveryPolicyMax      = ReferentBasedDiscovery
   };
 
   static void init_statics();
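Giving the enum a name and adding the DiscoveryPolicyMin/DiscoveryPolicyMax sentinels is what lets arguments.cpp (below) range-check the RefDiscoveryPolicy flag; the sentinels simply alias the first and last legal values. A tiny sketch of that idiom, with made-up names, follows.

// Sketch of the Min/Max-sentinel enum idiom: the sentinels alias the first and
// last legal values, so a range check stays correct if new policies are added
// between them. All names here are illustrative, not the HotSpot enum.
#include <cstdio>

enum SketchPolicy {
  SketchReferenceBased = 0,
  SketchReferentBased  = 1,
  SketchPolicyMin      = SketchReferenceBased,
  SketchPolicyMax      = SketchReferentBased
};

static bool policy_in_range(int value) {
  return value >= SketchPolicyMin && value <= SketchPolicyMax;
}

int main() {
  std::printf("policy 1 in range: %s\n", policy_in_range(1) ? "yes" : "no");
  std::printf("policy 2 in range: %s\n", policy_in_range(2) ? "yes" : "no");
  return 0;
}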
src/share/vm/runtime/arguments.cpp
@@ -1487,6 +1487,20 @@ bool Arguments::created_by_java_launcher() {
 //===========================================================================================================
 // Parsing of main arguments
 
+bool Arguments::verify_interval(uintx val, uintx min, uintx max,
+                                const char* name) {
+  // Returns true iff value is in the inclusive interval [min..max]
+  // false, otherwise.
+  if (val >= min && val <= max) {
+    return true;
+  }
+  jio_fprintf(defaultStream::error_stream(),
+              "%s of " UINTX_FORMAT " is invalid; must be between " UINTX_FORMAT
+              " and " UINTX_FORMAT "\n",
+              name, val, min, max);
+  return false;
+}
+
 bool Arguments::verify_percentage(uintx value, const char* name) {
   if (value <= 100) {
     return true;
@@ -1723,6 +1737,16 @@ bool Arguments::check_vm_args_consistency() {
     status = false;
   }
 
+  status = status && verify_interval(RefDiscoveryPolicy,
+                                     ReferenceProcessor::DiscoveryPolicyMin,
+                                     ReferenceProcessor::DiscoveryPolicyMax,
+                                     "RefDiscoveryPolicy");
+
+  // Limit the lower bound of this flag to 1 as it is used in a division
+  // expression.
+  status = status && verify_interval(TLABWasteTargetPercent,
+                                     1, 100, "TLABWasteTargetPercent");
+
   return status;
 }
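verify_interval mirrors the existing verify_percentage contract: return true when the flag lies in the inclusive range, otherwise print a diagnostic and let check_vm_args_consistency() fail. A trimmed-down standalone sketch of that contract is below; it uses plain fprintf and a local uintx typedef instead of jio_fprintf, defaultStream, and UINTX_FORMAT.

// Standalone sketch of the verify_interval contract added by this commit:
// true iff val lies in [min, max] inclusive; otherwise report and return false
// so overall argument checking fails. uintx is modelled as uintptr_t here.
#include <cstdint>
#include <cstdio>

typedef std::uintptr_t uintx;

static bool verify_interval_sketch(uintx val, uintx min, uintx max,
                                   const char* name) {
  if (val >= min && val <= max) {
    return true;
  }
  std::fprintf(stderr, "%s of %lu is invalid; must be between %lu and %lu\n",
               name, (unsigned long)val, (unsigned long)min, (unsigned long)max);
  return false;
}

int main() {
  bool status = true;
  // Mirrors the two new checks in check_vm_args_consistency().
  status = status && verify_interval_sketch(1, 0, 1, "RefDiscoveryPolicy");
  status = status && verify_interval_sketch(150, 1, 100, "TLABWasteTargetPercent");
  return status ? 0 : 1;
}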
src/share/vm/runtime/arguments.hpp
@@ -336,6 +336,8 @@ class Arguments : AllStatic {
   static bool is_bad_option(const JavaVMOption* option, jboolean ignore) {
     return is_bad_option(option, ignore, NULL);
   }
+  static bool verify_interval(uintx val, uintx min, uintx max,
+                              const char* name);
   static bool verify_percentage(uintx value, const char* name);
   static void describe_range_error(ArgsRange errcode);
   static ArgsRange check_memory_size(julong size, julong min_size);
src/share/vm/runtime/stubRoutines.cpp
@@ -196,11 +196,19 @@ void stubRoutines_init2() { StubRoutines::initialize2(); }
 // Default versions of arraycopy functions
 //
 
+static void gen_arraycopy_barrier_pre(oop* dest, size_t count) {
+    assert(count != 0, "count should be non-zero");
+    assert(count <= (size_t)max_intx, "count too large");
+    BarrierSet* bs = Universe::heap()->barrier_set();
+    assert(bs->has_write_ref_array_pre_opt(), "Must have pre-barrier opt");
+    bs->write_ref_array_pre(dest, (int)count);
+}
+
 static void gen_arraycopy_barrier(oop* dest, size_t count) {
     assert(count != 0, "count should be non-zero");
     BarrierSet* bs = Universe::heap()->barrier_set();
     assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-    bs->write_ref_array(MemRegion((HeapWord*)dest, (HeapWord*)(dest + count)));
+    bs->write_ref_array((HeapWord*)dest, count);
 }
 
 JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
@@ -240,6 +248,7 @@ JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
   SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
+  gen_arraycopy_barrier_pre(dest, count);
   Copy::conjoint_oops_atomic(src, dest, count);
   gen_arraycopy_barrier(dest, count);
 JRT_END
@@ -281,6 +290,7 @@ JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, siz
   SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
 #endif // !PRODUCT
   assert(count != 0, "count should be non-zero");
+  gen_arraycopy_barrier_pre((oop*)dest, count);
   Copy::arrayof_conjoint_oops(src, dest, count);
   gen_arraycopy_barrier((oop*)dest, count);
 JRT_END