openanolis / dragonwell8_hotspot · Commit 67a31c0f
Commit 67a31c0f
Merge commit, authored June 20, 2008 by jcoomes
Parents: f45f3f36, dfd22856
Showing 2 changed files with 69 additions and 25 deletions (+69 −25)
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp  (+18 −19)
src/share/vm/memory/cardTableModRefBS.cpp  (+51 −6)
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp @ 67a31c0f
@@ -1004,6 +1004,9 @@ void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
   DEBUG_ONLY(mark_bitmap()->verify_clear();)
   DEBUG_ONLY(summary_data().verify_clear();)
+
+  // Have worker threads release resources the next time they run a task.
+  gc_task_manager()->release_all_resources();
 }
 
 void PSParallelCompact::post_compact()
@@ -1949,12 +1952,6 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   TimeStamp compaction_start;
   TimeStamp collection_exit;
 
-  // "serial_CM" is needed until the parallel implementation
-  // of the move and update is done.
-  ParCompactionManager* serial_CM = new ParCompactionManager();
-  // Don't initialize more than once.
-  // serial_CM->initialize(&summary_data(), mark_bitmap());
-
   ParallelScavengeHeap* heap = gc_heap();
   GCCause::Cause gc_cause = heap->gc_cause();
   PSYoungGen* young_gen = heap->young_gen();
@@ -1969,6 +1966,10 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   PreGCValues pre_gc_values;
   pre_compact(&pre_gc_values);
 
+  // Get the compaction manager reserved for the VM thread.
+  ParCompactionManager* const vmthread_cm =
+    ParCompactionManager::manager_array(gc_task_manager()->workers());
+
   // Place after pre_compact() where the number of invocations is incremented.
   AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
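The hunks above retire the per-collection new ParCompactionManager() in favour of looking up the slot that ParCompactionManager::manager_array reserves for the VM thread, at the index just past the last GC worker (gc_task_manager()->workers()). A minimal standalone sketch of that reserved-slot pattern, assuming an array of workers + 1 managers allocated once at startup; the ManagerArray and CompactionManager names here are illustrative, not HotSpot's:

#include <cstddef>
#include <vector>

// Stand-in for the per-thread marking/compaction state.
struct CompactionManager {};

class ManagerArray {
public:
  // workers + 1 slots, allocated once; the extra slot at index
  // `workers` is reserved for the VM thread.
  explicit ManagerArray(std::size_t workers) : _slots(workers + 1) {}

  CompactionManager* manager(std::size_t index) { return &_slots[index]; }
  CompactionManager* vmthread_manager() { return &_slots[_slots.size() - 1]; }

private:
  std::vector<CompactionManager> _slots;
};

int main() {
  ManagerArray managers(4);                          // four GC worker threads
  CompactionManager* vmthread_cm = managers.vmthread_manager();
  (void)vmthread_cm;  // reused on every GC cycle; nothing is new'd per collection
  return 0;
}

Reusing a preallocated slot every cycle avoids repeated per-GC allocation of the manager and its resources, which appears to be what this commit is cleaning up.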
@@ -2008,7 +2009,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   bool marked_for_unloading = false;
 
   marking_start.update();
-  marking_phase(serial_CM, maximum_heap_compaction);
+  marking_phase(vmthread_cm, maximum_heap_compaction);
 
 #ifndef PRODUCT
   if (TraceParallelOldGCMarkingPhase) {
@@ -2039,7 +2040,7 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
 #endif
 
   bool max_on_system_gc = UseMaximumCompactionOnSystemGC && is_system_gc;
-  summary_phase(serial_CM, maximum_heap_compaction || max_on_system_gc);
+  summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
 
 #ifdef ASSERT
   if (VerifyParallelOldWithMarkSweep &&
@@ -2067,13 +2068,13 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
     // code can use the the forwarding pointers to
     // check the new pointer calculation.  The restore_marks()
     // has to be done before the real compact.
-    serial_CM->set_action(ParCompactionManager::VerifyUpdate);
-    compact_perm(serial_CM);
-    compact_serial(serial_CM);
-    serial_CM->set_action(ParCompactionManager::ResetObjects);
-    compact_perm(serial_CM);
-    compact_serial(serial_CM);
-    serial_CM->set_action(ParCompactionManager::UpdateAndCopy);
+    vmthread_cm->set_action(ParCompactionManager::VerifyUpdate);
+    compact_perm(vmthread_cm);
+    compact_serial(vmthread_cm);
+    vmthread_cm->set_action(ParCompactionManager::ResetObjects);
+    compact_perm(vmthread_cm);
+    compact_serial(vmthread_cm);
+    vmthread_cm->set_action(ParCompactionManager::UpdateAndCopy);
 
     // For debugging only
     PSMarkSweep::restore_marks();
@@ -2084,16 +2085,14 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
     compaction_start.update();
     // Does the perm gen always have to be done serially because
     // klasses are used in the update of an object?
-    compact_perm(serial_CM);
+    compact_perm(vmthread_cm);
     if (UseParallelOldGCCompacting) {
       compact();
     } else {
-      compact_serial(serial_CM);
+      compact_serial(vmthread_cm);
     }
-    delete serial_CM;
-
     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
     // done before resizing.
     post_compact();
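Net effect of the psParallelCompact.cpp changes: the heap-allocated serial_CM is gone entirely. Marking, the summary phase, the debug-only verify/reset passes, and the serial compaction fallback all run against vmthread_cm, the manager permanently reserved for the VM thread, and pre_compact() now asks the worker threads to release their resources the next time they run a task.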
src/share/vm/memory/cardTableModRefBS.cpp @ 67a31c0f
@@ -196,6 +196,8 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
   assert(_whole_heap.contains(new_region),
          "attempt to cover area not in reserved area");
   debug_only(verify_guard();)
+  // collided is true if the expansion would push into another committed region
+  debug_only(bool collided = false;)
   int const ind = find_covering_region_by_base(new_region.start());
   MemRegion const old_region = _covered[ind];
   assert(old_region.start() == new_region.start(), "just checking");
@@ -211,12 +213,36 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
   }
   // Align the end up to a page size (starts are already aligned).
   jbyte* const new_end = byte_after(new_region.last());
-  HeapWord* const new_end_aligned =
+  HeapWord* new_end_aligned =
     (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
   assert(new_end_aligned >= (HeapWord*) new_end,
          "align up, but less");
+  int ri = 0;
+  for (ri = 0; ri < _cur_covered_regions; ri++) {
+    if (ri != ind) {
+      if (_committed[ri].contains(new_end_aligned)) {
+        assert((new_end_aligned >= _committed[ri].start()) &&
+               (_committed[ri].start() > _committed[ind].start()),
+               "New end of committed region is inconsistent");
+        new_end_aligned = _committed[ri].start();
+        assert(new_end_aligned > _committed[ind].start(),
+               "New end of committed region is before start");
+        debug_only(collided = true;)
+        // Should only collide with 1 region
+        break;
+      }
+    }
+  }
+#ifdef ASSERT
+  for (++ri; ri < _cur_covered_regions; ri++) {
+    assert(!_committed[ri].contains(new_end_aligned),
+           "New end of committed region is in a second committed region");
+  }
+#endif
   // The guard page is always committed and should not be committed over.
-  HeapWord* const new_end_for_commit = MIN2(new_end_aligned, _guard_region.start());
+  HeapWord* const new_end_for_commit = MIN2(new_end_aligned,
+                                            _guard_region.start());
   if (new_end_for_commit > cur_committed.end()) {
     // Must commit new pages.
     MemRegion const new_committed =
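In the hunk above, aligning the new committed end up to a page boundary can push it into a neighbouring committed region, so the code clamps new_end_aligned back to that neighbour's start and records the collision (debug builds only) for the assertions further down. A small self-contained sketch of that align-up-then-clamp step, using plain integers; Region, align_up, and clamp_new_end are illustrative names, not HotSpot's:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Half-open region [start, end), mirroring MemRegion::contains() semantics.
struct Region {
  std::uintptr_t start, end;
  bool contains(std::uintptr_t p) const { return start <= p && p < end; }
};

// Round p up to the next multiple of page (page must be a power of two).
static std::uintptr_t align_up(std::uintptr_t p, std::uintptr_t page) {
  return (p + page - 1) & ~(page - 1);
}

// Align new_end up to a page boundary, then back off to the start of any
// neighbouring committed region the aligned end would fall into.
static std::uintptr_t clamp_new_end(std::uintptr_t new_end, std::uintptr_t page,
                                    const Region* committed, int nregions,
                                    int ind, bool* collided) {
  std::uintptr_t aligned = align_up(new_end, page);
  assert(aligned >= new_end && "align up, but less");
  for (int ri = 0; ri < nregions; ri++) {
    if (ri != ind && committed[ri].contains(aligned)) {
      aligned = committed[ri].start;   // clamp to the neighbour's start
      *collided = true;                // should only collide with one region
      break;
    }
  }
  return aligned;
}

int main() {
  // Expanding region 0: 0x14100 aligns up to 0x15000, which lands inside
  // region 1, so the end is clamped back to 0x14800 and the collision noted.
  const Region committed[2] = { {0x10000, 0x14000}, {0x14800, 0x18000} };
  bool collided = false;
  std::uintptr_t end = clamp_new_end(0x14100, 0x1000, committed, 2, 0, &collided);
  std::printf("end=0x%lx collided=%d\n",
              static_cast<unsigned long>(end), collided ? 1 : 0);
  return 0;
}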
@@ -239,9 +265,11 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
     if (!uncommit_region.is_empty()) {
       if (!os::uncommit_memory((char*)uncommit_region.start(),
                                uncommit_region.byte_size())) {
-        // Do better than this for Merlin
-        vm_exit_out_of_memory(uncommit_region.byte_size(),
-          "card table contraction");
+        assert(false, "Card table contraction failed");
+        // The call failed so don't change the end of the
+        // committed region.  This is better than taking the
+        // VM down.
+        new_end_aligned = _committed[ind].end();
       }
     }
   }
@@ -257,8 +285,25 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
   }
   assert(index_for(new_region.last()) < (int) _guard_index,
     "The guard card will be overwritten");
-  jbyte* const end = byte_after(new_region.last());
+  // This line commented out cleans the newly expanded region and
+  // not the aligned up expanded region.
+  // jbyte* const end = byte_after(new_region.last());
+  jbyte* const end = (jbyte*) new_end_for_commit;
+  assert((end >= byte_after(new_region.last())) || collided,
+         "Expect to be beyond new region unless impacting another region");
   // do nothing if we resized downward.
+#ifdef ASSERT
+  for (int ri = 0; ri < _cur_covered_regions; ri++) {
+    if (ri != ind) {
+      // The end of the new committed region should not
+      // be in any existing region unless it matches
+      // the start of the next region.
+      assert(!_committed[ri].contains(end) ||
+             (_committed[ri].start() == (HeapWord*) end),
+             "Overlapping committed regions");
+    }
+  }
+#endif
   if (entry < end) {
     memset(entry, clean_card,
       pointer_delta(end, entry, sizeof(jbyte)));
   }
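The contraction path two hunks up replaces a hard vm_exit_out_of_memory with graceful degradation: if the OS refuses to uncommit the pages, the committed end is left at its old, larger value rather than taking the VM down. A compact sketch of that fallback, with a stubbed hook standing in for os::uncommit_memory; try_uncommit and shrink_committed_end are hypothetical names for illustration only:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stub standing in for os::uncommit_memory(); returns false here to
// simulate the failure path this hunk handles.
static bool try_uncommit(char* /*start*/, std::size_t /*bytes*/) { return false; }

// Shrink a committed range ending at old_end so it ends at desired_end.
// If the pages cannot be given back, keep the old end: a still-committed
// card table is better than exiting the VM.
static std::uintptr_t shrink_committed_end(std::uintptr_t desired_end,
                                           std::uintptr_t old_end) {
  if (desired_end < old_end &&
      !try_uncommit(reinterpret_cast<char*>(desired_end),
                    static_cast<std::size_t>(old_end - desired_end))) {
    // Debug builds would flag this (assert(false, ...) in the diff);
    // product builds simply carry on with the larger region.
    return old_end;
  }
  return desired_end;
}

int main() {
  std::uintptr_t end = shrink_committed_end(0x14000, 0x18000);
  std::printf("committed end stays at 0x%lx\n", static_cast<unsigned long>(end));
  return 0;
}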