openanolis / dragonwell8_hotspot

Commit 9bd2f522
Authored Jun 08, 2012 by jcoomes

Merge

Parents: 91c5ad3a, 83f713d9

Showing 7 changed files with 111 additions and 79 deletions (+111 -79)
src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp  +1  -1
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp       +66 -49
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp       +16 -9
src/share/vm/gc_implementation/g1/vm_operations_g1.cpp      +10 -10
src/share/vm/gc_implementation/g1/vm_operations_g1.hpp      +1  -1
src/share/vm/memory/binaryTreeDictionary.cpp                +5  -2
src/share/vm/memory/binaryTreeDictionary.hpp                +12 -7
src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp

@@ -293,7 +293,7 @@ void ConcurrentMarkThread::run() {
         // Java thread is waiting for a full GC to happen (e.g., it
         // called System.gc() with +ExplicitGCInvokesConcurrent).
         _sts.join();
-        g1h->increment_full_collections_completed(true /* concurrent */);
+        g1h->increment_old_marking_cycles_completed(true /* concurrent */);
         _sts.leave();
       }
       assert(_should_terminate, "just checking");
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -1299,6 +1299,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
       gc_prologue(true);
       increment_total_collections(true /* full gc */);
+      increment_old_marking_cycles_started();

       size_t g1h_prev_used = used();
       assert(used() == recalculate_used(), "Should be equal");
@@ -1492,22 +1493,28 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
       JavaThread::dirty_card_queue_set().abandon_logs();
       assert(!G1DeferredRSUpdate ||
              (G1DeferredRSUpdate &&
               (dirty_card_queue_set().completed_buffers_num() == 0)),
              "Should not be any");
     }

-  _young_list->reset_sampled_info();
-  // At this point there should be no regions in the
-  // entire heap tagged as young.
-  assert(check_young_list_empty(true /* check_heap */),
-         "young list should be empty at this point");
+    _young_list->reset_sampled_info();
+    // At this point there should be no regions in the
+    // entire heap tagged as young.
+    assert(check_young_list_empty(true /* check_heap */),
+           "young list should be empty at this point");

-  // Update the number of full collections that have been completed.
-  increment_full_collections_completed(false /* concurrent */);
+    // Update the number of full collections that have been completed.
+    increment_old_marking_cycles_completed(false /* concurrent */);

-  _hrs.verify_optional();
-  verify_region_sets_optional();
+    _hrs.verify_optional();
+    verify_region_sets_optional();

+    print_heap_after_gc();
+
+    // We must call G1MonitoringSupport::update_sizes() in the same scoping level
+    // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
+    // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
+    // before any GC notifications are raised.
+    g1mm()->update_sizes();
   }

-  print_heap_after_gc();
-
-  g1mm()->update_sizes();
-
   post_full_gc_dump();

   return true;
@@ -1888,7 +1895,8 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   _retained_old_gc_alloc_region(NULL),
   _expand_heap_after_alloc_failure(true),
   _surviving_young_words(NULL),
-  _full_collections_completed(0),
+  _old_marking_cycles_started(0),
+  _old_marking_cycles_completed(0),
   _in_cset_fast_test(NULL),
   _in_cset_fast_test_base(NULL),
   _dirty_cards_region_list(NULL),
@@ -2360,7 +2368,16 @@ void G1CollectedHeap::allocate_dummy_regions() {
 }
 #endif // !PRODUCT

-void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
+void G1CollectedHeap::increment_old_marking_cycles_started() {
+  assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
+         _old_marking_cycles_started == _old_marking_cycles_completed + 1,
+         err_msg("Wrong marking cycle count (started: %d, completed: %d)",
+                 _old_marking_cycles_started, _old_marking_cycles_completed));
+
+  _old_marking_cycles_started++;
+}
+
+void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);

   // We assume that if concurrent == true, then the caller is a
@@ -2368,11 +2385,6 @@ void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
   // Set. If there's ever a cheap way to check this, we should add an
   // assert here.

-  // We have already incremented _total_full_collections at the start
-  // of the GC, so total_full_collections() represents how many full
-  // collections have been started.
-  unsigned int full_collections_started = total_full_collections();
-
   // Given that this method is called at the end of a Full GC or of a
   // concurrent cycle, and those can be nested (i.e., a Full GC can
   // interrupt a concurrent cycle), the number of full collections
@@ -2382,21 +2394,21 @@ void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
   // This is the case for the inner caller, i.e. a Full GC.
   assert(concurrent ||
-         (full_collections_started == _full_collections_completed + 1) ||
-         (full_collections_started == _full_collections_completed + 2),
-         err_msg("for inner caller (Full GC): full_collections_started = %u "
-                 "is inconsistent with _full_collections_completed = %u",
-                 full_collections_started, _full_collections_completed));
+         (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
+         (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
+         err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
+                 "is inconsistent with _old_marking_cycles_completed = %u",
+                 _old_marking_cycles_started, _old_marking_cycles_completed));

   // This is the case for the outer caller, i.e. the concurrent cycle.
   assert(!concurrent ||
-         (full_collections_started == _full_collections_completed + 1),
+         (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
          err_msg("for outer caller (concurrent cycle): "
-                 "full_collections_started = %u "
-                 "is inconsistent with _full_collections_completed = %u",
-                 full_collections_started, _full_collections_completed));
+                 "_old_marking_cycles_started = %u "
+                 "is inconsistent with _old_marking_cycles_completed = %u",
+                 _old_marking_cycles_started, _old_marking_cycles_completed));

-  _full_collections_completed += 1;
+  _old_marking_cycles_completed += 1;

   // We need to clear the "in_progress" flag in the CM thread before
   // we wake up any waiters (especially when ExplicitInvokesConcurrent
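The two asserts above encode the nesting invariant spelled out in the comments: a Full GC can interrupt a concurrent cycle, so the inner caller (the Full GC) may see the started counter lead the completed counter by one or two, while the outer caller (the concurrent cycle) must see a lead of exactly one. A minimal standalone sketch of that counting discipline, written in plain C++ for illustration (CycleCounts is a made-up model, not a HotSpot type):

#include <cassert>

// Hypothetical model of the two counters; not HotSpot code.
struct CycleCounts {
  unsigned started   = 0;
  unsigned completed = 0;

  // Loosely follows the assert in increment_old_marking_cycles_started():
  // at most one cycle may already be in flight when a new one starts.
  void start() {
    assert(started == completed || started == completed + 1);
    ++started;
  }

  // Loosely follows increment_old_marking_cycles_completed(bool concurrent).
  void complete(bool concurrent) {
    // Inner caller (Full GC): one or two cycles may be outstanding.
    assert(concurrent || started == completed + 1 || started == completed + 2);
    // Outer caller (concurrent cycle): exactly one outstanding.
    assert(!concurrent || started == completed + 1);
    ++completed;
  }
};

int main() {
  CycleCounts c;
  c.start();          // a concurrent cycle begins
  c.start();          // a Full GC interrupts it
  c.complete(false);  // the inner Full GC finishes first
  c.complete(true);   // the interrupted concurrent cycle then ends
  assert(c.started == c.completed);
  return 0;
}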
@@ -2432,7 +2444,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
   assert_heap_not_locked();

   unsigned int gc_count_before;
-  unsigned int full_gc_count_before;
+  unsigned int old_marking_count_before;
   bool retry_gc;

   do {
@@ -2443,7 +2455,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
       // Read the GC count while holding the Heap_lock
       gc_count_before = total_collections();
-      full_gc_count_before = total_full_collections();
+      old_marking_count_before = _old_marking_cycles_started;
     }

     if (should_do_concurrent_full_gc(cause)) {
@@ -2458,7 +2470,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
       VMThread::execute(&op);

       if (!op.pause_succeeded()) {
-        if (full_gc_count_before == total_full_collections()) {
+        if (old_marking_count_before == _old_marking_cycles_started) {
           retry_gc = op.should_retry_gc();
         } else {
           // A Full GC happened while we were trying to schedule the
@@ -2486,7 +2498,7 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
         VMThread::execute(&op);
       } else {
         // Schedule a Full GC.
-        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
+        VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
         VMThread::execute(&op);
       }
     }
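The collect() changes above follow an optimistic snapshot-and-retry protocol: snapshot _old_marking_cycles_started under the Heap_lock, schedule the pause, and retry only if the pause failed and the counter has not moved; a moved counter means another thread already started a cycle (or a Full GC ran), so the request is satisfied. A standalone sketch of that protocol under simplified assumptions (std::mutex in place of the Heap_lock, a stubbed pause that ignores should_retry_gc(); names are illustrative, not HotSpot APIs):

#include <mutex>

static std::mutex heap_lock;                     // stand-in for Heap_lock
static unsigned old_marking_cycles_started = 0;  // stand-in for the G1 counter

// Stub for scheduling the initial-mark pause; pretend it always succeeds.
static bool schedule_initial_mark_pause() { return true; }

// Snapshot the counter, attempt the pause, and retry only if nobody
// else started a cycle in the meantime.
static void request_concurrent_cycle() {
  bool retry_gc;
  do {
    unsigned old_marking_count_before;
    {
      std::lock_guard<std::mutex> guard(heap_lock);
      old_marking_count_before = old_marking_cycles_started;  // snapshot
    }

    retry_gc = false;
    if (!schedule_initial_mark_pause()) {
      std::lock_guard<std::mutex> guard(heap_lock);
      // Counter unchanged: our request was not satisfied, so retry.
      // Counter moved: another collection already started the cycle.
      retry_gc = (old_marking_count_before == old_marking_cycles_started);
    }
  } while (retry_gc);
}

int main() {
  request_concurrent_cycle();
  return 0;
}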
@@ -3613,7 +3625,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   if (g1_policy()->during_initial_mark_pause()) {
     // We are about to start a marking cycle, so we increment the
     // full collection counter.
-    increment_total_full_collections();
+    increment_old_marking_cycles_started();
   }

   // if the log level is "finer" is on, we'll print long statistics information
   // in the collector policy code, so let's not print this as the output
@@ -3930,25 +3942,30 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
         gc_epilogue(false);
       }
     }

-  // The closing of the inner scope, immediately above, will complete
-  // logging at the "fine" level. The record_collection_pause_end() call
-  // above will complete logging at the "finer" level.
-  //
-  // It is not yet to safe, however, to tell the concurrent mark to
-  // start as we have some optional output below. We don't want the
-  // output from the concurrent mark thread interfering with this
-  // logging output either.
+    // The closing of the inner scope, immediately above, will complete
+    // logging at the "fine" level. The record_collection_pause_end() call
+    // above will complete logging at the "finer" level.
+    //
+    // It is not yet to safe, however, to tell the concurrent mark to
+    // start as we have some optional output below. We don't want the
+    // output from the concurrent mark thread interfering with this
+    // logging output either.

-  _hrs.verify_optional();
-  verify_region_sets_optional();
+    _hrs.verify_optional();
+    verify_region_sets_optional();

-  TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
-  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
+    TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
+    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

-  print_heap_after_gc();
+    print_heap_after_gc();

-  g1mm()->update_sizes();
+    // We must call G1MonitoringSupport::update_sizes() in the same scoping level
+    // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
+    // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
+    // before any GC notifications are raised.
+    g1mm()->update_sizes();
+  }

   if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
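The comment moved in alongside g1mm()->update_sizes() captures an RAII ordering constraint: the update must run in the same scope as the active TraceMemoryManagerStats object so that it executes before that object's destructor raises the GC notification. A tiny standalone illustration of the destructor-ordering point (NotifyOnExit is a made-up guard, not the HotSpot class):

#include <cstdio>

// Made-up RAII guard standing in for TraceMemoryManagerStats; the
// destructor is where the "GC notification" would be raised.
struct NotifyOnExit {
  ~NotifyOnExit() { std::printf("2: notification raised on scope exit\n"); }
};

int main() {
  {
    NotifyOnExit guard;
    // Work placed inside the scope runs before ~NotifyOnExit(), so the
    // notification observes the updated state.
    std::printf("1: sizes updated\n");
  }
  std::printf("3: after the guard's scope\n");
  return 0;
}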
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

@@ -359,10 +359,13 @@ private:
   // (c) cause == _g1_humongous_allocation
   bool should_do_concurrent_full_gc(GCCause::Cause cause);

-  // Keeps track of how many "full collections" (i.e., Full GCs or
-  // concurrent cycles) we have completed. The number of them we have
-  // started is maintained in _total_full_collections in CollectedHeap.
-  volatile unsigned int _full_collections_completed;
+  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
+  // concurrent cycles) we have started.
+  volatile unsigned int _old_marking_cycles_started;
+
+  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
+  // concurrent cycles) we have completed.
+  volatile unsigned int _old_marking_cycles_completed;

   // This is a non-product method that is helpful for testing. It is
   // called at the end of a GC and artificially expands the heap by
@@ -673,8 +676,12 @@ public:
            (size_t) _in_cset_fast_test_length * sizeof(bool));
   }

+  // This is called at the start of either a concurrent cycle or a Full
+  // GC to update the number of old marking cycles started.
+  void increment_old_marking_cycles_started();
+
   // This is called at the end of either a concurrent cycle or a Full
-  // GC to update the number of full collections completed. Those two
+  // GC to update the number of old marking cycles completed. Those two
   // can happen in a nested fashion, i.e., we start a concurrent
   // cycle, a Full GC happens half-way through it which ends first,
   // and then the cycle notices that a Full GC happened and ends
@@ -683,14 +690,14 @@ public:
   // false, the caller is the inner caller in the nesting (i.e., the
   // Full GC). If concurrent is true, the caller is the outer caller
   // in this nesting (i.e., the concurrent cycle). Further nesting is
-  // not currently supported. The end of the this call also notifies
+  // not currently supported. The end of this call also notifies
   // the FullGCCount_lock in case a Java thread is waiting for a full
   // GC to happen (e.g., it called System.gc() with
   // +ExplicitGCInvokesConcurrent).
-  void increment_full_collections_completed(bool concurrent);
+  void increment_old_marking_cycles_completed(bool concurrent);

-  unsigned int full_collections_completed() {
-    return _full_collections_completed;
+  unsigned int old_marking_cycles_completed() {
+    return _old_marking_cycles_completed;
   }

   G1HRPrinter* hr_printer() { return &_hr_printer; }
src/share/vm/gc_implementation/g1/vm_operations_g1.cpp

@@ -64,7 +64,7 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
   _should_initiate_conc_mark(should_initiate_conc_mark),
   _target_pause_time_ms(target_pause_time_ms),
   _should_retry_gc(false),
-  _full_collections_completed_before(0) {
+  _old_marking_cycles_completed_before(0) {
   guarantee(target_pause_time_ms > 0.0,
             err_msg("target_pause_time_ms = %1.6lf should be positive",
                     target_pause_time_ms));
@@ -112,11 +112,11 @@ void VM_G1IncCollectionPause::doit() {
   GCCauseSetter x(g1h, _gc_cause);
   if (_should_initiate_conc_mark) {
-    // It's safer to read full_collections_completed() here, given
+    // It's safer to read old_marking_cycles_completed() here, given
     // that noone else will be updating it concurrently. Since we'll
     // only need it if we're initiating a marking cycle, no point in
     // setting it earlier.
-    _full_collections_completed_before = g1h->full_collections_completed();
+    _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();

     // At this point we are supposed to start a concurrent cycle. We
     // will do so if one is not already in progress.
@@ -181,17 +181,17 @@ void VM_G1IncCollectionPause::doit_epilogue() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();

-  // In the doit() method we saved g1h->full_collections_completed()
-  // in the _full_collections_completed_before field. We have to
-  // wait until we observe that g1h->full_collections_completed()
+  // In the doit() method we saved g1h->old_marking_cycles_completed()
+  // in the _old_marking_cycles_completed_before field. We have to
+  // wait until we observe that g1h->old_marking_cycles_completed()
   // has increased by at least one. This can happen if a) we started
   // a cycle and it completes, b) a cycle already in progress
   // completes, or c) a Full GC happens.

   // If the condition has already been reached, there's no point in
   // actually taking the lock and doing the wait.
-  if (g1h->full_collections_completed() <=
-      _full_collections_completed_before) {
+  if (g1h->old_marking_cycles_completed() <=
+      _old_marking_cycles_completed_before) {
     // The following is largely copied from CMS

     Thread* thr = Thread::current();
@@ -200,8 +200,8 @@ void VM_G1IncCollectionPause::doit_epilogue() {
     ThreadToNativeFromVM native(jt);
     MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
-    while (g1h->full_collections_completed() <=
-           _full_collections_completed_before) {
+    while (g1h->old_marking_cycles_completed() <=
+           _old_marking_cycles_completed_before) {
       FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
     }
   }
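The epilogue above is the classic monitor idiom: re-check the condition in a loop around wait(), because a waiter can wake up spuriously or before the counter has actually advanced. A minimal standalone sketch of the same pattern in portable C++ (std::condition_variable standing in for FullGCCount_lock; not HotSpot code):

#include <condition_variable>
#include <mutex>

// Hypothetical stand-ins for the heap counter and its monitor.
static std::mutex gc_mutex;
static std::condition_variable gc_cv;
static unsigned old_marking_cycles_completed = 0;

// Waiter: blocks until the completed count exceeds the snapshot taken
// before the pause was scheduled (cf. doit_epilogue()).
void wait_for_cycle_to_complete(unsigned completed_before) {
  std::unique_lock<std::mutex> lock(gc_mutex);
  // A loop, not a single wait: spurious wakeups and other waiters mean
  // the predicate must be re-checked after every wakeup.
  while (old_marking_cycles_completed <= completed_before) {
    gc_cv.wait(lock);
  }
}

// Completer: bumps the counter and notifies all waiters
// (cf. increment_old_marking_cycles_completed()).
void signal_cycle_completed() {
  std::lock_guard<std::mutex> lock(gc_mutex);
  ++old_marking_cycles_completed;
  gc_cv.notify_all();
}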
src/share/vm/gc_implementation/g1/vm_operations_g1.hpp

@@ -80,7 +80,7 @@ private:
   bool         _should_initiate_conc_mark;
   bool         _should_retry_gc;
   double       _target_pause_time_ms;
-  unsigned int _full_collections_completed_before;
+  unsigned int _old_marking_cycles_completed_before;
 public:
   VM_G1IncCollectionPause(unsigned int   gc_count_before,
                           size_t         word_size,
src/share/vm/memory/binaryTreeDictionary.cpp

@@ -230,7 +230,7 @@ void TreeList<Chunk>::return_chunk_at_tail(TreeChunk<Chunk>* chunk) {
   link_tail(chunk);

   assert(!tail() || size() == tail()->size(), "Wrong sized chunk in list");
-  FreeList<Chunk>::increment_count();
+  increment_count();
   debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -258,7 +258,7 @@ void TreeList<Chunk>::return_chunk_at_head(TreeChunk<Chunk>* chunk) {
   }
   head()->link_after(chunk);

   assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
-  FreeList<Chunk>::increment_count();
+  increment_count();
   debug_only(increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -909,6 +909,7 @@ class TreeCensusClosure : public StackObj {
 template <class Chunk>
 class AscendTreeCensusClosure : public TreeCensusClosure<Chunk> {
+  using TreeCensusClosure<Chunk>::do_list;
  public:
   void do_tree(TreeList<Chunk>* tl) {
     if (tl != NULL) {
@@ -921,6 +922,7 @@ class AscendTreeCensusClosure : public TreeCensusClosure<Chunk> {
 template <class Chunk>
 class DescendTreeCensusClosure : public TreeCensusClosure<Chunk> {
+  using TreeCensusClosure<Chunk>::do_list;
  public:
   void do_tree(TreeList<Chunk>* tl) {
     if (tl != NULL) {
@@ -987,6 +989,7 @@ class AscendTreeSearchClosure : public TreeSearchClosure {
 template <class Chunk>
 class DescendTreeSearchClosure : public TreeSearchClosure<Chunk> {
+  using TreeSearchClosure<Chunk>::do_list;
  public:
   bool do_tree(TreeList<Chunk>* tl) {
     if (tl != NULL) {
src/share/vm/memory/binaryTreeDictionary.hpp

@@ -60,13 +60,18 @@ class TreeList: public FreeList<Chunk> {
   TreeList<Chunk>* left()  const { return _left;  }
   TreeList<Chunk>* right() const { return _right; }

-  // Wrapper on call to base class, to get the template to compile.
-  Chunk* head() const { return FreeList<Chunk>::head(); }
-  Chunk* tail() const { return FreeList<Chunk>::tail(); }
-  void set_head(Chunk* head) { FreeList<Chunk>::set_head(head); }
-  void set_tail(Chunk* tail) { FreeList<Chunk>::set_tail(tail); }
-  size_t size() const { return FreeList<Chunk>::size(); }
+  // Explicitly import these names into our namespace to fix name lookup with templates
+  using FreeList<Chunk>::head;
+  using FreeList<Chunk>::set_head;
+  using FreeList<Chunk>::tail;
+  using FreeList<Chunk>::set_tail;
+  using FreeList<Chunk>::link_tail;
+  using FreeList<Chunk>::increment_count;
+  NOT_PRODUCT(using FreeList<Chunk>::increment_returned_bytes_by;)
+  using FreeList<Chunk>::verify_chunk_in_free_list;
+  using FreeList<Chunk>::size;

   // Accessors for links in tree.
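This hunk replaces forwarding wrappers with using-declarations, and the closure classes above gain similar declarations. The underlying issue is two-phase name lookup in templates: an unqualified name used in a class template is not looked up in a dependent base class, so calls like increment_count() fail to resolve unless the name is explicitly imported. A minimal self-contained illustration of the technique (hypothetical Base/Derived types, not the HotSpot classes):

#include <cstdio>

template <class T>
class Base {
 public:
  Base() : _count(0) {}
  void increment_count() { ++_count; }
  int  count() const     { return _count; }
 private:
  int _count;
};

template <class T>
class Derived : public Base<T> {
 public:
  // Without this using-declaration a conforming compiler rejects the
  // unqualified call below: names in the dependent base Base<T> are
  // not considered during the first phase of template name lookup.
  using Base<T>::increment_count;

  void add_item() {
    increment_count();  // found thanks to the using-declaration
  }
};

int main() {
  Derived<int> d;
  d.add_item();
  std::printf("count = %d\n", d.count());  // prints: count = 1
  return 0;
}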