openanolis / dragonwell8_hotspot
Commit 0f33c551
Authored Apr 09, 2010 by tonyp
Merge commit; parents: 9c9bc43d, 6e17b39b

Showing 10 changed files with 313 additions and 79 deletions (+313 −79)
src/share/vm/gc_implementation/g1/concurrentMark.cpp         +83  −21
src/share/vm/gc_implementation/g1/concurrentMark.hpp         +41  −1
src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp   +21  −11
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp        +32  −19
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp      +80  −15
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp      +41  −7
src/share/vm/gc_implementation/g1/g1_globals.hpp             +2   −5
src/share/vm/runtime/arguments.cpp                           +10  −0
src/share/vm/runtime/mutexLocker.cpp                         +2   −0
src/share/vm/runtime/mutexLocker.hpp                         +1   −0
src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -297,6 +297,11 @@ void CMRegionStack::push(MemRegion mr) {
   }
 }

+// Currently we do not call this at all. Normally we would call it
+// during the concurrent marking / remark phases but we now call
+// the lock-based version instead. But we might want to resurrect this
+// code in the future. So, we'll leave it here commented out.
+#if 0
 MemRegion CMRegionStack::pop() {
   while (true) {
     // Otherwise...
@@ -321,6 +326,41 @@ MemRegion CMRegionStack::pop() {
     // Otherwise, we need to try again.
   }
 }
+#endif // 0
+
+void CMRegionStack::push_with_lock(MemRegion mr) {
+  assert(mr.word_size() > 0, "Precondition");
+  MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
+
+  if (isFull()) {
+    _overflow = true;
+    return;
+  }
+
+  _base[_index] = mr;
+  _index += 1;
+}
+
+MemRegion CMRegionStack::pop_with_lock() {
+  MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
+
+  while (true) {
+    if (_index == 0) {
+      return MemRegion();
+    }
+    _index -= 1;
+
+    MemRegion mr = _base[_index];
+    if (mr.start() != NULL) {
+      assert(mr.end() != NULL, "invariant");
+      assert(mr.word_size() > 0, "invariant");
+      return mr;
+    } else {
+      // that entry was invalidated... let's skip it
+      assert(mr.end() == NULL, "invariant");
+    }
+  }
+}
+
 bool CMRegionStack::invalidate_entries_into_cset() {
   bool result = false;
@@ -668,24 +708,46 @@ ConcurrentMark::~ConcurrentMark() {
 //
 void ConcurrentMark::clearNextBitmap() {
-  guarantee(!G1CollectedHeap::heap()->mark_in_progress(), "Precondition.");
-
-  // clear the mark bitmap (no grey objects to start with).
-  // We need to do this in chunks and offer to yield in between
-  // each chunk.
-  HeapWord* start = _nextMarkBitMap->startWord();
-  HeapWord* end   = _nextMarkBitMap->endWord();
-  HeapWord* cur   = start;
-  size_t chunkSize = M;
-  while (cur < end) {
-    HeapWord* next = cur + chunkSize;
-    if (next > end)
-      next = end;
-    MemRegion mr(cur,next);
-    _nextMarkBitMap->clearRange(mr);
-    cur = next;
-    do_yield_check();
-  }
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+
+  // Make sure that the concurrent mark thread looks to still be in
+  // the current cycle.
+  guarantee(cmThread()->during_cycle(), "invariant");
+
+  // We are finishing up the current cycle by clearing the next
+  // marking bitmap and getting it ready for the next cycle. During
+  // this time no other cycle can start. So, let's make sure that this
+  // is the case.
+  guarantee(!g1h->mark_in_progress(), "invariant");
+
+  // clear the mark bitmap (no grey objects to start with).
+  // We need to do this in chunks and offer to yield in between
+  // each chunk.
+  HeapWord* start = _nextMarkBitMap->startWord();
+  HeapWord* end   = _nextMarkBitMap->endWord();
+  HeapWord* cur   = start;
+  size_t chunkSize = M;
+  while (cur < end) {
+    HeapWord* next = cur + chunkSize;
+    if (next > end)
+      next = end;
+    MemRegion mr(cur,next);
+    _nextMarkBitMap->clearRange(mr);
+    cur = next;
+    do_yield_check();
+
+    // Repeat the asserts from above. We'll do them as asserts here to
+    // minimize their overhead on the product. However, we'll have
+    // them as guarantees at the beginning / end of the bitmap
+    // clearing to get some checking in the product.
+    assert(cmThread()->during_cycle(), "invariant");
+    assert(!g1h->mark_in_progress(), "invariant");
+  }
+
+  // Repeat the asserts from above.
+  guarantee(cmThread()->during_cycle(), "invariant");
+  guarantee(!g1h->mark_in_progress(), "invariant");
 }

 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
@@ -3363,7 +3425,7 @@ void CMTask::drain_region_stack(BitMapClosure* bc) {
       gclog_or_tty->print_cr("[%d] draining region stack, size = %d",
                              _task_id, _cm->region_stack_size());

-    MemRegion mr = _cm->region_stack_pop();
+    MemRegion mr = _cm->region_stack_pop_with_lock();
     // it returns MemRegion() if the pop fails
     statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
@@ -3384,7 +3446,7 @@ void CMTask::drain_region_stack(BitMapClosure* bc) {
       if (has_aborted())
         mr = MemRegion();
       else {
-        mr = _cm->region_stack_pop();
+        mr = _cm->region_stack_pop_with_lock();
         // it returns MemRegion() if the pop fails
         statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
       }
@@ -3417,7 +3479,7 @@ void CMTask::drain_region_stack(BitMapClosure* bc) {
         }
         // Now push the part of the region we didn't scan on the
         // region stack to make sure a task scans it later.
-        _cm->region_stack_push(newRegion);
+        _cm->region_stack_push_with_lock(newRegion);
       }
       // break from while
       mr = MemRegion();
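For readers who want the shape of the new lock-based operations without the HotSpot scaffolding (MutexLockerEx, CMRegionStack_lock, MemRegion), here is a minimal standalone sketch of the same pattern: a fixed-capacity stack whose push sets an overflow flag instead of growing, and whose pop skips invalidated entries. All names in the sketch are hypothetical, and std::mutex stands in for the HotSpot mutex taken with Mutex::_no_safepoint_check_flag; it is an illustration, not the JVM code.

// region_stack_sketch.cpp -- standalone illustration only; names are hypothetical.
#include <cstddef>
#include <mutex>
#include <vector>

struct Region { const void* start = nullptr; const void* end = nullptr; };

class RegionStackModel {
  std::vector<Region> _base;     // fixed-capacity backing store
  size_t              _index;    // next free slot
  bool                _overflow; // set instead of growing on a full stack
  std::mutex          _lock;     // stands in for CMRegionStack_lock

public:
  explicit RegionStackModel(size_t capacity)
    : _base(capacity), _index(0), _overflow(false) {}

  // Mirrors push_with_lock(): on a full stack, record overflow so the
  // caller can notice and restart marking, rather than losing the entry.
  void push_with_lock(Region r) {
    std::lock_guard<std::mutex> x(_lock);
    if (_index == _base.size()) { _overflow = true; return; }
    _base[_index++] = r;
  }

  // Mirrors pop_with_lock(): skip invalidated entries (start == nullptr)
  // and return an empty Region once the stack is exhausted.
  Region pop_with_lock() {
    std::lock_guard<std::mutex> x(_lock);
    while (_index > 0) {
      Region r = _base[--_index];
      if (r.start != nullptr) return r;
      // invalidated entry: keep scanning downwards
    }
    return Region();
  }

  bool overflow() const { return _overflow; }
};

As the asserts added in concurrentMark.hpp spell out, the lock-free push() stays legal only inside an evacuation pause (world stopped), while the *_with_lock variants are what concurrent marking and the remark pause now call.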
src/share/vm/gc_implementation/g1/concurrentMark.hpp
@@ -252,9 +252,19 @@ public:
   // with other "push" operations (no pops).
   void push(MemRegion mr);

+#if 0
+  // This is currently not used. See the comment in the .cpp file.
+
   // Lock-free; assumes that it will only be called in parallel
   // with other "pop" operations (no pushes).
   MemRegion pop();
+#endif // 0
+
+  // These two are the implementations that use a lock. They can be
+  // called concurrently with each other but they should not be called
+  // concurrently with the lock-free versions (push() / pop()).
+  void push_with_lock(MemRegion mr);
+  MemRegion pop_with_lock();

   bool isEmpty()    { return _index == 0; }
   bool isFull()     { return _index == _capacity; }
@@ -540,6 +550,10 @@ public:
   // Manipulation of the region stack
   bool region_stack_push(MemRegion mr) {
+    // Currently we only call the lock-free version during evacuation
+    // pauses.
+    assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
+
     _regionStack.push(mr);
     if (_regionStack.overflow()) {
       set_has_overflown();
@@ -547,7 +561,33 @@ public:
     }
     return true;
   }
-  MemRegion region_stack_pop() { return _regionStack.pop(); }
+
+#if 0
+  // Currently this is not used. See the comment in the .cpp file.
+  MemRegion region_stack_pop() { return _regionStack.pop(); }
+#endif // 0
+
+  bool region_stack_push_with_lock(MemRegion mr) {
+    // Currently we only call the lock-based version during either
+    // concurrent marking or remark.
+    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
+           "if we are at a safepoint it should be the remark safepoint");
+
+    _regionStack.push_with_lock(mr);
+    if (_regionStack.overflow()) {
+      set_has_overflown();
+      return false;
+    }
+    return true;
+  }
+
+  MemRegion region_stack_pop_with_lock() {
+    // Currently we only call the lock-based version during either
+    // concurrent marking or remark.
+    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
+           "if we are at a safepoint it should be the remark safepoint");
+
+    return _regionStack.pop_with_lock();
+  }

   int  region_stack_size()     { return _regionStack.size(); }
   bool region_stack_overflow() { return _regionStack.overflow(); }
   bool region_stack_empty()    { return _regionStack.isEmpty(); }
src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp
@@ -42,8 +42,8 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
 private:
   ConcurrentMark*                  _cm;
-  bool                             _started;
-  bool                             _in_progress;
+  volatile bool                    _started;
+  volatile bool                    _in_progress;

   void sleepBeforeNextCycle();
@@ -67,15 +67,25 @@ class ConcurrentMarkThread: public ConcurrentGCThread {
   // Counting virtual time so far.
   double vtime_count_accum() { return _vtime_count_accum; }

-  ConcurrentMark* cm()                { return _cm;     }
-
-  void            set_started()       { _started = true;  }
-  void            clear_started()     { _started = false; }
-  bool            started()           { return _started;  }
-
-  void            set_in_progress()   { _in_progress = true;  }
-  void            clear_in_progress() { _in_progress = false; }
-  bool            in_progress()       { return _in_progress;  }
+  ConcurrentMark* cm()     { return _cm; }
+
+  void set_started()       { _started = true;  }
+  void clear_started()     { _started = false; }
+  bool started()           { return _started;  }
+
+  void set_in_progress()   { _in_progress = true;  }
+  void clear_in_progress() { _in_progress = false; }
+  bool in_progress()       { return _in_progress;  }
+
+  // This flag returns true from the moment a marking cycle is
+  // initiated (during the initial-mark pause when started() is set)
+  // to the moment when the cycle completes (just after the next
+  // marking bitmap has been cleared and in_progress() is
+  // cleared). While this flag is true we will not start another cycle
+  // so that cycles do not overlap. We cannot use just in_progress()
+  // as the CM thread might take some time to wake up before noticing
+  // that started() is set and set in_progress().
+  bool during_cycle()      { return started() || in_progress(); }

   // Yield for GC
   void            yield();
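The two fields become volatile because they are now read by the VM thread (via during_cycle(), called from the policy at the start of a pause) while being written by the concurrent mark thread. Below is a minimal standalone model of the intended lifecycle, using std::atomic<bool> in place of HotSpot's volatile bool and with hypothetical method names; it is a sketch of the handshake, not the JVM code.

// cycle_flags_sketch.cpp -- standalone model of the started/in_progress handshake.
#include <atomic>

class MarkCycleFlags {
  std::atomic<bool> _started{false};      // armed by the initial-mark pause
  std::atomic<bool> _in_progress{false};  // raised by the marking thread once it wakes up

public:
  // VM thread, at the end of an initial-mark pause:
  void notify_cycle_started() { _started.store(true); }

  // Marking thread, when it picks the request up:
  void begin_cycle()          { _in_progress.store(true); _started.store(false); }

  // Marking thread, after the next marking bitmap has been cleared:
  void end_cycle()            { _in_progress.store(false); }

  // "During a cycle" must cover the window between the pause arming
  // _started and the marking thread noticing it, hence the OR of both
  // flags rather than _in_progress alone.
  bool during_cycle() const   { return _started.load() || _in_progress.load(); }
};

during_cycle() has to OR both flags because there is a window after the initial-mark pause sets _started and before the marking thread wakes up and sets _in_progress; checking _in_progress alone would let a second cycle start inside that window, which is exactly the overlap this change is designed to rule out.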
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -902,6 +902,10 @@ public:
 void
 G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
                                size_t word_size) {
+  if (GC_locker::check_active_before_gc()) {
+    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
+  }
+
   ResourceMark rm;

   if (PrintHeapAtGC) {
@@ -916,10 +920,6 @@ void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");

-  if (GC_locker::is_active()) {
-    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
-  }
-
   {
     IsGCActiveMark x;
@@ -2658,6 +2658,10 @@ struct PrepareForRSScanningClosure : public HeapRegionClosure {
 void
 G1CollectedHeap::do_collection_pause_at_safepoint() {
+  if (GC_locker::check_active_before_gc()) {
+    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
+  }
+
   if (PrintHeapAtGC) {
     Universe::print_heap_before_gc();
   }
@@ -2665,6 +2669,11 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
   {
     ResourceMark rm;

+    // This call will decide whether this pause is an initial-mark
+    // pause. If it is, during_initial_mark_pause() will return true
+    // for the duration of this pause.
+    g1_policy()->decide_on_conc_mark_initiation();
+
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
     if (g1_policy()->in_young_gc_mode()) {
@@ -2673,7 +2682,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
       else
        strcat(verbose_str, "(partial)");
     }
-    if (g1_policy()->should_initiate_conc_mark())
+    if (g1_policy()->during_initial_mark_pause())
       strcat(verbose_str, " (initial-mark)");

     // if PrintGCDetails is on, we'll print long statistics information
@@ -2697,10 +2706,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
              "young list should be well formed");
     }

-    if (GC_locker::is_active()) {
-      return; // GC is disabled (e.g. JNI GetXXXCritical operation)
-    }
-
     bool abandoned = false;
     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
       IsGCActiveMark x;
@@ -2756,7 +2761,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
       _young_list->print();
 #endif // SCAN_ONLY_VERBOSE

-      if (g1_policy()->should_initiate_conc_mark()) {
+      if (g1_policy()->during_initial_mark_pause()) {
         concurrent_mark()->checkpointRootsInitialPre();
       }
       save_marks();
@@ -2858,7 +2863,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
       }

       if (g1_policy()->in_young_gc_mode() &&
-          g1_policy()->should_initiate_conc_mark()) {
+          g1_policy()->during_initial_mark_pause()) {
         concurrent_mark()->checkpointRootsInitialPost();
         set_marking_started();
         // CAUTION: after the doConcurrentMark() call below,
@@ -2937,6 +2942,9 @@ void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   // the same region
   assert(r == NULL || !r->is_gc_alloc_region(),
          "shouldn't already be a GC alloc region");
+  assert(r == NULL || !r->isHumongous(),
+         "humongous regions shouldn't be used as GC alloc regions");
+
   HeapWord* original_top = NULL;
   if (r != NULL)
     original_top = r->top();
@@ -3079,12 +3087,17 @@ void G1CollectedHeap::get_gc_alloc_regions() {
       if (alloc_region->in_collection_set() ||
           alloc_region->top() == alloc_region->end() ||
-          alloc_region->top() == alloc_region->bottom()) {
-        // we will discard the current GC alloc region if it's in the
-        // collection set (it can happen!), if it's already full (no
-        // point in using it), or if it's empty (this means that it
-        // was emptied during a cleanup and it should be on the free
-        // list now).
+          alloc_region->top() == alloc_region->bottom() ||
+          alloc_region->isHumongous()) {
+        // we will discard the current GC alloc region if
+        // * it's in the collection set (it can happen!),
+        // * it's already full (no point in using it),
+        // * it's empty (this means that it was emptied during
+        //   a cleanup and it should be on the free list now), or
+        // * it's humongous (this means that it was emptied
+        //   during a cleanup and was added to the free list, but
+        //   has been subseqently used to allocate a humongous
+        //   object that may be less than the region size).
+
         alloc_region = NULL;
       }
@@ -3977,7 +3990,7 @@ public:
     OopsInHeapRegionClosure* scan_perm_cl;
     OopsInHeapRegionClosure* scan_so_cl;

-    if (_g1h->g1_policy()->should_initiate_conc_mark()) {
+    if (_g1h->g1_policy()->during_initial_mark_pause()) {
       scan_root_cl = &scan_mark_root_cl;
       scan_perm_cl = &scan_mark_perm_cl;
       scan_so_cl   = &scan_mark_heap_rs_cl;
@@ -4140,7 +4153,7 @@ G1CollectedHeap::scan_scan_only_set(OopsInHeapRegionClosure* oc,
   FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());

   OopsInHeapRegionClosure* foc;
-  if (g1_policy()->should_initiate_conc_mark())
+  if (g1_policy()->during_initial_mark_pause())
     foc = &scan_and_mark;
   else
     foc = &scan_only;
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -178,8 +178,8 @@ G1CollectorPolicy::G1CollectorPolicy() :
   // so the hack is to do the cast QQQ FIXME
   _pauses_btwn_concurrent_mark((size_t) G1PausesBtwnConcMark),
   _n_marks_since_last_pause(0),
-  _conc_mark_initiated(false),
-  _should_initiate_conc_mark(false),
+  _initiate_conc_mark_if_possible(false),
+  _during_initial_mark_pause(false),
   _should_revert_to_full_young_gcs(false),
   _last_full_young_gc(false),
@@ -198,7 +198,9 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _recorded_survivor_regions(0),
   _recorded_survivor_head(NULL),
   _recorded_survivor_tail(NULL),
-  _survivors_age_table(true)
+  _survivors_age_table(true),
+
+  _gc_overhead_perc(0.0)

 {
   // Set up the region size and associated fields. Given that the
@@ -275,6 +277,11 @@ G1CollectorPolicy::G1CollectorPolicy() :
   // calculate_young_list_target_config during initialization
   _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;

+  assert(GCTimeRatio > 0,
+         "we should have set it to a default value set_g1_gc_flags() "
+         "if a user set it to 0");
+  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
+
   initialize_all();
 }
@@ -786,7 +793,7 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
                            elapsed_time_ms, calculations,
                            full_young_gcs() ? "full" : "partial",
-                           should_initiate_conc_mark() ? " i-m" : "",
+                           during_initial_mark_pause() ? " i-m" : "",
                            _in_marking_window, _in_marking_window_im);
 #endif // TRACE_CALC_YOUNG_CONFIG
@@ -1033,7 +1040,8 @@ void G1CollectorPolicy::record_full_collection_end() {
   set_full_young_gcs(true);
   _last_full_young_gc = false;
   _should_revert_to_full_young_gcs = false;
-  _should_initiate_conc_mark = false;
+  clear_initiate_conc_mark_if_possible();
+  clear_during_initial_mark_pause();
   _known_garbage_bytes = 0;
   _known_garbage_ratio = 0.0;
   _in_marking_window = false;
@@ -1179,7 +1187,8 @@ void G1CollectorPolicy::record_concurrent_mark_init_start() {
 void
 G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
                                                    mark_init_elapsed_time_ms) {
   _during_marking = true;
-  _should_initiate_conc_mark = false;
+  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
+  clear_during_initial_mark_pause();
   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 }
@@ -1250,7 +1259,6 @@ void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
   }
   _n_pauses_at_mark_end = _n_pauses;
   _n_marks_since_last_pause++;
-  _conc_mark_initiated = false;
 }

 void
@@ -1446,17 +1454,24 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
 #endif // PRODUCT

   if (in_young_gc_mode()) {
-    last_pause_included_initial_mark = _should_initiate_conc_mark;
+    last_pause_included_initial_mark = during_initial_mark_pause();
     if (last_pause_included_initial_mark)
       record_concurrent_mark_init_end_pre(0.0);

     size_t min_used_targ =
       (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;

-    if (cur_used_bytes > min_used_targ) {
-      if (cur_used_bytes <= _prev_collection_pause_used_at_end_bytes) {
-      } else if (!_g1->mark_in_progress() && !_last_full_young_gc) {
-        _should_initiate_conc_mark = true;
+    if (!_g1->mark_in_progress() && !_last_full_young_gc) {
+      assert(!last_pause_included_initial_mark, "invariant");
+      if (cur_used_bytes > min_used_targ &&
+          cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
+        assert(!during_initial_mark_pause(), "we should not see this here");
+
+        // Note: this might have already been set, if during the last
+        // pause we decided to start a cycle but at the beginning of
+        // this pause we decided to postpone it. That's OK.
+        set_initiate_conc_mark_if_possible();
       }
     }
@@ -1747,7 +1762,7 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
   bool new_in_marking_window = _in_marking_window;
   bool new_in_marking_window_im = false;
-  if (_should_initiate_conc_mark) {
+  if (during_initial_mark_pause()) {
     new_in_marking_window = true;
     new_in_marking_window_im = true;
   }
@@ -2166,7 +2181,13 @@ void G1CollectorPolicy::check_if_region_is_too_expensive(double
   if (predicted_time_ms > _expensive_region_limit_ms) {
     if (!in_young_gc_mode()) {
         set_full_young_gcs(true);
-        _should_initiate_conc_mark = true;
+        // We might want to do something different here. However,
+        // right now we don't support the non-generational G1 mode
+        // (and in fact we are planning to remove the associated code,
+        // see CR 6814390). So, let's leave it as is and this will be
+        // removed some time in the future
+        ShouldNotReachHere();
+        set_during_initial_mark_pause();
     } else
       // no point in doing another partial one
       _should_revert_to_full_young_gcs = true;
@@ -2288,7 +2309,7 @@ G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
 }

 size_t G1CollectorPolicy::expansion_amount() {
-  if ((int)(recent_avg_pause_time_ratio() * 100.0) > G1GCPercent) {
+  if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) {
    // We will double the existing space, or take
    // G1ExpandByPercentOfAvailable % of the available expansion
    // space, whichever is smaller, bounded below by a minimum
@@ -2689,6 +2710,50 @@ bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
 }
 #endif

+void
+G1CollectorPolicy::decide_on_conc_mark_initiation() {
+  // We are about to decide on whether this pause will be an
+  // initial-mark pause.
+
+  // First, during_initial_mark_pause() should not be already set. We
+  // will set it here if we have to. However, it should be cleared by
+  // the end of the pause (it's only set for the duration of an
+  // initial-mark pause).
+  assert(!during_initial_mark_pause(), "pre-condition");
+
+  if (initiate_conc_mark_if_possible()) {
+    // We had noticed on a previous pause that the heap occupancy has
+    // gone over the initiating threshold and we should start a
+    // concurrent marking cycle. So we might initiate one.
+
+    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
+    if (!during_cycle) {
+      // The concurrent marking thread is not "during a cycle", i.e.,
+      // it has completed the last one. So we can go ahead and
+      // initiate a new cycle.
+
+      set_during_initial_mark_pause();
+
+      // And we can now clear initiate_conc_mark_if_possible() as
+      // we've already acted on it.
+      clear_initiate_conc_mark_if_possible();
+    } else {
+      // The concurrent marking thread is still finishing up the
+      // previous cycle. If we start one right now the two cycles
+      // overlap. In particular, the concurrent marking thread might
+      // be in the process of clearing the next marking bitmap (which
+      // we will use for the next cycle if we start one). Starting a
+      // cycle now will be bad given that parts of the marking
+      // information might get cleared by the marking thread. And we
+      // cannot wait for the marking thread to finish the cycle as it
+      // periodically yields while clearing the next marking bitmap
+      // and, if it's in a yield point, it's waiting for us to
+      // finish. So, at this point we will not start a cycle and we'll
+      // let the concurrent marking thread complete the last one.
+    }
+  }
+}
+
 void
 G1CollectorPolicy_BestRegionsFirst::
 record_collection_pause_start(double start_time_sec, size_t start_used) {
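To make the trigger concrete with a small worked example: record_collection_pause_end() computes min_used_targ = (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent and only calls set_initiate_conc_mark_if_possible() once occupancy has both crossed that threshold and grown since the previous pause. Assuming, purely for illustration, the era's default InitiatingHeapOccupancyPercent of 45 and a 1 GB heap, the request is armed at roughly (1024 / 100) * 45 ≈ 460 MB of used space; decide_on_conc_mark_initiation() then turns the request into an actual initial-mark pause only when cmThread()->during_cycle() reports that the previous cycle has fully finished.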
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
@@ -215,6 +215,8 @@ protected:
   SurvRateGroup*               _survivor_surv_rate_group;
   // add here any more surv rate groups

+  double                       _gc_overhead_perc;
+
   bool during_marking() { return _during_marking; }
@@ -722,11 +724,31 @@ protected:
   size_t _n_marks_since_last_pause;

-  // True iff CM has been initiated.
-  bool _conc_mark_initiated;
+  // At the end of a pause we check the heap occupancy and we decide
+  // whether we will start a marking cycle during the next pause. If
+  // we decide that we want to do that, we will set this parameter to
+  // true. So, this parameter will stay true between the end of a
+  // pause and the beginning of a subsequent pause (not necessarily
+  // the next one, see the comments on the next field) when we decide
+  // that we will indeed start a marking cycle and do the
+  // initial-mark work.
+  volatile bool _initiate_conc_mark_if_possible;
+
+  // If initiate_conc_mark_if_possible() is set at the beginning of a
+  // pause, it is a suggestion that the pause should start a marking
+  // cycle by doing the initial-mark work. However, it is possible
+  // that the concurrent marking thread is still finishing up the
+  // previous marking cycle (e.g., clearing the next marking
+  // bitmap). If that is the case we cannot start a new cycle and
+  // we'll have to wait for the concurrent marking thread to finish
+  // what it is doing. In this case we will postpone the marking cycle
+  // initiation decision for the next pause. When we eventually decide
+  // to start a cycle, we will set _during_initial_mark_pause which
+  // will stay true until the end of the initial-mark pause and it's
+  // the condition that indicates that a pause is doing the
+  // initial-mark work.
+  volatile bool _during_initial_mark_pause;

-  // True iff CM should be initiated
-  bool _should_initiate_conc_mark;
   bool _should_revert_to_full_young_gcs;
   bool _last_full_young_gc;
@@ -979,9 +1001,21 @@ public:
   // Add "hr" to the CS.
   void add_to_collection_set(HeapRegion* hr);

-  bool should_initiate_conc_mark()       { return _should_initiate_conc_mark; }
-  void set_should_initiate_conc_mark()   { _should_initiate_conc_mark = true; }
-  void unset_should_initiate_conc_mark() { _should_initiate_conc_mark = false; }
+  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible; }
+  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true; }
+  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
+
+  bool during_initial_mark_pause()      { return _during_initial_mark_pause; }
+  void set_during_initial_mark_pause()  { _during_initial_mark_pause = true; }
+  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
+
+  // This is called at the very beginning of an evacuation pause (it
+  // has to be the first thing that the pause does). If
+  // initiate_conc_mark_if_possible() is true, and the concurrent
+  // marking thread has completed its work during the previous cycle,
+  // it will set during_initial_mark_pause() to so that the pause does
+  // the initial-mark work and start a marking cycle.
+  void decide_on_conc_mark_initiation();

   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -40,9 +40,6 @@
   develop(bool, G1Gen, true,                                                \
           "If true, it will enable the generational G1")                    \
                                                                             \
-  develop(intx, G1GCPercent, 10,                                            \
-          "The desired percent time spent on GC")                           \
-                                                                            \
   develop(intx, G1PolicyVerbose, 0,                                         \
           "The verbosity level on G1 policy decisions")                     \
                                                                             \
@@ -270,11 +267,11 @@
   product(uintx, G1HeapRegionSize, 0,                                       \
           "Size of the G1 regions.")                                        \
                                                                             \
-  experimental(bool, G1UseParallelRSetUpdating, false,                      \
+  experimental(bool, G1UseParallelRSetUpdating, true,                       \
           "Enables the parallelization of remembered set updating "         \
           "during evacuation pauses")                                       \
                                                                             \
-  experimental(bool, G1UseParallelRSetScanning, false,                      \
+  experimental(bool, G1UseParallelRSetScanning, true,                       \
           "Enables the parallelization of remembered set scanning "         \
           "during evacuation pauses")                                       \
                                                                             \
src/share/vm/runtime/arguments.cpp
@@ -1353,6 +1353,16 @@ void Arguments::set_g1_gc_flags() {
                MarkStackSize / K, MarkStackSizeMax / K);
     tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
   }
+
+  if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
+    // In G1, we want the default GC overhead goal to be higher than
+    // say in PS. So we set it here to 10%. Otherwise the heap might
+    // be expanded more aggressively than we would like it to. In
+    // fact, even 10% seems to not be high enough in some cases
+    // (especially small GC stress tests that the main thing they do
+    // is allocation). We might consider increase it further.
+    FLAG_SET_DEFAULT(GCTimeRatio, 9);
+  }
 }

 void Arguments::set_heap_size() {
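Tying this back to the policy change earlier in the diff: the G1CollectorPolicy constructor now derives _gc_overhead_perc = 100 * (1 / (1 + GCTimeRatio)), so the default of 9 established here gives 100 * 1/10 = 10%, the "10%" goal the comment refers to. expansion_amount() then expands the heap when recent_avg_pause_time_ratio() * 100 exceeds that value, which is what replaces the removed G1GCPercent develop flag.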
src/share/vm/runtime/mutexLocker.cpp
@@ -70,6 +70,7 @@ Monitor* FullGCCount_lock = NULL;
 Monitor* CMark_lock          = NULL;
 Monitor* ZF_mon              = NULL;
 Monitor* Cleanup_mon         = NULL;
+Mutex*   CMRegionStack_lock  = NULL;
 Mutex*   SATB_Q_FL_lock      = NULL;
 Monitor* SATB_Q_CBL_mon      = NULL;
 Mutex*   Shared_SATB_Q_lock  = NULL;
@@ -167,6 +168,7 @@ void mutex_init() {
   def(CMark_lock             , Monitor, nonleaf, true ); // coordinate concurrent mark thread
   def(ZF_mon                 , Monitor, leaf,    true );
   def(Cleanup_mon            , Monitor, nonleaf, true );
+  def(CMRegionStack_lock     , Mutex,   leaf,    true );
   def(SATB_Q_FL_lock         , Mutex,   special, true );
   def(SATB_Q_CBL_mon         , Monitor, nonleaf, true );
   def(Shared_SATB_Q_lock     , Mutex,   nonleaf, true );
src/share/vm/runtime/mutexLocker.hpp
@@ -63,6 +63,7 @@ extern Monitor* FullGCCount_lock; // in support of "concurrent" f
 extern Monitor* CMark_lock;          // used for concurrent mark thread coordination
 extern Monitor* ZF_mon;              // used for G1 conc zero-fill.
 extern Monitor* Cleanup_mon;         // used for G1 conc cleanup.
+extern Mutex*   CMRegionStack_lock;  // used for protecting accesses to the CM region stack
 extern Mutex*   SATB_Q_FL_lock;      // Protects SATB Q
                                      // buffer free list.
 extern Monitor* SATB_Q_CBL_mon;      // Protects SATB Q