Commit 32abbfea
Authored on May 07, 2009 by jcoomes

    Merge

Parents: f8515126, 8a64ebc2

Showing 12 changed files with 90 additions and 165 deletions (+90 −165)
src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp   +6   -4
src/share/vm/gc_implementation/g1/concurrentMark.cpp             +8   -7
src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp       +0   -14
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp            +10  -32
src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp     +3   -2
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp          +22  -21
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp                +1   -1
src/share/vm/gc_implementation/g1/g1RemSet.cpp                   +2   -2
src/share/vm/gc_implementation/g1/g1_globals.hpp                 +26  -60
src/share/vm/gc_implementation/g1/heapRegion.cpp                 +1   -19
src/share/vm/runtime/arguments.cpp                               +6   -2
src/share/vm/runtime/globals.hpp                                 +5   -1
src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp

@@ -53,7 +53,9 @@ void ConcurrentG1RefineThread::traversalBasedRefinement() {
     ResourceMark rm;
     HandleMark   hm;
-    if (TraceG1Refine) gclog_or_tty->print_cr("G1-Refine starting pass");
+    if (G1TraceConcurrentRefinement) {
+      gclog_or_tty->print_cr("G1-Refine starting pass");
+    }
     _sts.join();
     bool no_sleep = _cg1r->refine();
     _sts.leave();
@@ -207,9 +209,9 @@ void ConcurrentG1RefineThread::run() {
 void ConcurrentG1RefineThread::yield() {
-  if (TraceG1Refine) gclog_or_tty->print_cr("G1-Refine-yield");
+  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield");
   _sts.yield("G1 refine");
-  if (TraceG1Refine) gclog_or_tty->print_cr("G1-Refine-yield-end");
+  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield-end");
 }

 void ConcurrentG1RefineThread::stop() {
@@ -230,7 +232,7 @@ void ConcurrentG1RefineThread::stop() {
       Terminator_lock->wait();
     }
   }
-  if (TraceG1Refine) gclog_or_tty->print_cr("G1-Refine-stop");
+  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-stop");
 }

 void ConcurrentG1RefineThread::print() {
src/share/vm/gc_implementation/g1/concurrentMark.cpp

@@ -448,8 +448,8 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                            "heap end = "PTR_FORMAT, _heap_start, _heap_end);

-  _markStack.allocate(G1CMStackSize);
-  _regionStack.allocate(G1CMRegionStackSize);
+  _markStack.allocate(G1MarkStackSize);
+  _regionStack.allocate(G1MarkRegionStackSize);

   // Create & start a ConcurrentMark thread.
   if (G1ConcMark) {
@@ -499,20 +499,21 @@ ConcurrentMark::ConcurrentMark(ReservedSpace rs,
     _marking_task_overhead    = 1.0;
   } else {
     if (ParallelMarkingThreads > 0) {
-      // notice that ParallelMarkingThreads overwrites G1MarkingOverheadPerc
+      // notice that ParallelMarkingThreads overwrites G1MarkingOverheadPercent
       // if both are set
       _parallel_marking_threads = ParallelMarkingThreads;
       _sleep_factor             = 0.0;
       _marking_task_overhead    = 1.0;
-    } else if (G1MarkingOverheadPerc > 0) {
+    } else if (G1MarkingOverheadPercent > 0) {
       // we will calculate the number of parallel marking threads
       // based on a target overhead with respect to the soft real-time
       // goal

-      double marking_overhead = (double) G1MarkingOverheadPerc / 100.0;
+      double marking_overhead = (double) G1MarkingOverheadPercent / 100.0;
       double overall_cm_overhead =
-        (double) G1MaxPauseTimeMS * marking_overhead / (double) G1TimeSliceMS;
+        (double) MaxGCPauseMillis * marking_overhead /
+        (double) GCPauseIntervalMillis;
       double cpu_ratio = 1.0 / (double) os::processor_count();
       double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
       double marking_task_overhead =
@@ -1747,7 +1748,7 @@ void ConcurrentMark::cleanup() {
   g1h->increment_total_collections();

 #ifndef PRODUCT
-  if (G1VerifyConcMark) {
+  if (VerifyDuringGC) {
     G1CollectedHeap::heap()->prepare_for_verify();
     G1CollectedHeap::heap()->verify(true,false);
   }
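The second hunk above re-derives the parallel marking thread count from the renamed pause-goal flags. The standalone sketch below reproduces that arithmetic; the pause defaults (200 ms / 500 ms) come from this changeset, while the 10% marking overhead and the 8-CPU count are assumed inputs chosen only for illustration.

```cpp
#include <cmath>
#include <cstdio>

// Standalone re-derivation of the marking-thread calculation from the
// ConcurrentMark constructor hunk above. Flag values are plain variables
// here; the overhead percentage and CPU count are assumed.
int main() {
  const double MaxGCPauseMillis         = 200.0;  // default set in arguments.cpp
  const double GCPauseIntervalMillis    = 500.0;  // default from globals.hpp
  const double G1MarkingOverheadPercent = 10.0;   // assumed (flag default is 0)
  const int    processor_count          = 8;      // assumed

  double marking_overhead    = G1MarkingOverheadPercent / 100.0;
  double overall_cm_overhead =
      MaxGCPauseMillis * marking_overhead / GCPauseIntervalMillis;
  double cpu_ratio           = 1.0 / processor_count;
  double marking_thread_num  = ceil(overall_cm_overhead / cpu_ratio);

  // 200 * 0.10 / 500 = 0.04; 0.04 / 0.125 = 0.32; ceil -> 1 marking thread.
  printf("parallel marking threads = %.0f\n", marking_thread_num);
  return 0;
}
```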
src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp

@@ -136,9 +136,6 @@ void ConcurrentMarkThread::run() {
         iter++;
         if (!cm()->has_aborted()) {
           _cm->markFromRoots();
-        } else {
-          if (TraceConcurrentMark)
-            gclog_or_tty->print_cr("CM-skip-mark-from-roots");
         }

         double mark_end_time = os::elapsedVTime();
@@ -163,9 +160,6 @@ void ConcurrentMarkThread::run() {
           sprintf(verbose_str, "GC remark");
           VM_CGC_Operation op(&final_cl, verbose_str);
           VMThread::execute(&op);
-        } else {
-          if (TraceConcurrentMark)
-            gclog_or_tty->print_cr("CM-skip-remark");
         }
         if (cm()->restart_for_overflow() &&
             G1TraceMarkStackOverflow) {
@@ -208,8 +202,6 @@ void ConcurrentMarkThread::run() {
                                    count_end_sec - count_start_sec);
             }
           }
-        } else {
-          if (TraceConcurrentMark)
-            gclog_or_tty->print_cr("CM-skip-end-game");
         }
         double end_time = os::elapsedVTime();
         _vtime_count_accum += (end_time - counting_start_time);
@@ -230,7 +222,6 @@ void ConcurrentMarkThread::run() {
         VM_CGC_Operation op(&cl_cl, verbose_str);
         VMThread::execute(&op);
       } else {
-        if (TraceConcurrentMark) gclog_or_tty->print_cr("CM-skip-cleanup");
         G1CollectedHeap::heap()->set_marking_complete();
       }
@@ -287,9 +278,7 @@ void ConcurrentMarkThread::run() {
 void ConcurrentMarkThread::yield() {
-  if (TraceConcurrentMark) gclog_or_tty->print_cr("CM-yield");
   _sts.yield("Concurrent Mark");
-  if (TraceConcurrentMark) gclog_or_tty->print_cr("CM-yield-end");
 }

 void ConcurrentMarkThread::stop() {
@@ -299,7 +288,6 @@ void ConcurrentMarkThread::stop() {
   while (!_has_terminated) {
     Terminator_lock->wait();
   }
-  if (TraceConcurrentMark) gclog_or_tty->print_cr("CM-stop");
 }

 void ConcurrentMarkThread::print() {
@@ -314,12 +302,10 @@ void ConcurrentMarkThread::sleepBeforeNextCycle() {
   // below while the world is otherwise stopped.
   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
   while (!started()) {
-    if (TraceConcurrentMark) gclog_or_tty->print_cr("CM-sleeping");
     CGC_lock->wait(Mutex::_no_safepoint_check_flag);
   }
   set_in_progress();
   clear_started();
-  if (TraceConcurrentMark) gclog_or_tty->print_cr("CM-starting");
 }

 // Note: this method, although exported by the ConcurrentMarkSweepThread,
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -528,7 +528,7 @@ HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
           res->zero_fill_state() == HeapRegion::Allocated)),
          "Non-young alloc Regions must be zero filled (and non-H)");

-  if (G1TraceRegions) {
+  if (G1PrintRegions) {
     if (res != NULL) {
       gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                              "top "PTR_FORMAT,
@@ -2282,13 +2282,13 @@ void G1CollectedHeap::print_tracing_info() const {
     // to that.
     g1_policy()->print_tracing_info();
   }
-  if (SummarizeG1RSStats) {
+  if (G1SummarizeRSetStats) {
     g1_rem_set()->print_summary_info();
   }
-  if (SummarizeG1ConcMark) {
+  if (G1SummarizeConcurrentMark) {
     concurrent_mark()->print_summary_info();
   }
-  if (SummarizeG1ZFStats) {
+  if (G1SummarizeZFStats) {
     ConcurrentZFThread::print_summary_info();
   }
   g1_policy()->print_yg_surv_rate_info();
@@ -3255,7 +3255,7 @@ void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
   HeapRegion* r = heap_region_containing(old);
   if (!r->evacuation_failed()) {
     r->set_evacuation_failed(true);
-    if (G1TraceRegions) {
+    if (G1PrintRegions) {
       gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
                           "["PTR_FORMAT","PTR_FORMAT")\n",
                           r, r->bottom(), r->end());
@@ -3466,7 +3466,7 @@ private:
   }

   static size_t gclab_word_size() {
-    return ParallelGCG1AllocBufferSize / HeapWordSize;
+    return G1ParallelGCAllocBufferSize / HeapWordSize;
   }

   static size_t bitmap_size_in_bits() {
@@ -3616,7 +3616,7 @@ private:
 public:
   G1ParGCAllocBuffer() :
-    ParGCAllocBuffer(ParallelGCG1AllocBufferSize / HeapWordSize),
+    ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
     _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
     _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
     _retired(false)
@@ -3812,14 +3812,14 @@ public:
     HeapWord* obj = NULL;
     if (word_sz * 100 <
-        (size_t)(ParallelGCG1AllocBufferSize / HeapWordSize) *
+        (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
                                                   ParallelGCBufferWastePct) {
       G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
       add_to_alloc_buffer_waste(alloc_buf->words_remaining());
       alloc_buf->retire(false, false);

       HeapWord* buf =
-        _g1h->par_allocate_during_gc(purpose, ParallelGCG1AllocBufferSize / HeapWordSize);
+        _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
       if (buf == NULL) return NULL; // Let caller handle allocation failure.
       // Otherwise.
       alloc_buf->set_buf(buf);
@@ -4331,7 +4331,7 @@ public:
       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
       _g1h->g1_policy()->record_termination_time(i, term_ms);
     }
-    if (G1UseSurvivorSpace) {
+    if (G1UseSurvivorSpaces) {
       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
     }
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
@@ -4435,28 +4435,6 @@ g1_process_strong_roots(bool collecting_perm_gen,
   // XXX What should this be doing in the parallel case?
   g1_policy()->record_collection_pause_end_CH_strong_roots();
-  if (G1VerifyRemSet) {
-    // :::: FIXME ::::
-    // The stupid remembered set doesn't know how to filter out dead
-    // objects, which the smart one does, and so when it is created
-    // and then compared the number of entries in each differs and
-    // the verification code fails.
-    guarantee(false, "verification code is broken, see note");
-
-    // Let's make sure that the current rem set agrees with the stupidest
-    // one possible!
-    bool refs_enabled = ref_processor()->discovery_enabled();
-    if (refs_enabled) ref_processor()->disable_discovery();
-    StupidG1RemSet stupid(this);
-    count_closure.n = 0;
-    stupid.oops_into_collection_set_do(&count_closure, worker_i);
-    int stupid_n = count_closure.n;
-    count_closure.n = 0;
-    g1_rem_set()->oops_into_collection_set_do(&count_closure, worker_i);
-    guarantee(count_closure.n == stupid_n, "Old and new rem sets differ.");
-    gclog_or_tty->print_cr("\nFound %d pointers in heap RS.", count_closure.n);
-    if (refs_enabled) ref_processor()->enable_discovery();
-  }
   if (scan_so != NULL) {
     scan_scan_only_set(scan_so, worker_i);
   }
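The @@ -3812 hunk keeps the PLAB bypass test while renaming the buffer-size flag: an object is only worth retiring and refilling a buffer for if it is smaller than ParallelGCBufferWastePct percent of a fresh buffer. A minimal sketch of that threshold, assuming the 8*K default from g1_globals.hpp, 8-byte heap words, and an illustrative waste percentage of 10:

```cpp
#include <cstdio>
#include <cstddef>

// Sketch of the PLAB "waste" test from the @@ -3812 hunk: small objects
// justify retiring the current buffer and allocating a fresh one; large
// objects fall through to direct allocation.
int main() {
  const size_t HeapWordSize                = 8;        // assumed 64-bit heap word
  const size_t G1ParallelGCAllocBufferSize = 8 * 1024; // flag default: 8*K bytes
  const size_t ParallelGCBufferWastePct    = 10;       // illustrative value

  const size_t buffer_words = G1ParallelGCAllocBufferSize / HeapWordSize; // 1024

  const size_t sizes[] = {16, 102, 103, 512};
  for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
    size_t word_sz = sizes[i];
    // Same comparison as the hunk: word_sz * 100 < buffer_words * waste_pct.
    bool small_enough = word_sz * 100 < buffer_words * ParallelGCBufferWastePct;
    printf("object of %zu words -> %s\n", word_sz,
           small_enough ? "refill PLAB and allocate inside it"
                        : "too large, allocate outside the PLAB");
  }
  return 0;
}
```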
src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

@@ -37,8 +37,9 @@ G1CollectedHeap::heap_region_containing(const void* addr) const {
 inline HeapRegion*
 G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
   assert(_g1_reserved.contains(addr), "invariant");
-  size_t index = ((intptr_t) addr - (intptr_t) _g1_reserved.start())
+  size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
                                         >> HeapRegion::LogOfHRGrainBytes;
   HeapRegion* res = _hrs->at(index);
   assert(res == _hrs->addr_to_region(addr), "sanity");
   return res;
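The rewrite above replaces raw intptr_t subtraction with pointer_delta, but the index arithmetic is unchanged: the byte offset from the start of the reserved heap, shifted right by the log of the region size. A self-contained sketch under assumed values (1 MB regions, i.e. LogOfHRGrainBytes = 20); pointer_delta here is a simplified stand-in, not HotSpot's helper:

```cpp
#include <cstdio>
#include <cstdint>
#include <cstddef>

// Simplified stand-in for HotSpot's pointer_delta(left, right, unit):
// distance between two addresses in units of 'unit' bytes (1 == byte delta).
static size_t pointer_delta(const void* left, const void* right, size_t unit) {
  return ((uintptr_t)left - (uintptr_t)right) / unit;
}

int main() {
  const int LogOfHRGrainBytes = 20;      // assumed 1 MB heap regions
  static char heap_base[4 << 20];        // pretend 4 MB reserved heap

  const void* addr = heap_base + (3 << 20) + 12345;  // somewhere in region 3
  size_t index = pointer_delta(addr, heap_base, 1) >> LogOfHRGrainBytes;

  printf("address maps to heap region index %zu\n", index);  // prints 3
  return 0;
}
```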
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

@@ -136,7 +136,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

-  _pause_time_target_ms((double) G1MaxPauseTimeMS),
+  _pause_time_target_ms((double) MaxGCPauseMillis),

   // </NEW PREDICTION>
@@ -220,7 +220,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _par_last_termination_times_ms = new double[_parallel_gc_threads];

   // start conservatively
-  _expensive_region_limit_ms = 0.5 * (double) G1MaxPauseTimeMS;
+  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

   // <NEW PREDICTION>
@@ -249,12 +249,12 @@ G1CollectorPolicy::G1CollectorPolicy() :
   // </NEW PREDICTION>

-  double time_slice  = (double) G1TimeSliceMS / 1000.0;
-  double max_gc_time = (double) G1MaxPauseTimeMS / 1000.0;
+  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
+  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
   guarantee(max_gc_time < time_slice,
             "Max GC time should not be greater than the time slice");
   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
-  _sigma = (double) G1ConfidencePerc / 100.0;
+  _sigma = (double) G1ConfidencePercent / 100.0;

   // start conservatively (around 50ms is about right)
   _concurrent_mark_init_times_ms->add(0.05);
@@ -262,7 +262,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _concurrent_mark_cleanup_times_ms->add(0.20);
   _tenuring_threshold = MaxTenuringThreshold;

-  if (G1UseSurvivorSpace) {
+  if (G1UseSurvivorSpaces) {
     // if G1FixedSurvivorSpaceSize is 0 which means the size is not
     // fixed, then _max_survivor_regions will be calculated at
     // calculate_young_list_target_config during initialization
@@ -451,7 +451,7 @@ void G1CollectorPolicy::calculate_young_list_target_config(size_t rs_lengths) {
   guarantee( adaptive_young_list_length(), "pre-condition" );

   double start_time_sec = os::elapsedTime();
-  size_t min_reserve_perc = MAX2((size_t) 2, (size_t) G1MinReservePerc);
+  size_t min_reserve_perc = MAX2((size_t) 2, (size_t) G1MinReservePercent);
   min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
   size_t reserve_regions =
     (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
@@ -1109,7 +1109,7 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
   _short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length);
   tag_scan_only(short_lived_so_length);

-  if (G1UseSurvivorSpace) {
+  if (G1UseSurvivorSpaces) {
     _survivors_age_table.clear();
   }
@@ -1826,11 +1826,11 @@ void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
     _rs_lengths_seq->add((double) _max_rs_lengths);

     double expensive_region_limit_ms =
-      (double) G1MaxPauseTimeMS - predict_constant_other_time_ms();
+      (double) MaxGCPauseMillis - predict_constant_other_time_ms();
     if (expensive_region_limit_ms < 0.0) {
       // this means that the other time was predicted to be longer than
       // than the max pause time
-      expensive_region_limit_ms = (double) G1MaxPauseTimeMS;
+      expensive_region_limit_ms = (double) MaxGCPauseMillis;
     }
     _expensive_region_limit_ms = expensive_region_limit_ms;
@@ -2093,24 +2093,24 @@ void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
 }

 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
-  if (_recent_pause_times_ms->num() == 0) return (double) G1MaxPauseTimeMS;
+  if (_recent_pause_times_ms->num() == 0) return (double) MaxGCPauseMillis;
   else return _recent_pause_times_ms->avg();
 }

 double G1CollectorPolicy::recent_avg_time_for_CH_strong_ms() {
   if (_recent_CH_strong_roots_times_ms->num() == 0)
-    return (double) G1MaxPauseTimeMS / 3.0;
+    return (double) MaxGCPauseMillis / 3.0;
   else return _recent_CH_strong_roots_times_ms->avg();
 }

 double G1CollectorPolicy::recent_avg_time_for_G1_strong_ms() {
   if (_recent_G1_strong_roots_times_ms->num() == 0)
-    return (double) G1MaxPauseTimeMS / 3.0;
+    return (double) MaxGCPauseMillis / 3.0;
   else return _recent_G1_strong_roots_times_ms->avg();
 }

 double G1CollectorPolicy::recent_avg_time_for_evac_ms() {
-  if (_recent_evac_times_ms->num() == 0) return (double) G1MaxPauseTimeMS / 3.0;
+  if (_recent_evac_times_ms->num() == 0) return (double) MaxGCPauseMillis / 3.0;
   else return _recent_evac_times_ms->avg();
 }
@@ -2197,17 +2197,18 @@ G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
 }

 size_t G1CollectorPolicy::expansion_amount() {
-  if ((int)(recent_avg_pause_time_ratio() * 100.0) > G1GCPct) {
-    // We will double the existing space, or take G1ExpandByPctOfAvail % of
-    // the available expansion space, whichever is smaller, bounded below
-    // by a minimum expansion (unless that's all that's left.)
+  if ((int)(recent_avg_pause_time_ratio() * 100.0) > G1GCPercent) {
+    // We will double the existing space, or take
+    // G1ExpandByPercentOfAvailable % of the available expansion
+    // space, whichever is smaller, bounded below by a minimum
+    // expansion (unless that's all that's left.)
     const size_t min_expand_bytes = 1*M;
     size_t reserved_bytes = _g1->g1_reserved_obj_bytes();
     size_t committed_bytes = _g1->capacity();
     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
     size_t expand_bytes;
     size_t expand_bytes_via_pct =
-      uncommitted_bytes * G1ExpandByPctOfAvail / 100;
+      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
@@ -2591,7 +2592,7 @@ size_t G1CollectorPolicy::max_regions(int purpose) {
 // Calculates survivor space parameters.
 void G1CollectorPolicy::calculate_survivors_policy()
 {
-  if (!G1UseSurvivorSpace) {
+  if (!G1UseSurvivorSpaces) {
     return;
   }

   if (G1FixedSurvivorSpaceSize == 0) {
@@ -2851,7 +2852,7 @@ record_concurrent_mark_cleanup_end(size_t freed_bytes,
 // estimate of the number of live bytes.
 void G1CollectorPolicy::
 add_to_collection_set(HeapRegion* hr) {
-  if (G1TraceRegions) {
+  if (G1PrintRegions) {
     gclog_or_tty->print_cr("added region to cset %d:["PTR_FORMAT", "PTR_FORMAT"], "
                            "top "PTR_FORMAT", young %s",
                            hr->hrs_index(), hr->bottom(), hr->end(),
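expansion_amount() above now reads G1GCPercent and G1ExpandByPercentOfAvailable, but the clamping logic is untouched: claim a percentage of the uncommitted space, never more than a doubling of the committed space, never less than 1 MB, and never past the reservation. A small sketch of that sequence with made-up heap sizes and the 20% flag default:

```cpp
#include <cstdio>
#include <cstddef>
#include <algorithm>

int main() {
  const size_t M = 1024 * 1024;
  const size_t G1ExpandByPercentOfAvailable = 20;   // flag default
  const size_t min_expand_bytes = 1 * M;

  // Made-up heap state: 1 GB reserved, 256 MB currently committed.
  size_t reserved_bytes    = 1024 * M;
  size_t committed_bytes   = 256 * M;
  size_t uncommitted_bytes = reserved_bytes - committed_bytes;

  // Same clamping sequence as expansion_amount() in the hunk above.
  size_t expand_bytes = uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
  expand_bytes = std::min(expand_bytes, committed_bytes);   // at most a doubling
  expand_bytes = std::max(expand_bytes, min_expand_bytes);  // at least 1 MB
  expand_bytes = std::min(expand_bytes, uncommitted_bytes); // never past reserved

  printf("expand by %zu MB\n", expand_bytes / M);  // 20% of 768 MB -> ~153 MB
  return 0;
}
```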
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp

@@ -57,7 +57,7 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
   mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

-  if (G1VerifyConcMark) {
+  if (VerifyDuringGC) {
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
     g1h->checkConcurrentMark();
   }
src/share/vm/gc_implementation/g1/g1RemSet.cpp

@@ -523,7 +523,7 @@ HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
   // and they are causing failures. When we resolve said race
   // conditions, we'll revert back to parallel remembered set
   // updating and scanning. See CRs 6677707 and 6677708.
-  if (G1EnableParallelRSetUpdating || (worker_i == 0)) {
+  if (G1ParallelRSetUpdatingEnabled || (worker_i == 0)) {
     updateRS(worker_i);
     scanNewRefsRS(oc, worker_i);
   } else {
@@ -532,7 +532,7 @@ HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
     _g1p->record_update_rs_time(worker_i, 0.0);
     _g1p->record_scan_new_refs_time(worker_i, 0.0);
   }
-  if (G1EnableParallelRSetScanning || (worker_i == 0)) {
+  if (G1ParallelRSetScanningEnabled || (worker_i == 0)) {
     scanRS(oc, worker_i);
   } else {
     _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime());
src/share/vm/gc_implementation/g1/g1_globals.hpp

@@ -28,46 +28,34 @@
 #define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \
                                                                             \
-  product(intx, ParallelGCG1AllocBufferSize, 8*K,                           \
+  product(intx, G1ParallelGCAllocBufferSize, 8*K,                           \
          "Size of parallel G1 allocation buffers in to-space.")             \
                                                                             \
-  product(intx, G1TimeSliceMS, 500,                                         \
-          "Time slice for MMU specification")                               \
-                                                                            \
-  product(intx, G1MaxPauseTimeMS, 200,                                      \
-          "Max GC time per MMU time slice")                                 \
-                                                                            \
-  product(intx, G1ConfidencePerc, 50,                                       \
+  product(intx, G1ConfidencePercent, 50,                                    \
          "Confidence level for MMU/pause predictions")                      \
                                                                             \
-  product(intx, G1MarkingOverheadPerc, 0,                                   \
+  develop(intx, G1MarkingOverheadPercent, 0,                                \
          "Overhead of concurrent marking")                                  \
                                                                             \
-  product(bool, G1AccountConcurrentOverhead, false,                         \
+  develop(bool, G1AccountConcurrentOverhead, false,                         \
          "Whether soft real-time compliance in G1 will take into account"   \
          "concurrent overhead")                                             \
                                                                             \
  product(intx, G1YoungGenSize, 0,                                           \
          "Size of the G1 young generation, 0 is the adaptive policy")       \
                                                                             \
-  product(bool, G1Gen, true,                                                \
+  develop(bool, G1Gen, true,                                                \
          "If true, it will enable the generational G1")                     \
                                                                             \
-  develop(intx, G1GCPct, 10,                                                \
+  develop(intx, G1GCPercent, 10,                                            \
          "The desired percent time spent on GC")                            \
                                                                             \
-  product(intx, G1PolicyVerbose, 0,                                         \
+  develop(intx, G1PolicyVerbose, 0,                                         \
          "The verbosity level on G1 policy decisions")                      \
                                                                             \
  develop(bool, G1UseHRIntoRS, true,                                         \
          "Determines whether the 'advanced' HR Into rem set is used.")      \
                                                                             \
-  product(bool, G1VerifyRemSet, false,                                      \
-          "If true, verify the rem set functioning at each GC")             \
-                                                                            \
-  product(bool, G1VerifyConcMark, false,                                    \
-          "If true, verify the conc marking code at full GC time")          \
-                                                                            \
  develop(intx, G1MarkingVerboseLevel, 0,                                    \
          "Level (0-4) of verboseness of the marking code")                  \
@@ -77,38 +65,28 @@
  develop(bool, G1TraceMarkStackOverflow, false,                             \
          "If true, extra debugging code for CM restart for ovflw.")         \
                                                                             \
-  product(bool, G1VerifyMarkingInEvac, false,                               \
-          "If true, verify marking info during evacuation")                 \
-                                                                            \
  develop(intx, G1PausesBtwnConcMark, -1,                                    \
          "If positive, fixed number of pauses between conc markings")       \
                                                                             \
  product(intx, G1EfficiencyPctCausesMark, 80,                               \
          "The cum gc efficiency since mark fall-off that causes "           \
          "new marking")                                                     \
                                                                             \
-  product(bool, TraceConcurrentMark, false,                                 \
-          "Trace concurrent mark")                                          \
-                                                                            \
-  product(bool, SummarizeG1ConcMark, false,                                 \
+  diagnostic(bool, G1SummarizeConcurrentMark, false,                        \
          "Summarize concurrent mark info")                                  \
                                                                             \
-  product(bool, SummarizeG1RSStats, false,                                  \
+  diagnostic(bool, G1SummarizeRSetStats, false,                             \
          "Summarize remembered set processing info")                        \
                                                                             \
-  product(bool, SummarizeG1ZFStats, false,                                  \
+  diagnostic(bool, G1SummarizeZFStats, false,                               \
          "Summarize zero-filling info")                                     \
                                                                             \
-  product(bool, TraceG1Refine, false,                                       \
+  develop(bool, G1TraceConcurrentRefinement, false,                         \
          "Trace G1 concurrent refinement")                                  \
                                                                             \
  develop(bool, G1ConcMark, true,                                            \
          "If true, run concurrent marking for G1")                          \
                                                                             \
-  product(intx, G1CMStackSize, 2 * 1024 * 1024,                             \
+  product(intx, G1MarkStackSize, 2 * 1024 * 1024,                           \
          "Size of the mark stack for concurrent marking.")                  \
                                                                             \
-  product(intx, G1CMRegionStackSize, 1024 * 1024,                           \
+  product(intx, G1MarkRegionStackSize, 1024 * 1024,                         \
          "Size of the region stack for concurrent marking.")                \
                                                                             \
  develop(bool, G1ConcRefine, true,                                          \
@@ -121,7 +99,7 @@
          "Number of heap regions of alloc ahead of starting collection "    \
          "pause to start concurrent refinement (initially)")                \
                                                                             \
-  product(bool, G1SmoothConcRefine, true,                                   \
+  develop(bool, G1SmoothConcRefine, true,                                   \
          "Attempts to smooth out the overhead of concurrent refinement")    \
                                                                             \
  develop(bool, G1ConcZeroFill, true,                                        \
@@ -157,7 +135,7 @@
  develop(bool, G1SATBPrintStubs, false,                                     \
          "If true, print generated stubs for the SATB barrier")             \
                                                                             \
-  product(intx, G1ExpandByPctOfAvail, 20,                                   \
+  product(intx, G1ExpandByPercentOfAvailable, 20,                           \
          "When expanding, % of uncommitted space to claim.")                \
                                                                             \
  develop(bool, G1RSBarrierRegionFilter, true,                               \
@@ -179,18 +157,9 @@
          "If true, verify that no dirty cards remain after RS log "         \
          "processing.")                                                     \
                                                                             \
-  product(intx, G1MinPausesBetweenMarks, 2,                                 \
-          "Number of inefficient pauses necessary to trigger marking.")     \
-                                                                            \
-  product(intx, G1InefficientPausePct, 80,                                  \
-          "Threshold of an 'inefficient' pauses (as % of cum efficiency.")  \
-                                                                            \
  develop(bool, G1RSCountHisto, false,                                       \
          "If true, print a histogram of RS occupancies after each pause")   \
                                                                             \
-  product(bool, G1TraceFileOverwrite, false,                                \
-          "Allow the trace file to be overwritten")                         \
-                                                                            \
  develop(intx, G1PrintRegionLivenessInfo, 0,                                \
          "When > 0, print the occupancies of the <n> best and worst"        \
          "regions.")                                                        \
@@ -198,9 +167,6 @@
  develop(bool, G1PrintParCleanupStats, false,                               \
          "When true, print extra stats about parallel cleanup.")            \
                                                                             \
-  product(bool, G1DoAgeCohortChecks, false,                                 \
-          "When true, check well-formedness of age cohort structures.")     \
-                                                                            \
  develop(bool, G1DisablePreBarrier, false,                                  \
          "Disable generation of pre-barrier (i.e., marking barrier) ")      \
@@ -214,17 +180,17 @@
  develop(intx, G1ConcRSLogCacheSize, 10,                                    \
          "Log base 2 of the length of conc RS hot-card cache.")             \
                                                                             \
-  product(bool, G1ConcRSCountTraversals, false,                             \
+  develop(bool, G1ConcRSCountTraversals, false,                             \
          "If true, gather data about the number of times CR traverses "     \
          "cards ")                                                          \
                                                                             \
-  product(intx, G1ConcRSHotCardLimit, 4,                                    \
+  develop(intx, G1ConcRSHotCardLimit, 4,                                    \
          "The threshold that defines (>=) a hot card.")                     \
                                                                             \
  develop(bool, G1PrintOopAppls, false,                                      \
          "When true, print applications of closures to external locs.")     \
                                                                             \
-  product(intx, G1LogRSRegionEntries, 7,                                    \
+  develop(intx, G1LogRSRegionEntries, 7,                                    \
          "Log_2 of max number of regions for which we keep bitmaps.")       \
                                                                             \
  develop(bool, G1RecordHRRSOops, false,                                     \
@@ -254,11 +220,11 @@
          "It determines whether the system will calculate an optimum "      \
          "scan-only set.")                                                  \
                                                                             \
-  product(intx, G1MinReservePerc, 10,                                       \
+  product(intx, G1MinReservePercent, 10,                                    \
          "It determines the minimum reserve we should have in the heap "    \
          "to minimize the probability of promotion failure.")               \
                                                                             \
-  product(bool, G1TraceRegions, false,                                      \
+  diagnostic(bool, G1PrintRegions, false,                                   \
          "If set G1 will print information on which regions are being "     \
          "allocated and which are reclaimed.")                              \
@@ -268,24 +234,24 @@
  develop(bool, G1HRRSFlushLogBuffersOnVerify, false,                        \
          "Forces flushing of log buffers before verification.")             \
                                                                             \
-  product(bool, G1UseSurvivorSpace, true,                                   \
+  product(bool, G1UseSurvivorSpaces, true,                                  \
          "When true, use survivor space.")                                  \
                                                                             \
-  product(bool, G1FixedTenuringThreshold, false,                            \
+  develop(bool, G1FixedTenuringThreshold, false,                            \
          "When set, G1 will not adjust the tenuring threshold")             \
                                                                             \
-  product(bool, G1FixedEdenSize, false,                                     \
+  develop(bool, G1FixedEdenSize, false,                                     \
          "When set, G1 will not allocate unused survivor space regions")    \
                                                                             \
-  product(uintx, G1FixedSurvivorSpaceSize, 0,                               \
+  develop(uintx, G1FixedSurvivorSpaceSize, 0,                               \
          "If non-0 is the size of the G1 survivor space, "                  \
          "otherwise SurvivorRatio is used to determine the size")           \
                                                                             \
-  experimental(bool, G1EnableParallelRSetUpdating, false,                   \
+  experimental(bool, G1ParallelRSetUpdatingEnabled, false,                  \
          "Enables the parallelization of remembered set updating "          \
          "during evacuation pauses")                                        \
                                                                             \
-  experimental(bool, G1EnableParallelRSetScanning, false,                   \
+  experimental(bool, G1ParallelRSetScanningEnabled, false,                  \
          "Enables the parallelization of remembered set scanning "          \
          "during evacuation pauses")
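Most edits in this file only rename a flag or move it between the develop/product/diagnostic/experimental columns of the G1_FLAGS table. As a rough illustration of how such an X-macro flag table can be consumed, here is a simplified sketch; it is not HotSpot's actual expansion, and MINI_G1_FLAGS / DEFINE_FLAG are hypothetical names introduced only for this example.

```cpp
#include <cstdint>
#include <cstdio>

typedef intptr_t intx;

// Hypothetical miniature flag table in the same X-macro style as G1_FLAGS.
// Each entry records its category, type, name, default value and doc string.
#define MINI_G1_FLAGS(develop, product, diagnostic)                          \
  product(intx, G1ConfidencePercent, 50,                                     \
          "Confidence level for MMU/pause predictions")                      \
  develop(intx, G1MarkingOverheadPercent, 0,                                 \
          "Overhead of concurrent marking")                                  \
  diagnostic(bool, G1PrintRegions, false,                                    \
          "Print information on region allocation and reclamation")

// One possible expansion: define a global variable per flag.
#define DEFINE_FLAG(type, name, value, doc) type name = value;
MINI_G1_FLAGS(DEFINE_FLAG, DEFINE_FLAG, DEFINE_FLAG)
#undef DEFINE_FLAG

int main() {
  printf("G1ConfidencePercent = %ld\n", (long)G1ConfidencePercent);
  printf("G1PrintRegions      = %d\n", (int)G1PrintRegions);
  return 0;
}
```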
src/share/vm/gc_implementation/g1/heapRegion.cpp

@@ -160,12 +160,6 @@ HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
     if (!g1h->is_obj_dead(cur_oop, hr)) {
       // Bottom lies entirely below top, so we can call the
       // non-memRegion version of oop_iterate below.
-#ifndef PRODUCT
-      if (G1VerifyMarkingInEvac) {
-        VerifyLiveClosure vl_cl(g1h);
-        cur_oop->oop_iterate(&vl_cl);
-      }
-#endif
       cur_oop->oop_iterate(cl);
     }
     cur = next_obj;
@@ -197,12 +191,6 @@ void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
   // or it was allocated after marking finished, then we add it. Otherwise
   // we can safely ignore the object.
   if (!g1h->is_obj_dead(oop(bottom), _hr)) {
-#ifndef PRODUCT
-    if (G1VerifyMarkingInEvac) {
-      VerifyLiveClosure vl_cl(g1h);
-      oop(bottom)->oop_iterate(&vl_cl, mr);
-    }
-#endif
     oop_size = oop(bottom)->oop_iterate(cl2, mr);
   } else {
     oop_size = oop(bottom)->size();
@@ -232,12 +220,6 @@ void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
     // Last object. Need to do dead-obj filtering here too.
     if (!g1h->is_obj_dead(oop(bottom), _hr)) {
-#ifndef PRODUCT
-      if (G1VerifyMarkingInEvac) {
-        VerifyLiveClosure vl_cl(g1h);
-        oop(bottom)->oop_iterate(&vl_cl, mr);
-      }
-#endif
       oop(bottom)->oop_iterate(cl2, mr);
     }
   }
@@ -713,7 +695,7 @@ void HeapRegion::verify(bool allow_dirty) const {
         G1CollectedHeap::heap()->print();
         gclog_or_tty->print_cr("");
       }
-      if (G1VerifyConcMark &&
+      if (VerifyDuringGC &&
           G1VerifyConcMarkPrintReachable &&
           vl_cl.failures()) {
         g1->concurrent_mark()->print_prev_bitmap_reachable();
src/share/vm/runtime/arguments.cpp

@@ -1288,10 +1288,14 @@ void Arguments::set_g1_gc_flags() {
       Abstract_VM_Version::parallel_worker_threads());
   if (ParallelGCThreads == 0) {
     FLAG_SET_DEFAULT(ParallelGCThreads,
                      Abstract_VM_Version::parallel_worker_threads());
   }
   no_shared_spaces();
+
+  // Set the maximum pause time goal to be a reasonable default.
+  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
+    FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
+  }
 }

 void Arguments::set_server_heap_size() {
src/share/vm/runtime/globals.hpp

@@ -1819,7 +1819,11 @@ class CommandLineFlags {
          "Decay factor to TenuredGenerationSizeIncrement")                  \
                                                                             \
  product(uintx, MaxGCPauseMillis, max_uintx,                                \
-          "Adaptive size policy maximum GC pause time goal in msec")        \
+          "Adaptive size policy maximum GC pause time goal in msec, "       \
+          "or (G1 Only) the max. GC time per MMU time slice")               \
+                                                                            \
+  product(intx, GCPauseIntervalMillis, 500,                                 \
+          "Time slice for MMU specification")                               \
                                                                             \
  product(uintx, MaxGCMinorPauseMillis, max_uintx,                           \
          "Adaptive size policy maximum GC minor pause time goal in msec")   \
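Taken together, the two flags above express G1's MMU goal: at most MaxGCPauseMillis of GC time within any window of GCPauseIntervalMillis. A small sketch of that arithmetic with the defaults this changeset establishes (200 ms and 500 ms), mirroring the sanity check in the G1CollectorPolicy constructor:

```cpp
#include <cstdio>

int main() {
  const double MaxGCPauseMillis      = 200.0;  // default set in arguments.cpp
  const double GCPauseIntervalMillis = 500.0;  // default from globals.hpp

  // Same conversion and sanity check as the G1CollectorPolicy constructor.
  double time_slice  = GCPauseIntervalMillis / 1000.0;  // 0.5 s
  double max_gc_time = MaxGCPauseMillis / 1000.0;       // 0.2 s

  if (max_gc_time >= time_slice) {
    printf("invalid: max GC time must be smaller than the time slice\n");
    return 1;
  }

  // Mutator utilization implied by the pair: (500 - 200) / 500 = 60%.
  double mmu = (time_slice - max_gc_time) / time_slice;
  printf("MMU goal: %.0f%% mutator time in every %.0f ms window\n",
         mmu * 100.0, GCPauseIntervalMillis);
  return 0;
}
```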