openanolis / dragonwell8_hotspot
Commit 8c6a3df6
Authored Aug 24, 2012 by brutisso
Merge; parents: d207797a, 2d2c4b9c
Showing 9 changed files with 103 additions and 133 deletions (+103, -133)
src/cpu/sparc/vm/vm_version_sparc.cpp                        +4   -4
src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp     +20  -4
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp        +12  -8
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp        +0   -1
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp      +46  -70
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp      +6   -39
src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp          +1   -0
src/share/vm/gc_implementation/g1/g1_globals.hpp             +5   -5
src/share/vm/gc_implementation/g1/heapRegion.cpp             +9   -2
src/cpu/sparc/vm/vm_version_sparc.cpp

...
@@ -106,10 +106,10 @@ void VM_Version::initialize() {
   if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
     FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
   }
-  // When using CMS, we cannot use memset() in BOT updates because
-  // the sun4v/CMT version in libc_psr uses BIS which exposes
-  // "phantom zeros" to concurrent readers. See 6948537.
-  if (FLAG_IS_DEFAULT(UseMemSetInBOT) && UseConcMarkSweepGC) {
+  // When using CMS or G1, we cannot use memset() in BOT updates
+  // because the sun4v/CMT version in libc_psr uses BIS which
+  // exposes "phantom zeros" to concurrent readers. See 6948537.
+  if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) {
     FLAG_SET_DEFAULT(UseMemSetInBOT, false);
   }
 #ifdef _LP64
...
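The guard above follows HotSpot's flag-ergonomics pattern: a default is only overridden when the user has not set the flag explicitly. A minimal standalone sketch of that pattern, using a hypothetical BoolFlag type in place of HotSpot's real FLAG_IS_DEFAULT/FLAG_SET_DEFAULT machinery (names and values here are illustrative only):

#include <cstdio>

// Hypothetical stand-in for a HotSpot flag: it remembers whether the
// user overrode it on the command line.
struct BoolFlag {
  bool value;
  bool is_default;                               // true until the user sets it
  void set_default(bool v) { value = v; is_default = true; }
  void set_by_user(bool v) { value = v; is_default = false; }
};

int main() {
  BoolFlag use_memset_in_bot;
  use_memset_in_bot.set_default(true);

  bool use_cms  = false;
  bool use_g1   = true;
  bool is_sun4v = true;                          // pretend we detected a sun4v CPU

  // Mirrors the vm_version_sparc.cpp logic: turn the optimization off by
  // default for CMS or G1 on sun4v, but respect an explicit user choice.
  if (is_sun4v && use_memset_in_bot.is_default && (use_cms || use_g1)) {
    use_memset_in_bot.set_default(false);
  }
  std::printf("UseMemSetInBOT = %s\n", use_memset_in_bot.value ? "true" : "false");
  return 0;
}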
src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp

 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -159,14 +159,30 @@ private:
            "right address out of range");
     assert(left  < right, "Heap addresses out of order");
     size_t num_cards = pointer_delta(right, left) >> LogN_words;
-    memset(&_offset_array[index_for(left)], offset, num_cards);
+    if (UseMemSetInBOT) {
+      memset(&_offset_array[index_for(left)], offset, num_cards);
+    } else {
+      size_t i = index_for(left);
+      const size_t end = i + num_cards;
+      for (; i < end; i++) {
+        _offset_array[i] = offset;
+      }
+    }
   }

   void set_offset_array(size_t left, size_t right, u_char offset) {
     assert(right < _vs.committed_size(), "right address out of range");
-    assert(left  <= right, "indexes out of order");
+    assert(left <= right, "indexes out of order");
     size_t num_cards = right - left + 1;
-    memset(&_offset_array[left], offset, num_cards);
+    if (UseMemSetInBOT) {
+      memset(&_offset_array[left], offset, num_cards);
+    } else {
+      size_t i = left;
+      const size_t end = i + num_cards;
+      for (; i < end; i++) {
+        _offset_array[i] = offset;
+      }
+    }
   }

   void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
...
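The change above replaces the unconditional memset() with a guarded fallback: when UseMemSetInBOT is off, the block offset table entries are written one byte at a time, avoiding the libc_psr/BIS "phantom zeros" issue referenced in bug 6948537. A self-contained sketch of the same fill pattern, with a plain array standing in for the real BOT:

#include <cstdio>
#include <cstring>
#include <vector>

// Fill cards [from, from + num_cards) with 'offset', either via memset or
// via an explicit byte loop, mirroring the shape of set_offset_array() above.
static void fill_offset_cards(unsigned char* offset_array, size_t from,
                              size_t num_cards, unsigned char offset,
                              bool use_memset) {
  if (use_memset) {
    std::memset(&offset_array[from], offset, num_cards);
  } else {
    size_t i = from;
    const size_t end = i + num_cards;
    for (; i < end; i++) {
      offset_array[i] = offset;
    }
  }
}

int main() {
  std::vector<unsigned char> bot(16, 0);
  fill_offset_cards(bot.data(), 4, 8, 7, /* use_memset */ false);
  for (unsigned char c : bot) std::printf("%d ", (int)c);
  std::printf("\n");
  return 0;
}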
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

...
@@ -1934,6 +1934,14 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
   clear_cset_start_regions();
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
+#ifdef SPARC
+  // Issue a stern warning, but allow use for experimentation and debugging.
+  if (VM_Version::is_sun4v() && UseMemSetInBOT) {
+    assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
+    warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
+            " on sun4v; please understand that you are using at your own risk!");
+  }
+#endif
 }

 jint G1CollectedHeap::initialize() {
...
@@ -3582,15 +3590,11 @@ size_t G1CollectedHeap::pending_card_num() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   size_t buffer_size = dcqs.buffer_size();
   size_t buffer_num = dcqs.completed_buffers_num();
-  return buffer_size * buffer_num + extra_cards;
-}
-
-size_t G1CollectedHeap::max_pending_card_num() {
-  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-  size_t buffer_size = dcqs.buffer_size();
-  size_t buffer_num = dcqs.completed_buffers_num();
-  int thread_num = Threads::number_of_threads();
-  return (buffer_num + thread_num) * buffer_size;
+
+  // PtrQueueSet::buffer_size() and PtrQueue:size() return sizes
+  // in bytes - not the number of 'entries'. We need to convert
+  // into a number of cards.
+  return (buffer_size * buffer_num + extra_cards) / oopSize;
 }

 size_t G1CollectedHeap::cards_scanned() {
...
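The pending_card_num() fix is a units correction: the queue sizes are reported in bytes, and since each queue entry is a pointer-sized slot the byte total has to be divided by oopSize to get a card count. A small arithmetic sketch with made-up values (oop_size here stands in for HotSpot's oopSize; the numbers are assumptions, not from a real VM run):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t oop_size    = sizeof(void*);      // stand-in for HotSpot's oopSize
  const size_t buffer_size = 256 * oop_size;     // dcqs.buffer_size() is in bytes
  const size_t buffer_num  = 10;                 // completed buffers
  const size_t extra_cards = 37 * oop_size;      // bytes still in per-thread queues

  // Old formula treated the byte counts as if they were card entries.
  size_t bytes_misread_as_cards = buffer_size * buffer_num + extra_cards;
  // New formula converts the byte total into a number of cards.
  size_t cards = (buffer_size * buffer_num + extra_cards) / oop_size;

  std::printf("bytes misread as cards: %zu, actual cards: %zu\n",
              bytes_misread_as_cards, cards);
  return 0;
}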
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

...
@@ -1706,7 +1706,6 @@ public:
   void stop_conc_gc_threads();

   size_t pending_card_num();
-  size_t max_pending_card_num();
   size_t cards_scanned();

 protected:
...
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

...
@@ -90,7 +90,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _prev_collection_pause_end_ms(0.0),
-  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
...
@@ -197,7 +196,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
   int index = MIN2(_parallel_gc_threads - 1, 7);

-  _pending_card_diff_seq->add(0.0);
   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
   _young_cards_per_entry_ratio_seq->add(
...
@@ -657,7 +655,7 @@ double G1CollectorPolicy::predict_survivor_regions_evac_time() {
   for (HeapRegion* r = _recorded_survivor_head;
        r != NULL && r != _recorded_survivor_tail->get_next_young_region();
        r = r->get_next_young_region()) {
-    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
+    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
   }
   return survivor_regions_evac_time;
 }
...
@@ -801,9 +799,8 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
   _cur_collection_pause_used_at_start_bytes = start_used;
   _cur_collection_pause_used_regions_at_start = _g1->used_regions();
   _pending_cards = _g1->pending_card_num();
-  _max_pending_cards = _g1->max_pending_card_num();

-  _bytes_in_collection_set_before_gc = 0;
+  _collection_set_bytes_used_before = 0;
   _bytes_copied_during_gc = 0;

   YoungList* young_list = _g1->young_list();
...
@@ -1036,12 +1033,6 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
   // do that for any other surv rate groupsx

   if (update_stats) {
-    size_t diff = 0;
-    if (_max_pending_cards >= _pending_cards) {
-      diff = _max_pending_cards - _pending_cards;
-    }
-    _pending_card_diff_seq->add((double) diff);
-
     double cost_per_card_ms = 0.0;
     if (_pending_cards > 0) {
       cost_per_card_ms = phase_times()->_update_rs_time / (double) _pending_cards;
...
@@ -1126,9 +1117,9 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
     _constant_other_time_ms_seq->add(constant_other_time_ms);

     double survival_ratio = 0.0;
-    if (_bytes_in_collection_set_before_gc > 0) {
+    if (_collection_set_bytes_used_before > 0) {
       survival_ratio = (double) _bytes_copied_during_gc /
-                                   (double) _bytes_in_collection_set_before_gc;
+                                   (double) _collection_set_bytes_used_before;
     }

     _pending_cards_seq->add((double) _pending_cards);
...
@@ -1228,6 +1219,15 @@ void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
   dcqs.notify_if_necessary();
 }

+double
+G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
+                                                size_t scanned_cards) {
+  return
+    predict_rs_update_time_ms(pending_cards) +
+    predict_rs_scan_time_ms(scanned_cards) +
+    predict_constant_other_time_ms();
+}
+
 double
 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
   size_t rs_length = predict_rs_length_diff();
...
@@ -1240,21 +1240,28 @@ G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
   return predict_base_elapsed_time_ms(pending_cards, card_num);
 }

-double
-G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
-                                                size_t scanned_cards) {
-  return
-    predict_rs_update_time_ms(pending_cards) +
-    predict_rs_scan_time_ms(scanned_cards) +
-    predict_constant_other_time_ms();
+size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
+  size_t bytes_to_copy;
+  if (hr->is_marked())
+    bytes_to_copy = hr->max_live_bytes();
+  else {
+    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
+    int age = hr->age_in_surv_rate_group();
+    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
+    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
+  }
+  return bytes_to_copy;
 }

 double
 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
-                                                  bool young) {
+                                                  bool for_young_gc) {
   size_t rs_length = hr->rem_set()->occupied();
   size_t card_num;
-  if (gcs_are_young()) {
+
+  // Predicting the number of cards is based on which type of GC
+  // we're predicting for.
+  if (for_young_gc) {
     card_num = predict_young_card_num(rs_length);
   } else {
     card_num = predict_non_young_card_num(rs_length);
...
@@ -1265,25 +1272,14 @@ G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
     predict_rs_scan_time_ms(card_num) +
     predict_object_copy_time_ms(bytes_to_copy);

-  if (young)
+  // The prediction of the "other" time for this region is based
+  // upon the region type and NOT the GC type.
+  if (hr->is_young()) {
     region_elapsed_time_ms += predict_young_other_time_ms(1);
-  else
+  } else {
     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
-  return region_elapsed_time_ms;
-}
-
-size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
-  size_t bytes_to_copy;
-  if (hr->is_marked())
-    bytes_to_copy = hr->max_live_bytes();
-  else {
-    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
-    int age = hr->age_in_surv_rate_group();
-    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
-    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
-  }
-  return bytes_to_copy;
+  }
+  return region_elapsed_time_ms;
 }

 void
...
@@ -1342,22 +1338,6 @@ size_t G1CollectorPolicy::expansion_amount() {
   }
 }

-class CountCSClosure: public HeapRegionClosure {
-  G1CollectorPolicy* _g1_policy;
-public:
-  CountCSClosure(G1CollectorPolicy* g1_policy) :
-    _g1_policy(g1_policy) {}
-  bool doHeapRegion(HeapRegion* r) {
-    _g1_policy->_bytes_in_collection_set_before_gc += r->used();
-    return false;
-  }
-};
-
-void G1CollectorPolicy::count_CS_bytes_used() {
-  CountCSClosure cs_closure(this);
-  _g1->collection_set_iterate(&cs_closure);
-}
-
 void G1CollectorPolicy::print_tracing_info() const {
   _trace_gen0_time_data.print();
   _trace_gen1_time_data.print();
...
@@ -1696,7 +1676,7 @@ void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_l
   // retiring the current allocation region) or a concurrent
   // refine thread (RSet sampling).
-  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
+  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
   size_t used_bytes = hr->used();
   _inc_cset_recorded_rs_lengths += rs_length;
   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
...
@@ -1731,7 +1711,7 @@ void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
   _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;

   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
-  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
+  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
   _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
...
@@ -1854,8 +1834,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
 }

 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
-  // Set this here - in case we're not doing young collections.
-  double non_young_start_time_sec = os::elapsedTime();
+  double young_start_time_sec = os::elapsedTime();

   YoungList* young_list = _g1->young_list();
   finalize_incremental_cset_building();
...
@@ -1869,17 +1848,14 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   double predicted_pause_time_ms = base_time_ms;
   double time_remaining_ms = target_pause_time_ms - base_time_ms;

-  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
+  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
                 "start choosing CSet",
+                ergo_format_size("_pending_cards")
                 ergo_format_ms("predicted base time")
                 ergo_format_ms("remaining time")
                 ergo_format_ms("target pause time"),
-                base_time_ms, time_remaining_ms, target_pause_time_ms);
-
-  HeapRegion* hr;
-  double young_start_time_sec = os::elapsedTime();
+                _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

   _collection_set_bytes_used_before = 0;
   _last_gc_was_young = gcs_are_young() ? true : false;

   if (_last_gc_was_young) {
...
@@ -1895,7 +1871,8 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   uint survivor_region_length = young_list->survivor_length();
   uint eden_region_length = young_list->length() - survivor_region_length;
   init_cset_region_lengths(eden_region_length, survivor_region_length);
-  hr = young_list->first_survivor_region();
+
+  HeapRegion* hr = young_list->first_survivor_region();
   while (hr != NULL) {
     assert(hr->is_survivor(), "badly formed young list");
     hr->set_young();
...
@@ -1926,8 +1903,8 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   phase_times()->_recorded_young_cset_choice_time_ms =
     (young_end_time_sec - young_start_time_sec) * 1000.0;

-  // We are doing young collections so reset this.
-  non_young_start_time_sec = young_end_time_sec;
+  // Set the start of the non-young choice time.
+  double non_young_start_time_sec = young_end_time_sec;

   if (!gcs_are_young()) {
     CollectionSetChooser* cset_chooser = _collectionSetChooser;
...
@@ -1937,6 +1914,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
     uint expensive_region_num = 0;
     bool check_time_remaining = adaptive_young_list_length();
+
     HeapRegion* hr = cset_chooser->peek();
     while (hr != NULL) {
       if (old_cset_region_length() >= max_old_cset_length) {
...
@@ -1950,7 +1928,7 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
         break;
       }

-      double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
+      double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
       if (check_time_remaining) {
         if (predicted_time_ms > time_remaining_ms) {
           // Too expensive for the current CSet.
...
@@ -2025,8 +2003,6 @@ void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   stop_incremental_cset_building();

-  count_CS_bytes_used();
-
   ergo_verbose5(ErgoCSetConstruction,
                 "finish choosing CSet",
                 ergo_format_region("eden")
...
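The core of the policy change is that predict_region_elapsed_time_ms() now takes the GC type explicitly (for_young_gc) and uses it only for the remembered-set card prediction, while the "young other" versus "non-young other" component depends on the region type itself. A simplified standalone sketch of that split, with the individual predictors reduced to hypothetical constant costs (these names and numbers are illustrative, not HotSpot's):

#include <cstdio>
#include <cstddef>

// Hypothetical per-unit costs; the real policy derives these from
// TruncatedSeq averages gathered over previous pauses.
static const double k_cost_per_young_card_ms     = 0.0010;
static const double k_cost_per_non_young_card_ms = 0.0015;
static const double k_cost_per_byte_copied_ms    = 0.000002;
static const double k_young_other_ms             = 0.05;
static const double k_non_young_other_ms         = 0.10;

struct Region {
  bool   is_young;
  size_t rs_length;       // occupied remembered-set entries
  size_t bytes_to_copy;   // predicted live bytes to evacuate
};

// Mirrors the new shape of predict_region_elapsed_time_ms(hr, for_young_gc):
// the card-count cost depends on the GC type, the "other" time on the region type.
double predict_region_elapsed_time_ms(const Region& hr, bool for_young_gc) {
  double card_cost = for_young_gc ? k_cost_per_young_card_ms
                                  : k_cost_per_non_young_card_ms;
  double ms = hr.rs_length * card_cost
            + hr.bytes_to_copy * k_cost_per_byte_copied_ms;
  ms += hr.is_young ? k_young_other_ms : k_non_young_other_ms;
  return ms;
}

int main() {
  Region survivor = { true, 2000, 512 * 1024 };
  Region old_r    = { false, 8000, 256 * 1024 };
  std::printf("survivor, young GC: %.3f ms\n",
              predict_region_elapsed_time_ms(survivor, true));
  std::printf("old region, mixed GC: %.3f ms\n",
              predict_region_elapsed_time_ms(old_r, false));
  return 0;
}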
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

...
@@ -228,7 +228,6 @@ private:
   TruncatedSeq* _alloc_rate_ms_seq;
   double        _prev_collection_pause_end_ms;

-  TruncatedSeq* _pending_card_diff_seq;
   TruncatedSeq* _rs_length_diff_seq;
   TruncatedSeq* _cost_per_card_ms_seq;
   TruncatedSeq* _young_cards_per_entry_ratio_seq;
...
@@ -295,7 +294,6 @@ private:
   double _pause_time_target_ms;

   size_t _pending_cards;
-  size_t _max_pending_cards;

 public:
   // Accessors
...
@@ -325,28 +323,6 @@ public:
     _max_rs_lengths = rs_lengths;
   }

-  size_t predict_pending_card_diff() {
-    double prediction = get_new_neg_prediction(_pending_card_diff_seq);
-    if (prediction < 0.00001) {
-      return 0;
-    } else {
-      return (size_t) prediction;
-    }
-  }
-
-  size_t predict_pending_cards() {
-    size_t max_pending_card_num = _g1->max_pending_card_num();
-    size_t diff = predict_pending_card_diff();
-    size_t prediction;
-    if (diff > max_pending_card_num) {
-      prediction = max_pending_card_num;
-    } else {
-      prediction = max_pending_card_num - diff;
-    }
-    return prediction;
-  }
-
   size_t predict_rs_length_diff() {
     return (size_t) get_new_prediction(_rs_length_diff_seq);
   }
...
@@ -439,7 +415,7 @@ public:
   double predict_base_elapsed_time_ms(size_t pending_cards,
                                       size_t scanned_cards);
   size_t predict_bytes_to_copy(HeapRegion* hr);
-  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
+  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);

   void set_recorded_rs_lengths(size_t rs_lengths);
...
@@ -495,12 +471,6 @@ public:
   }

 private:
-  size_t _bytes_in_collection_set_before_gc;
-  size_t _bytes_copied_during_gc;
-
-  // Used to count used bytes in CS.
-  friend class CountCSClosure;
-
   // Statistics kept per GC stoppage, pause or full.
   TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
...
@@ -514,9 +484,13 @@ private:
   // The number of bytes in the collection set before the pause. Set from
   // the incrementally built collection set at the start of an evacuation
-  // pause.
+  // pause, and incremented in finalize_cset() when adding old regions
+  // (if any) to the collection set.
   size_t _collection_set_bytes_used_before;

+  // The number of bytes copied during the GC.
+  size_t _bytes_copied_during_gc;
+
   // The associated information that is maintained while the incremental
   // collection set is being built with young regions. Used to populate
   // the recorded info for the evacuation pause.
...
@@ -646,9 +620,6 @@ private:
   bool predict_will_fit(uint young_length, double base_time_ms,
                         uint base_free_regions, double target_pause_time_ms);

-  // Count the number of bytes used in the CS.
-  void count_CS_bytes_used();
-
 public:

   G1CollectorPolicy();
...
@@ -666,10 +637,6 @@ public:
   // higher, recalculate the young list target length prediction.
   void revise_young_list_target_length_if_necessary();

-  size_t bytes_in_collection_set() {
-    return _bytes_in_collection_set_before_gc;
-  }
-
   // This should be called after the heap is resized.
   void record_new_heap_size(uint new_number_of_regions);
...
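With _bytes_in_collection_set_before_gc and CountCSClosure removed, the survival ratio recorded after a pause is computed against _collection_set_bytes_used_before, i.e. copied bytes over the bytes that were in the collection set before the evacuation. A small worked example with assumed values, just to make the ratio concrete:

#include <cstdio>

int main() {
  // Assumed sizes for illustration; the real values come from pause bookkeeping.
  double collection_set_bytes_used_before = 64.0 * 1024 * 1024; // CSet bytes before GC
  double bytes_copied_during_gc           = 9.0 * 1024 * 1024;  // survivors evacuated

  double survival_ratio = 0.0;
  if (collection_set_bytes_used_before > 0) {
    survival_ratio = bytes_copied_during_gc / collection_set_bytes_used_before;
  }
  std::printf("survival ratio: %.2f%%\n", survival_ratio * 100.0);
  return 0;
}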
src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp

...
@@ -125,6 +125,7 @@ public:
 #define ergo_format_double(_name_) ", " _name_ ": %1.2f"
 #define ergo_format_perc(_name_) ", " _name_ ": %1.2f %%"
 #define ergo_format_ms(_name_) ", " _name_ ": %1.2f ms"
+#define ergo_format_size(_name_) ", " _name_ ": "SIZE_FORMAT

 // Double parameter format strings
 #define ergo_format_byte_perc(_name_) \
...
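The new ergo_format_size macro works like the other ergo_format_* macros: each expands to a string literal fragment, and adjacent literals are concatenated into one format string, which is why adding the _pending_cards field also meant switching from ergo_verbose3 to ergo_verbose4 in finalize_cset(). A minimal sketch of that idea outside HotSpot, with %zu standing in for SIZE_FORMAT and the real tag/verbosity plumbing omitted:

#include <cstdio>
#include <cstddef>

// Simplified stand-ins for the HotSpot macros.
#define ergo_format_ms(_name_)   ", " _name_ ": %1.2f ms"
#define ergo_format_size(_name_) ", " _name_ ": %zu"   // HotSpot uses SIZE_FORMAT

int main() {
  size_t pending_cards = 1234;
  double base_time_ms = 12.5, remaining_ms = 187.5, target_ms = 200.0;

  // String-literal concatenation builds one printf format from the fragments.
  std::printf("start choosing CSet"
              ergo_format_size("_pending_cards")
              ergo_format_ms("predicted base time")
              ergo_format_ms("remaining time")
              ergo_format_ms("target pause time") "\n",
              pending_cards, base_time_ms, remaining_ms, target_ms);
  return 0;
}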
src/share/vm/gc_implementation/g1/g1_globals.hpp

...
@@ -287,17 +287,17 @@
           "The number of times we'll force an overflow during "             \
           "concurrent marking")                                             \
                                                                             \
-  develop(uintx, G1DefaultMinNewGenPercent, 20,                             \
+  experimental(uintx, G1DefaultMinNewGenPercent, 20,                        \
           "Percentage (0-100) of the heap size to use as minimum "          \
           "young gen size.")                                                \
                                                                             \
-  develop(uintx, G1DefaultMaxNewGenPercent, 80,                             \
+  experimental(uintx, G1DefaultMaxNewGenPercent, 80,                        \
           "Percentage (0-100) of the heap size to use as maximum "          \
           "young gen size.")                                                \
                                                                             \
-  develop(uintx, G1OldCSetRegionLiveThresholdPercent, 90,                   \
+  experimental(uintx, G1OldCSetRegionLiveThresholdPercent, 90,              \
           "Threshold for regions to be added to the collection set. "       \
-          "Regions with more live bytes that this will not be collected.")  \
+          "Regions with more live bytes than this will not be collected.")  \
                                                                             \
   product(uintx, G1HeapWastePercent, 5,                                     \
           "Amount of space, expressed as a percentage of the heap size, "   \
...
@@ -306,7 +306,7 @@
   product(uintx, G1MixedGCCountTarget, 4,                                   \
           "The target number of mixed GCs after a marking cycle.")          \
                                                                             \
-  develop(uintx, G1OldCSetRegionThresholdPercent, 10,                       \
+  experimental(uintx, G1OldCSetRegionThresholdPercent, 10,                  \
           "An upper bound for the number of old CSet regions expressed "    \
           "as a percentage of the heap size.")                              \
...
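A usage note, not part of the diff itself: promoting these flags from develop() to experimental() makes them available outside debug builds, and, as with other experimental HotSpot options, setting them on the command line is expected to also require -XX:+UnlockExperimentalVMOptions.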
src/share/vm/gc_implementation/g1/heapRegion.cpp

...
@@ -384,10 +384,17 @@ void HeapRegion::par_clear() {
 }

 void HeapRegion::calc_gc_efficiency() {
+  // GC efficiency is the ratio of how much space would be
+  // reclaimed over how long we predict it would take to reclaim it.
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1CollectorPolicy* g1p = g1h->g1_policy();
-  _gc_efficiency = (double) reclaimable_bytes() /
-                            g1p->predict_region_elapsed_time_ms(this, false);
+
+  // Retrieve a prediction of the elapsed time for this region for
+  // a mixed gc because the region will only be evacuated during a
+  // mixed gc.
+  double region_elapsed_time_ms =
+    g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
+
+  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
 }

 void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
...
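calc_gc_efficiency() now explicitly asks for the mixed-GC prediction, since old regions are only evacuated during mixed collections; the efficiency itself is still reclaimable bytes divided by predicted milliseconds. A toy calculation with assumed numbers:

#include <cstdio>

// GC efficiency as used for ranking old regions: space reclaimed per
// predicted millisecond of evacuation work. The values are illustrative only.
int main() {
  double reclaimable_bytes = 6.0 * 1024 * 1024;   // ~6 MB of dead data in the region
  double predicted_mixed_gc_ms = 4.2;             // predict_region_elapsed_time_ms(this, false)
  double gc_efficiency = reclaimable_bytes / predicted_mixed_gc_ms;
  std::printf("gc efficiency: %.0f bytes reclaimed per ms\n", gc_efficiency);
  return 0;
}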