openanolis / dragonwell8_hotspot — commit 16bbccbf (Merge)
Authored on Oct 14, 2010 by jcoomes
Parents: 78b3ada8, 0e9650db

Showing 8 changed files with 192 additions and 303 deletions (+192 −303). The combined diff renames HRInto_G1RemSet to G1RemSet, removes the StupidG1RemSet implementation and the G1UseHRIntoRS develop flag, and reworks how G1's per-thread task queues are drained and verified.
Changed files:

  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp    +86   −43
  src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp    +16   −92
  src/share/vm/gc_implementation/g1/g1OopClosures.hpp       +0    −2
  src/share/vm/gc_implementation/g1/g1RemSet.cpp           +35   −47
  src/share/vm/gc_implementation/g1/g1RemSet.hpp           +39  −110
  src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp    +10    −6
  src/share/vm/gc_implementation/g1/g1_globals.hpp          +0    −3
  src/share/vm/gc_implementation/includeDB_gc_g1            +6    −0
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

```diff
@@ -791,7 +791,7 @@ class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
   int _worker_i;
 public:
   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
-    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
+    _cl(g1->g1_rem_set(), worker_i),
     _worker_i(worker_i),
     _g1h(g1)
   { }
```
```diff
@@ -890,7 +890,7 @@ void G1CollectedHeap::do_collection(bool explicit_gc,
     abandon_cur_alloc_region();
     abandon_gc_alloc_regions();
     assert(_cur_alloc_region == NULL, "Invariant.");
-    g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
+    g1_rem_set()->cleanupHRRS();
     tear_down_region_lists();
     set_used_regions_to_need_zero_fill();
```
```diff
@@ -1506,15 +1506,11 @@ jint G1CollectedHeap::initialize() {
   }
 
   // Also create a G1 rem set.
-  if (G1UseHRIntoRS) {
-    if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
-      _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
-    } else {
-      vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
-      return JNI_ENOMEM;
-    }
+  if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
+    _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
   } else {
-    _g1_rem_set = new StupidG1RemSet(this);
+    vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
+    return JNI_ENOMEM;
   }
 
   // Carve out the G1 part of the heap.
```
```diff
@@ -2706,8 +2702,7 @@ size_t G1CollectedHeap::max_pending_card_num() {
 }
 
 size_t G1CollectedHeap::cards_scanned() {
-  HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set();
-  return g1_rset->cardsScanned();
+  return g1_rem_set()->cardsScanned();
 }
 
 void
```
```diff
@@ -3850,6 +3845,54 @@ G1ParScanThreadState::print_termination_stats(int i,
                undo_waste() * HeapWordSize / K);
 }
 
+#ifdef ASSERT
+bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
+  assert(ref != NULL, "invariant");
+  assert(UseCompressedOops, "sanity");
+  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
+  oop p = oopDesc::load_decode_heap_oop(ref);
+  assert(_g1h->is_in_g1_reserved(p),
+         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  return true;
+}
+
+bool G1ParScanThreadState::verify_ref(oop* ref) const {
+  assert(ref != NULL, "invariant");
+  if (has_partial_array_mask(ref)) {
+    // Must be in the collection set--it's already been copied.
+    oop p = clear_partial_array_mask(ref);
+    assert(_g1h->obj_in_cs(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  } else {
+    oop p = oopDesc::load_decode_heap_oop(ref);
+    assert(_g1h->is_in_g1_reserved(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  }
+  return true;
+}
+
+bool G1ParScanThreadState::verify_task(StarTask ref) const {
+  if (ref.is_narrow()) {
+    return verify_ref((narrowOop*) ref);
+  } else {
+    return verify_ref((oop*) ref);
+  }
+}
+#endif // ASSERT
+
+void G1ParScanThreadState::trim_queue() {
+  StarTask ref;
+  do {
+    // Drain the overflow stack first, so other threads can steal.
+    while (refs()->pop_overflow(ref)) {
+      deal_with_reference(ref);
+    }
+    while (refs()->pop_local(ref)) {
+      deal_with_reference(ref);
+    }
+  } while (!refs()->is_empty());
+}
+
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
                                      G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
   _par_scan_state(par_scan_state) { }
```
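Note: the new trim_queue drains the overflow stack before the local queue. The rationale, spelled out in a comment this merge deletes from g1CollectedHeap.hpp (see the hunk at -1818 below), is that overflow entries are private to the worker and can never be stolen, while local-queue entries are visible to other workers; draining the invisible work first leaves the stealable work exposed longest. A stand-alone sketch of that drain order, with plain STL containers standing in for HotSpot's StarTask and task-queue types (all names here are illustrative, not HotSpot APIs):

```cpp
#include <cstdio>
#include <deque>
#include <vector>

// Illustrative stand-ins for StarTask and the refs() queue; not HotSpot types.
struct Task { int id; };

struct WorkQueue {
  std::deque<Task>  local;     // stealable by other workers in the real VM
  std::vector<Task> overflow;  // private to this worker, never stolen

  bool pop_overflow(Task& t) {
    if (overflow.empty()) return false;
    t = overflow.back(); overflow.pop_back(); return true;
  }
  bool pop_local(Task& t) {
    if (local.empty()) return false;
    t = local.back(); local.pop_back(); return true;
  }
  bool is_empty() const { return local.empty() && overflow.empty(); }
};

void deal_with(Task t) { std::printf("processed task %d\n", t.id); }

// Mirrors the shape of G1ParScanThreadState::trim_queue(): drain the private
// overflow stack first so the shared, stealable queue stays populated for as
// long as possible.
void trim_queue(WorkQueue& q) {
  Task t;
  do {
    while (q.pop_overflow(t)) deal_with(t);
    while (q.pop_local(t))    deal_with(t);
  } while (!q.is_empty());
}

int main() {
  WorkQueue q;
  q.local    = {{1}, {2}};
  q.overflow = {{3}};
  trim_queue(q);  // processes 3 (overflow) before 2 and 1 (local)
}
```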
```diff
@@ -4052,38 +4095,39 @@ public:
     : _g1h(g1h), _par_scan_state(par_scan_state),
       _queues(queues), _terminator(terminator) {}
 
-  void do_void() {
-    G1ParScanThreadState* pss = par_scan_state();
-    while (true) {
-      pss->trim_queue();
-      StarTask stolen_task;
-      if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
-        // slightly paranoid tests; I'm trying to catch potential
-        // problems before we go into push_on_queue to know where the
-        // problem is coming from
-        assert((oop*)stolen_task != NULL, "Error");
-        if (stolen_task.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*) stolen_task;
-          assert(has_partial_array_mask(p) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error");
-          pss->push_on_queue(p);
-        } else {
-          oop* p = (oop*) stolen_task;
-          assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error");
-          pss->push_on_queue(p);
-        }
-        continue;
-      }
-      pss->start_term_time();
-      if (terminator()->offer_termination()) break;
-      pss->end_term_time();
-    }
-    pss->end_term_time();
-    pss->retire_alloc_buffers();
-  }
-};
+  void do_void();
+
+private:
+  inline bool offer_termination();
+};
+
+bool G1ParEvacuateFollowersClosure::offer_termination() {
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->start_term_time();
+  const bool res = terminator()->offer_termination();
+  pss->end_term_time();
+  return res;
+}
+
+void G1ParEvacuateFollowersClosure::do_void() {
+  StarTask stolen_task;
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->trim_queue();
+  do {
+    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
+      assert(pss->verify_task(stolen_task), "sanity");
+      if (stolen_task.is_narrow()) {
+        pss->push_on_queue((narrowOop*) stolen_task);
+      } else {
+        pss->push_on_queue((oop*) stolen_task);
+      }
+      pss->trim_queue();
+    }
+  } while (!offer_termination());
+
+  pss->retire_alloc_buffers();
+}
 
 class G1ParTask : public AbstractGangTask {
 protected:
```
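Note: after the restructure, do_void is a textbook work-stealing termination loop: drain your own queue, re-publish and drain anything you can steal, and only when stealing fails offer termination; if another worker is still busy the offer fails and the loop resumes. A simplified, single-worker sketch of that control flow (TaskQueues and Terminator below are stand-ins, not HotSpot's GenericTaskQueueSet or ParallelTaskTerminator):

```cpp
#include <cstdio>
#include <deque>

// Simplified stand-ins -- a sketch of the new do_void() control flow only.
struct TaskQueues {
  std::deque<int> other_workers;  // tasks sitting in other workers' queues
  bool steal(int /*queue_num*/, int* /*hash_seed*/, int& task) {
    if (other_workers.empty()) return false;
    task = other_workers.front();  // the real steal picks a random victim
    other_workers.pop_front();
    return true;
  }
};

struct Terminator {
  // The real ParallelTaskTerminator spins until every worker has offered;
  // with a single worker it succeeds as soon as it is asked.
  bool offer_termination() { return true; }
};

struct Worker {
  TaskQueues* queues;
  Terminator* terminator;
  std::deque<int> local;
  int queue_num = 0, seed = 17;

  void push_on_queue(int t) { local.push_back(t); }  // stealable again
  void trim_queue() {                                // drain our own work
    while (!local.empty()) {
      std::printf("processed %d\n", local.back());
      local.pop_back();
    }
  }

  void do_void() {
    trim_queue();
    do {
      int stolen;
      while (queues->steal(queue_num, &seed, stolen)) {
        push_on_queue(stolen);  // re-publish, then drain everything we hold
        trim_queue();
      }
      // Nothing left to steal: offer termination; if some other worker is
      // still busy the offer fails and we loop back to stealing.
    } while (!terminator->offer_termination());
  }
};

int main() {
  TaskQueues qs; qs.other_workers = {7, 8};
  Terminator term;
  Worker w{&qs, &term};
  w.do_void();  // steals 7 and 8, processes them, then terminates
}
```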
```diff
@@ -4182,8 +4226,7 @@ public:
       pss.print_termination_stats(i);
     }
 
-    assert(pss.refs_to_scan() == 0, "Task queue should be empty");
-    assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
+    assert(pss.refs()->is_empty(), "should be empty");
     double end_time_ms = os::elapsedTime() * 1000.0;
     _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
   }
```
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

```diff
@@ -1651,49 +1651,17 @@ public:
   size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
   size_t undo_waste() const { return _undo_waste; }
 
+#ifdef ASSERT
+  bool verify_ref(narrowOop* ref) const;
+  bool verify_ref(oop* ref) const;
+  bool verify_task(StarTask ref) const;
+#endif // ASSERT
+
   template <class T> void push_on_queue(T* ref) {
-    assert(ref != NULL, "invariant");
-    assert(has_partial_array_mask(ref) ||
-           _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
-#ifdef ASSERT
-    if (has_partial_array_mask(ref)) {
-      oop p = clear_partial_array_mask(ref);
-      // Verify that we point into the CS
-      assert(_g1h->obj_in_cs(p), "Should be in CS");
-    }
-#endif
+    assert(verify_ref(ref), "sanity");
     refs()->push(ref);
   }
 
-  void pop_from_queue(StarTask& ref) {
-    if (refs()->pop_local(ref)) {
-      assert((oop*)ref != NULL, "pop_local() returned true");
-      assert(UseCompressedOops || !ref.is_narrow(), "Error");
-      assert(has_partial_array_mask((oop*)ref) ||
-             _g1h->is_in_g1_reserved(ref.is_narrow() ?
-               oopDesc::load_decode_heap_oop((narrowOop*)ref) :
-               oopDesc::load_decode_heap_oop((oop*)ref)),
-             "invariant");
-    } else {
-      StarTask null_task;
-      ref = null_task;
-    }
-  }
-
-  void pop_from_overflow_queue(StarTask& ref) {
-    StarTask new_ref;
-    refs()->pop_overflow(new_ref);
-    assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
-    assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
-    assert(has_partial_array_mask((oop*)new_ref) ||
-           _g1h->is_in_g1_reserved(new_ref.is_narrow() ?
-             oopDesc::load_decode_heap_oop((narrowOop*)new_ref) :
-             oopDesc::load_decode_heap_oop((oop*)new_ref)),
-           "invariant");
-    ref = new_ref;
-  }
-
-  int refs_to_scan()            { return (int)refs()->size(); }
-  int overflowed_refs_to_scan() { return (int)refs()->overflow_stack()->size(); }
-
   template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
     if (G1DeferredRSUpdate) {
       deferred_rs_update(from, p, tid);
```
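Note: push_on_queue is now a thin assert-plus-push, and every narrow-versus-wide decision funnels through StarTask::is_narrow(). As a mental model, a StarTask carries either an oop* or a narrowOop* in a single slot and remembers which; HotSpot packs the tag into the pointer's low bit, while the sketch below uses an explicit flag purely for readability (the types are stand-ins, not HotSpot's):

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative stand-ins for HotSpot's oop / narrowOop (a narrowOop is a
// 32-bit compressed reference when UseCompressedOops is on).
typedef void*    oop;
typedef uint32_t narrowOop;

// Simplified model of StarTask: one task slot that can hold either an oop*
// or a narrowOop*. HotSpot encodes the distinction in the pointer's low
// bit; an explicit flag is used here for clarity.
class StarTask {
  void* _ptr;
  bool  _narrow;
public:
  StarTask() : _ptr(nullptr), _narrow(false) {}
  StarTask(oop* p)       : _ptr(p), _narrow(false) {}
  StarTask(narrowOop* p) : _ptr(p), _narrow(true)  {}

  bool is_narrow() const { return _narrow; }
  operator oop*() const       { assert(!_narrow); return static_cast<oop*>(_ptr); }
  operator narrowOop*() const { assert(_narrow);  return static_cast<narrowOop*>(_ptr); }
};

// The dispatch idiom used throughout the patch: branch once on is_narrow(),
// then hand the correctly typed pointer to the same handler.
template <class Handler>
void handle(StarTask ref, Handler& h) {
  if (ref.is_narrow()) {
    h((narrowOop*) ref);   // compressed slot
  } else {
    h((oop*) ref);         // full-width slot
  }
}

struct Printer {
  void operator()(oop* p)       { std::printf("wide slot   %p\n", (void*)p); }
  void operator()(narrowOop* p) { std::printf("narrow slot %p\n", (void*)p); }
};

int main() {
  oop slot = nullptr; narrowOop nslot = 0;
  Printer pr;
  handle(StarTask(&slot), pr);
  handle(StarTask(&nslot), pr);
}
```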
```diff
@@ -1818,59 +1786,15 @@ private:
     }
   }
 
+  void deal_with_reference(StarTask ref) {
+    assert(verify_task(ref), "sanity");
+    if (ref.is_narrow()) {
+      deal_with_reference((narrowOop*)ref);
+    } else {
+      deal_with_reference((oop*)ref);
+    }
+  }
+
 public:
-  void trim_queue() {
-    // I've replicated the loop twice, first to drain the overflow
-    // queue, second to drain the task queue. This is better than
-    // having a single loop, which checks both conditions and, inside
-    // it, either pops the overflow queue or the task queue, as each
-    // loop is tighter. Also, the decision to drain the overflow queue
-    // first is not arbitrary, as the overflow queue is not visible
-    // to the other workers, whereas the task queue is. So, we want to
-    // drain the "invisible" entries first, while allowing the other
-    // workers to potentially steal the "visible" entries.
-
-    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
-      while (overflowed_refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_overflow_queue(ref_to_scan);
-        // We shouldn't have pushed it on the queue if it was not
-        // pointing into the CSet.
-        assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
-        if (ref_to_scan.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*)ref_to_scan;
-          assert(!has_partial_array_mask(p) &&
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        } else {
-          oop* p = (oop*)ref_to_scan;
-          assert((has_partial_array_mask(p) &&
-                  _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        }
-      }
-
-      while (refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_queue(ref_to_scan);
-        if ((oop*)ref_to_scan != NULL) {
-          if (ref_to_scan.is_narrow()) {
-            assert(UseCompressedOops, "Error");
-            narrowOop* p = (narrowOop*)ref_to_scan;
-            assert(!has_partial_array_mask(p) &&
-                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          } else {
-            oop* p = (oop*)ref_to_scan;
-            assert((has_partial_array_mask(p) &&
-                    _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
-                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          }
-        }
-      }
-    }
-  }
+  void trim_queue();
 };
```
src/share/vm/gc_implementation/g1/g1OopClosures.hpp

```diff
@@ -25,8 +25,6 @@
 class HeapRegion;
 class G1CollectedHeap;
 class G1RemSet;
-class HRInto_G1RemSet;
-class G1RemSet;
 class ConcurrentMark;
 class DirtyCardToOopClosure;
 class CMBitMap;
```
src/share/vm/gc_implementation/g1/g1RemSet.cpp

```diff
@@ -97,13 +97,6 @@ public:
   }
 };
 
-void
-StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
-                                            int worker_i) {
-  IntoCSRegionClosure rc(_g1, oc);
-  _g1->heap_region_iterate(&rc);
-}
-
 class VerifyRSCleanCardOopClosure: public OopClosure {
   G1CollectedHeap* _g1;
 public:
```
```diff
@@ -119,8 +112,9 @@ public:
   }
 };
 
-HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
-  : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
+G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
+  : _g1(g1), _conc_refine_cards(0),
+    _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
     _cg1r(g1->concurrent_g1_refine()),
     _traversal_in_progress(false),
     _cset_rs_update_cl(NULL),

@@ -134,7 +128,7 @@ HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
   }
 }
 
-HRInto_G1RemSet::~HRInto_G1RemSet() {
+G1RemSet::~G1RemSet() {
   delete _seq_task;
   for (uint i = 0; i < n_workers(); i++) {
     assert(_cset_rs_update_cl[i] == NULL, "it should be");
```
```diff
@@ -277,7 +271,7 @@ public:
 // p threads
 // Then thread t will start at region t * floor (n/p)
 
-HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
+HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
   HeapRegion* result = _g1p->collection_set();
   if (ParallelGCThreads > 0) {
     size_t cs_size = _g1p->collection_set_size();
```
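Note: the partitioning comment kept by this hunk ("thread t will start at region t * floor (n/p)") is easy to sanity-check with numbers: with n = 10 collection-set regions and p = 4 workers, floor(n/p) = 2, so workers 0..3 start at regions 0, 2, 4 and 6 and walk the list from there. A sketch of just the index arithmetic (the real calculateStartRegion walks a HeapRegion list, not an array):

```cpp
#include <cstdio>

// Index arithmetic behind G1RemSet::calculateStartRegion (sketch only):
// each of p workers starts scanning the collection set at a staggered
// offset so the regions are covered roughly evenly.
int start_region_index(int worker_i, int n_regions, int n_workers) {
  return worker_i * (n_regions / n_workers);  // t * floor(n/p)
}

int main() {
  const int n = 10, p = 4;
  for (int t = 0; t < p; t++)
    std::printf("worker %d starts at region %d\n", t, start_region_index(t, n, p));
  // worker 0 -> 0, worker 1 -> 2, worker 2 -> 4, worker 3 -> 6
}
```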
```diff
@@ -290,7 +284,7 @@ HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
   return result;
 }
 
-void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
+void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
   double rs_time_start = os::elapsedTime();
   HeapRegion *startRegion = calculateStartRegion(worker_i);

@@ -340,7 +334,7 @@ public:
   }
 };
 
-void HRInto_G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
+void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
   double start = os::elapsedTime();
   // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);

@@ -439,12 +433,11 @@ public:
   }
 };
 
-void HRInto_G1RemSet::cleanupHRRS() {
+void G1RemSet::cleanupHRRS() {
   HeapRegionRemSet::cleanup();
 }
 
 void
-HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
-                                             int worker_i) {
+G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
+                                      int worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();

@@ -508,8 +501,7 @@ HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
   _cset_rs_update_cl[worker_i] = NULL;
 }
 
-void HRInto_G1RemSet::
-prepare_for_oops_into_collection_set_do() {
+void G1RemSet::prepare_for_oops_into_collection_set_do() {
 #if G1_REM_SET_LOGGING
   PrintRSClosure cl;
   _g1->collection_set_iterate(&cl);

@@ -581,7 +573,7 @@ public:
   // RSet updating,
   // * the post-write barrier shouldn't be logging updates to young
   //   regions (but there is a situation where this can happen - see
-  //   the comment in HRInto_G1RemSet::concurrentRefineOneCard below -
+  //   the comment in G1RemSet::concurrentRefineOneCard below -
   //   that should not be applicable here), and
   // * during actual RSet updating, the filtering of cards in young
   //   regions in HeapRegion::oops_on_card_seq_iterate_careful is

@@ -601,7 +593,7 @@ public:
   }
 };
 
-void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
+void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee( _cards_scanned != NULL, "invariant" );
   _total_cards_scanned = 0;
   for (uint i = 0; i < n_workers(); ++i)

@@ -692,12 +684,12 @@ public:
   }
 };
 
-void HRInto_G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
+void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
   ScrubRSClosure scrub_cl(region_bm, card_bm);
   _g1->heap_region_iterate(&scrub_cl);
 }
 
-void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
-                                int worker_num, int claim_val) {
+void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
+                         int worker_num, int claim_val) {
   ScrubRSClosure scrub_cl(region_bm, card_bm);
   _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);

@@ -741,7 +733,7 @@ public:
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-bool HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
-                                                   bool check_for_refs_into_cset) {
+bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
+                                            bool check_for_refs_into_cset) {
   // Construct the region representing the card.
   HeapWord* start = _ct_bs->addr_for(card_ptr);

@@ -820,7 +812,7 @@ bool HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i
   return trigger_cl.value();
 }
 
-bool HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
-                                              bool check_for_refs_into_cset) {
+bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
+                                       bool check_for_refs_into_cset) {
   // If the card is no longer dirty, nothing to do.
   if (*card_ptr != CardTableModRefBS::dirty_card_val()) {

@@ -995,7 +987,7 @@ public:
   }
 };
 
-void HRInto_G1RemSet::print_summary_info() {
+void G1RemSet::print_summary_info() {
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 
 #if CARD_REPEAT_HISTO

@@ -1029,30 +1021,26 @@ void HRInto_G1RemSet::print_summary_info() {
   g1->concurrent_g1_refine()->threads_do(&p);
   gclog_or_tty->print_cr("");
 
-  if (G1UseHRIntoRS) {
-    HRRSStatsIter blk;
-    g1->heap_region_iterate(&blk);
-    gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
-                           "  Max = " SIZE_FORMAT "K.",
-                           blk.total_mem_sz()/K, blk.max_mem_sz()/K);
-    gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
-                           " free_lists = " SIZE_FORMAT "K.",
-                           HeapRegionRemSet::static_mem_size()/K,
-                           HeapRegionRemSet::fl_mem_size()/K);
-    gclog_or_tty->print_cr("    %d occupied cards represented.",
-                           blk.occupied());
-    gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
-                           ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
-                           blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
-                           (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
-                           (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
-    gclog_or_tty->print_cr("    Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
-  }
+  HRRSStatsIter blk;
+  g1->heap_region_iterate(&blk);
+  gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
+                         "  Max = " SIZE_FORMAT "K.",
+                         blk.total_mem_sz()/K, blk.max_mem_sz()/K);
+  gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
+                         " free_lists = " SIZE_FORMAT "K.",
+                         HeapRegionRemSet::static_mem_size()/K,
+                         HeapRegionRemSet::fl_mem_size()/K);
+  gclog_or_tty->print_cr("    %d occupied cards represented.",
+                         blk.occupied());
+  gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
+                         ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
+                         blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
+                         (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
+                         (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
+  gclog_or_tty->print_cr("    Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
 }
 
-void HRInto_G1RemSet::prepare_for_verify() {
+void G1RemSet::prepare_for_verify() {
   if (G1HRRSFlushLogBuffersOnVerify &&
       (VerifyBeforeGC || VerifyAfterGC)
       &&  !_g1->full_collection()) {
```
src/share/vm/gc_implementation/g1/g1RemSet.hpp

```diff
@@ -27,107 +27,18 @@
 class G1CollectedHeap;
 class CardTableModRefBarrierSet;
-class HRInto_G1RemSet;
 class ConcurrentG1Refine;
 
+// A G1RemSet in which each heap region has a rem set that records the
+// external heap references into it.  Uses a mod ref bs to track updates,
+// so that they can be used to update the individual region remsets.
 class G1RemSet: public CHeapObj {
 protected:
   G1CollectedHeap* _g1;
   unsigned _conc_refine_cards;
   size_t n_workers();
 
-public:
-  G1RemSet(G1CollectedHeap* g1) :
-    _g1(g1), _conc_refine_cards(0)
-  {}
-
-  // Invoke "blk->do_oop" on all pointers into the CS in object in regions
-  // outside the CS (having invoked "blk->set_region" to set the "from"
-  // region correctly beforehand.) The "worker_i" param is for the
-  // parallel case where the number of the worker thread calling this
-  // function can be helpful in partitioning the work to be done. It
-  // should be the same as the "i" passed to the calling thread's
-  // work(i) function. In the sequential case this param will be ingored.
-  virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                           int worker_i) = 0;
-
-  // Prepare for and cleanup after an oops_into_collection_set_do
-  // call.  Must call each of these once before and after (in sequential
-  // code) any threads call oops into collection set do.  (This offers an
-  // opportunity to sequential setup and teardown of structures needed by a
-  // parallel iteration over the CS's RS.)
-  virtual void prepare_for_oops_into_collection_set_do() = 0;
-  virtual void cleanup_after_oops_into_collection_set_do() = 0;
-
-  // If "this" is of the given subtype, return "this", else "NULL".
-  virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }
-
-  // Record, if necessary, the fact that *p (where "p" is in region "from",
-  // and is, a fortiori, required to be non-NULL) has changed to its new value.
-  virtual void write_ref(HeapRegion* from, oop* p) = 0;
-  virtual void write_ref(HeapRegion* from, narrowOop* p) = 0;
-  virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;
-  virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0;
-
-  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
-  // or card, respectively, such that a region or card with a corresponding
-  // 0 bit contains no part of any live object.  Eliminates any remembered
-  // set entries that correspond to dead heap ranges.
-  virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0;
-
-  // Like the above, but assumes is called in parallel: "worker_num" is the
-  // parallel thread id of the current thread, and "claim_val" is the
-  // value that should be used to claim heap regions.
-  virtual void scrub_par(BitMap* region_bm, BitMap* card_bm,
-                         int worker_num, int claim_val) = 0;
-
-  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
-  // join and leave around parts that must be atomic wrt GC.  (NULL means
-  // being done at a safepoint.)
-  // With some implementations of this routine, when check_for_refs_into_cset
-  // is true, a true result may be returned if the given card contains oops
-  // that have references into the current collection set.
-  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
-                                       bool check_for_refs_into_cset) {
-    return false;
-  }
-
-  // Print any relevant summary info.
-  virtual void print_summary_info() {}
-
-  // Prepare remebered set for verification.
-  virtual void prepare_for_verify() {};
-};
-
-// The simplest possible G1RemSet: iterates over all objects in non-CS
-// regions, searching for pointers into the CS.
-class StupidG1RemSet: public G1RemSet {
-public:
-  StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {}
-
-  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                   int worker_i);
-
-  void prepare_for_oops_into_collection_set_do() {}
-  void cleanup_after_oops_into_collection_set_do() {}
-
-  // Nothing is necessary in the version below.
-  void write_ref(HeapRegion* from, oop* p) {}
-  void write_ref(HeapRegion* from, narrowOop* p) {}
-  void par_write_ref(HeapRegion* from, oop* p, int tid) {}
-  void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {}
-
-  void scrub(BitMap* region_bm, BitMap* card_bm) {}
-  void scrub_par(BitMap* region_bm, BitMap* card_bm,
-                 int worker_num, int claim_val) {}
-};
-
-// A G1RemSet in which each heap region has a rem set that records the
-// external heap references into it.  Uses a mod ref bs to track updates,
-// so that they can be used to update the individual region remsets.
-class HRInto_G1RemSet: public G1RemSet {
 protected:
   enum SomePrivateConstants {
     UpdateRStoMergeSync  = 0,
```
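Note: the net effect of this hunk is to collapse the rem-set hierarchy. Schematically (declarations only, paraphrasing the diff above):

```cpp
// Before: an abstract interface with two implementations, selected by the
// G1UseHRIntoRS develop flag at heap initialization.
//
//   class G1RemSet : public CHeapObj {            // abstract, pure virtuals
//     virtual void oops_into_collection_set_do(...) = 0;
//     virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }
//     ...
//   };
//   class StupidG1RemSet  : public G1RemSet { ... };  // scan everything
//   class HRInto_G1RemSet : public G1RemSet { ... };  // per-region rem sets
//
// After: one concrete class, no virtual dispatch or downcasts needed.
//
//   class G1RemSet : public CHeapObj {            // the former HRInto_G1RemSet
//     void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
//                                      int worker_i);
//     ...
//   };
//
// Callers that previously wrote
//   g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
// now simply write
//   g1_rem_set()->cleanupHRRS();
```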
```diff
@@ -175,27 +86,31 @@ public:
   // scanned.
   void cleanupHRRS();
 
-  HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
-  ~HRInto_G1RemSet();
+  G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
+  ~G1RemSet();
 
+  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
+  // outside the CS (having invoked "blk->set_region" to set the "from"
+  // region correctly beforehand.) The "worker_i" param is for the
+  // parallel case where the number of the worker thread calling this
+  // function can be helpful in partitioning the work to be done. It
+  // should be the same as the "i" passed to the calling thread's
+  // work(i) function. In the sequential case this param will be ingored.
   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                    int worker_i);
 
+  // Prepare for and cleanup after an oops_into_collection_set_do
+  // call.  Must call each of these once before and after (in sequential
+  // code) any threads call oops_into_collection_set_do.  (This offers an
+  // opportunity to sequential setup and teardown of structures needed by a
+  // parallel iteration over the CS's RS.)
   void prepare_for_oops_into_collection_set_do();
   void cleanup_after_oops_into_collection_set_do();
+
   void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
-  template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i);
-  void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) {
-    if (UseCompressedOops) {
-      scanNewRefsRS_work<narrowOop>(oc, worker_i);
-    } else {
-      scanNewRefsRS_work<oop>(oc, worker_i);
-    }
-  }
   void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
-  HeapRegion* calculateStartRegion(int i);
 
-  HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; }
+  HeapRegion* calculateStartRegion(int i);
 
   CardTableModRefBS* ct_bs() { return _ct_bs; }
   size_t cardsScanned() { return _total_cards_scanned; }

@@ -219,17 +134,31 @@ public:
   bool self_forwarded(oop obj);
 
+  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
+  // or card, respectively, such that a region or card with a corresponding
+  // 0 bit contains no part of any live object.  Eliminates any remembered
+  // set entries that correspond to dead heap ranges.
   void scrub(BitMap* region_bm, BitMap* card_bm);
+
+  // Like the above, but assumes is called in parallel: "worker_num" is the
+  // parallel thread id of the current thread, and "claim_val" is the
+  // value that should be used to claim heap regions.
   void scrub_par(BitMap* region_bm, BitMap* card_bm,
                  int worker_num, int claim_val);
 
-  // If check_for_refs_into_cset is true then a true result is returned
-  // if the card contains oops that have references into the current
-  // collection set.
+  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
+  // join and leave around parts that must be atomic wrt GC.  (NULL means
+  // being done at a safepoint.)
+  // If check_for_refs_into_cset is true, a true result is returned
+  // if the given card contains oops that have references into the
+  // current collection set.
   virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                        bool check_for_refs_into_cset);
 
+  // Print any relevant summary info.
   virtual void print_summary_info();
+
+  // Prepare remembered set for verification.
   virtual void prepare_for_verify();
 };

@@ -250,13 +179,13 @@ public:
 class UpdateRSOopClosure: public OopClosure {
   HeapRegion* _from;
-  HRInto_G1RemSet* _rs;
+  G1RemSet* _rs;
   int _worker_i;
 
   template <class T> void do_oop_work(T* p);
 
 public:
-  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
+  UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
     _from(NULL), _rs(rs), _worker_i(worker_i) {
     guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
   }
```
src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp

```diff
@@ -30,16 +30,18 @@ inline size_t G1RemSet::n_workers() {
   }
 }
 
-template <class T> inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
+template <class T>
+inline void G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
   par_write_ref_nv(from, p, 0);
 }
 
-inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
+inline bool G1RemSet::self_forwarded(oop obj) {
   bool result = (obj->is_forwarded() && (obj->forwardee() == obj));
   return result;
 }
 
-template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
+template <class T>
+inline void G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
   oop obj = oopDesc::load_decode_heap_oop(p);
 #ifdef ASSERT
   // can't do because of races

@@ -77,7 +79,7 @@ template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* fro
       // Deferred updates to the CSet are either discarded (in the normal case),
       // or processed (if an evacuation failure occurs) at the end
       // of the collection.
-      // See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do().
+      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
     } else {
 #if G1_REM_SET_LOGGING
       gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"

@@ -91,12 +93,14 @@ template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* fro
   }
 }
 
-template <class T> inline void UpdateRSOopClosure::do_oop_work(T* p) {
+template <class T>
+inline void UpdateRSOopClosure::do_oop_work(T* p) {
   assert(_from != NULL, "from region must be non-NULL");
   _rs->par_write_ref(_from, p, _worker_i);
 }
 
-template <class T> inline void UpdateRSetImmediate::do_oop_work(T* p) {
+template <class T>
+inline void UpdateRSetImmediate::do_oop_work(T* p) {
   assert(_from->is_in_reserved(p), "paranoia");
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
```
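Note: the _nv suffix marks the non-virtual template entry points: write_ref_nv<T> forwards to par_write_ref_nv<T> with worker id 0, and the single template body serves both oop* and narrowOop* slots. A reduced sketch of the forwarding pattern (all types below are placeholders, not HotSpot's):

```cpp
#include <cstdint>
#include <cstdio>

typedef void*    oop;        // placeholder for HotSpot's oop
typedef uint32_t narrowOop;  // placeholder for a compressed reference
struct HeapRegion {};        // placeholder

struct RemSet {
  // One template body handles both full-width and compressed slots; the
  // sequential entry point is just the parallel one with worker id 0.
  template <class T>
  void write_ref_nv(HeapRegion* from, T* p) {
    par_write_ref_nv(from, p, 0);
  }
  template <class T>
  void par_write_ref_nv(HeapRegion* from, T* p, int tid) {
    std::printf("record ref slot %p from worker %d\n", (void*)p, tid);
    (void)from;
  }
};

int main() {
  RemSet rs; HeapRegion r;
  oop slot = nullptr; narrowOop nslot = 0;
  rs.write_ref_nv(&r, &slot);   // instantiates T = oop
  rs.write_ref_nv(&r, &nslot);  // instantiates T = narrowOop
}
```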
src/share/vm/gc_implementation/g1/g1_globals.hpp

```diff
@@ -40,9 +40,6 @@
   develop(intx, G1PolicyVerbose, 0,                                         \
           "The verbosity level on G1 policy decisions")                     \
                                                                             \
-  develop(bool, G1UseHRIntoRS, true,                                        \
-          "Determines whether the 'advanced' HR Into rem set is used.")     \
-                                                                            \
   develop(intx, G1MarkingVerboseLevel, 0,                                   \
           "Level (0-4) of verboseness of the marking code")                 \
                                                                             \
```
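Note: G1UseHRIntoRS was a develop() flag. In HotSpot's globals machinery, develop flags are tunable in debug builds but become compile-time constants in product builds, so deleting the flag also deletes the permanently-dead else branch it guarded in G1CollectedHeap::initialize(). A rough preprocessor mimic of the idea (this is not the actual globals.hpp expansion, and ExampleVerbose is a hypothetical flag):

```cpp
#include <cstdio>

// Rough mimic of a develop() flag: a tunable variable in debug builds, a
// compile-time constant (foldable, dead-code-eliminating) in product builds.
// The real HotSpot macros in globals.hpp are considerably more involved.
#ifdef PRODUCT
  #define DEVELOP_FLAG(type, name, value) const type name = value
#else
  #define DEVELOP_FLAG(type, name, value) type name = value
#endif

DEVELOP_FLAG(bool, ExampleVerbose, false);  // hypothetical flag, for shape only

int main() {
  if (ExampleVerbose) {            // constant-folded away in product builds
    std::printf("verbose path\n");
  }
  std::printf("done\n");
}
```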
src/share/vm/gc_implementation/includeDB_gc_g1

```diff
@@ -310,10 +310,16 @@ heapRegionSeq.hpp heapRegion.hpp
 
 heapRegionSeq.inline.hpp                heapRegionSeq.hpp
 
+instanceKlass.cpp                       g1RemSet.inline.hpp
+
+instanceRefKlass.cpp                    g1RemSet.inline.hpp
+
 klass.hpp                               g1OopClosures.hpp
 
 memoryService.cpp                       g1MemoryPool.hpp
 
+objArrayKlass.cpp                       g1RemSet.inline.hpp
+
 ptrQueue.cpp                            allocation.hpp
 ptrQueue.cpp                            allocation.inline.hpp
 ptrQueue.cpp                            mutex.hpp
```