Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
openanolis
dragonwell8_hotspot
提交
c09be8a2
D
dragonwell8_hotspot
项目概览
openanolis
/
dragonwell8_hotspot
通知
2
Star
2
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
D
dragonwell8_hotspot
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
c09be8a2
编写于
8月 22, 2013
作者:
J
jmasa
浏览文件
操作
浏览文件
下载
差异文件
Merge
上级
1a1253b4
750c729a
变更
26
展开全部
隐藏空白更改
内联
并排
Showing
26 changed file
with
1329 addition
and
511 deletion
+1329
-511
src/share/vm/c1/c1_Runtime1.cpp
src/share/vm/c1/c1_Runtime1.cpp
+15
-10
src/share/vm/code/nmethod.cpp
src/share/vm/code/nmethod.cpp
+22
-14
src/share/vm/code/nmethod.hpp
src/share/vm/code/nmethod.hpp
+1
-1
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
...ion/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+32
-30
src/share/vm/gc_implementation/g1/concurrentMark.cpp
src/share/vm/gc_implementation/g1/concurrentMark.cpp
+17
-6
src/share/vm/gc_implementation/g1/concurrentMark.hpp
src/share/vm/gc_implementation/g1/concurrentMark.hpp
+3
-0
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+431
-179
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+104
-75
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+20
-0
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+23
-0
src/share/vm/gc_implementation/g1/g1RemSet.cpp
src/share/vm/gc_implementation/g1/g1RemSet.cpp
+41
-10
src/share/vm/gc_implementation/g1/g1RemSet.hpp
src/share/vm/gc_implementation/g1/g1RemSet.hpp
+21
-9
src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp
src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp
+56
-22
src/share/vm/gc_implementation/g1/g1_globals.hpp
src/share/vm/gc_implementation/g1/g1_globals.hpp
+4
-1
src/share/vm/gc_implementation/g1/heapRegion.cpp
src/share/vm/gc_implementation/g1/heapRegion.cpp
+301
-145
src/share/vm/gc_implementation/g1/heapRegion.hpp
src/share/vm/gc_implementation/g1/heapRegion.hpp
+22
-1
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
+123
-1
src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
+39
-1
src/share/vm/gc_interface/collectedHeap.cpp
src/share/vm/gc_interface/collectedHeap.cpp
+8
-0
src/share/vm/gc_interface/collectedHeap.hpp
src/share/vm/gc_interface/collectedHeap.hpp
+6
-0
src/share/vm/memory/iterator.cpp
src/share/vm/memory/iterator.cpp
+1
-1
src/share/vm/oops/klass.hpp
src/share/vm/oops/klass.hpp
+2
-1
src/share/vm/runtime/sweeper.hpp
src/share/vm/runtime/sweeper.hpp
+1
-0
src/share/vm/services/memoryPool.cpp
src/share/vm/services/memoryPool.cpp
+2
-2
src/share/vm/utilities/growableArray.hpp
src/share/vm/utilities/growableArray.hpp
+1
-0
src/share/vm/utilities/taskqueue.hpp
src/share/vm/utilities/taskqueue.hpp
+33
-2
未找到文件。
src/share/vm/c1/c1_Runtime1.cpp
浏览文件 @
c09be8a2
...
...
@@ -915,16 +915,6 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
// Return to the now deoptimized frame.
}
// If we are patching in a non-perm oop, make sure the nmethod
// is on the right list.
if
(
ScavengeRootsInCode
&&
mirror
.
not_null
()
&&
mirror
()
->
is_scavengable
())
{
MutexLockerEx
ml_code
(
CodeCache_lock
,
Mutex
::
_no_safepoint_check_flag
);
nmethod
*
nm
=
CodeCache
::
find_nmethod
(
caller_frame
.
pc
());
guarantee
(
nm
!=
NULL
,
"only nmethods can contain non-perm oops"
);
if
(
!
nm
->
on_scavenge_root_list
())
CodeCache
::
add_scavenge_root_nmethod
(
nm
);
}
// Now copy code back
{
...
...
@@ -1125,6 +1115,21 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
}
}
}
// If we are patching in a non-perm oop, make sure the nmethod
// is on the right list.
if
(
ScavengeRootsInCode
&&
mirror
.
not_null
()
&&
mirror
()
->
is_scavengable
())
{
MutexLockerEx
ml_code
(
CodeCache_lock
,
Mutex
::
_no_safepoint_check_flag
);
nmethod
*
nm
=
CodeCache
::
find_nmethod
(
caller_frame
.
pc
());
guarantee
(
nm
!=
NULL
,
"only nmethods can contain non-perm oops"
);
if
(
!
nm
->
on_scavenge_root_list
())
{
CodeCache
::
add_scavenge_root_nmethod
(
nm
);
}
// Since we've patched some oops in the nmethod,
// (re)register it with the heap.
Universe
::
heap
()
->
register_nmethod
(
nm
);
}
JRT_END
//
...
...
src/share/vm/code/nmethod.cpp
浏览文件 @
c09be8a2
...
...
@@ -687,6 +687,7 @@ nmethod::nmethod(
code_buffer
->
copy_values_to
(
this
);
if
(
ScavengeRootsInCode
&&
detect_scavenge_root_oops
())
{
CodeCache
::
add_scavenge_root_nmethod
(
this
);
Universe
::
heap
()
->
register_nmethod
(
this
);
}
debug_only
(
verify_scavenge_root_oops
());
CodeCache
::
commit
(
this
);
...
...
@@ -881,6 +882,7 @@ nmethod::nmethod(
dependencies
->
copy_to
(
this
);
if
(
ScavengeRootsInCode
&&
detect_scavenge_root_oops
())
{
CodeCache
::
add_scavenge_root_nmethod
(
this
);
Universe
::
heap
()
->
register_nmethod
(
this
);
}
debug_only
(
verify_scavenge_root_oops
());
...
...
@@ -1300,6 +1302,13 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
methodHandle
the_method
(
method
());
No_Safepoint_Verifier
nsv
;
// during patching, depending on the nmethod state we must notify the GC that
// code has been unloaded, unregistering it. We cannot do this right while
// holding the Patching_lock because we need to use the CodeCache_lock. This
// would be prone to deadlocks.
// This flag is used to remember whether we need to later lock and unregister.
bool
nmethod_needs_unregister
=
false
;
{
// invalidate osr nmethod before acquiring the patching lock since
// they both acquire leaf locks and we don't want a deadlock.
...
...
@@ -1332,6 +1341,13 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
inc_decompile_count
();
}
// If the state is becoming a zombie, signal to unregister the nmethod with
// the heap.
// This nmethod may have already been unloaded during a full GC.
if
((
state
==
zombie
)
&&
!
is_unloaded
())
{
nmethod_needs_unregister
=
true
;
}
// Change state
_state
=
state
;
...
...
@@ -1367,6 +1383,9 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// safepoint can sneak in, otherwise the oops used by the
// dependency logic could have become stale.
MutexLockerEx
mu
(
CodeCache_lock
,
Mutex
::
_no_safepoint_check_flag
);
if
(
nmethod_needs_unregister
)
{
Universe
::
heap
()
->
unregister_nmethod
(
this
);
}
flush_dependencies
(
NULL
);
}
...
...
@@ -1817,21 +1836,10 @@ void nmethod::metadata_do(void f(Metadata*)) {
if
(
_method
!=
NULL
)
f
(
_method
);
}
// This method is called twice during GC -- once while
// tracing the "active" nmethods on thread stacks during
// the (strong) marking phase, and then again when walking
// the code cache contents during the weak roots processing
// phase. The two uses are distinguished by means of the
// 'do_strong_roots_only' flag, which is true in the first
// case. We want to walk the weak roots in the nmethod
// only in the second case. The weak roots in the nmethod
// are the oops in the ExceptionCache and the InlineCache
// oops.
void
nmethod
::
oops_do
(
OopClosure
*
f
,
bool
do_strong_roots_only
)
{
void
nmethod
::
oops_do
(
OopClosure
*
f
,
bool
allow_zombie
)
{
// make sure the oops ready to receive visitors
assert
(
!
is_zombie
()
&&
!
is_unloaded
(),
"should not call follow on zombie or
unloaded nmethod"
);
assert
(
allow_zombie
||
!
is_zombie
(),
"should not call follow on zombie nmethod"
);
assert
(
!
is_unloaded
(),
"should not call follow on
unloaded nmethod"
);
// If the method is not entrant or zombie then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
...
...
src/share/vm/code/nmethod.hpp
浏览文件 @
c09be8a2
...
...
@@ -566,7 +566,7 @@ public:
void
preserve_callee_argument_oops
(
frame
fr
,
const
RegisterMap
*
reg_map
,
OopClosure
*
f
);
void
oops_do
(
OopClosure
*
f
)
{
oops_do
(
f
,
false
);
}
void
oops_do
(
OopClosure
*
f
,
bool
do_strong_roots_only
);
void
oops_do
(
OopClosure
*
f
,
bool
allow_zombie
);
bool
detect_scavenge_root_oops
();
void
verify_scavenge_root_oops
()
PRODUCT_RETURN
;
...
...
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
浏览文件 @
c09be8a2
...
...
@@ -5478,40 +5478,42 @@ CMSParMarkTask::do_young_space_rescan(uint worker_id,
HandleMark
hm
;
SequentialSubTasksDone
*
pst
=
space
->
par_seq_tasks
();
assert
(
pst
->
valid
(),
"Uninitialized use?"
);
uint
nth_task
=
0
;
uint
n_tasks
=
pst
->
n_tasks
();
HeapWord
*
start
,
*
end
;
while
(
!
pst
->
is_task_claimed
(
/* reference */
nth_task
))
{
// We claimed task # nth_task; compute its boundaries.
if
(
chunk_top
==
0
)
{
// no samples were taken
assert
(
nth_task
==
0
&&
n_tasks
==
1
,
"Can have only 1 EdenSpace task"
);
start
=
space
->
bottom
();
end
=
space
->
top
();
}
else
if
(
nth_task
==
0
)
{
start
=
space
->
bottom
();
end
=
chunk_array
[
nth_task
];
}
else
if
(
nth_task
<
(
uint
)
chunk_top
)
{
assert
(
nth_task
>=
1
,
"Control point invariant"
);
start
=
chunk_array
[
nth_task
-
1
];
end
=
chunk_array
[
nth_task
];
}
else
{
assert
(
nth_task
==
(
uint
)
chunk_top
,
"Control point invariant"
);
start
=
chunk_array
[
chunk_top
-
1
];
end
=
space
->
top
();
}
MemRegion
mr
(
start
,
end
);
// Verify that mr is in space
assert
(
mr
.
is_empty
()
||
space
->
used_region
().
contains
(
mr
),
"Should be in space"
);
// Verify that "start" is an object boundary
assert
(
mr
.
is_empty
()
||
oop
(
mr
.
start
())
->
is_oop
(),
"Should be an oop"
);
space
->
par_oop_iterate
(
mr
,
cl
);
if
(
n_tasks
>
0
)
{
assert
(
pst
->
valid
(),
"Uninitialized use?"
);
HeapWord
*
start
,
*
end
;
while
(
!
pst
->
is_task_claimed
(
/* reference */
nth_task
))
{
// We claimed task # nth_task; compute its boundaries.
if
(
chunk_top
==
0
)
{
// no samples were taken
assert
(
nth_task
==
0
&&
n_tasks
==
1
,
"Can have only 1 EdenSpace task"
);
start
=
space
->
bottom
();
end
=
space
->
top
();
}
else
if
(
nth_task
==
0
)
{
start
=
space
->
bottom
();
end
=
chunk_array
[
nth_task
];
}
else
if
(
nth_task
<
(
uint
)
chunk_top
)
{
assert
(
nth_task
>=
1
,
"Control point invariant"
);
start
=
chunk_array
[
nth_task
-
1
];
end
=
chunk_array
[
nth_task
];
}
else
{
assert
(
nth_task
==
(
uint
)
chunk_top
,
"Control point invariant"
);
start
=
chunk_array
[
chunk_top
-
1
];
end
=
space
->
top
();
}
MemRegion
mr
(
start
,
end
);
// Verify that mr is in space
assert
(
mr
.
is_empty
()
||
space
->
used_region
().
contains
(
mr
),
"Should be in space"
);
// Verify that "start" is an object boundary
assert
(
mr
.
is_empty
()
||
oop
(
mr
.
start
())
->
is_oop
(),
"Should be an oop"
);
space
->
par_oop_iterate
(
mr
,
cl
);
}
pst
->
all_tasks_completed
();
}
pst
->
all_tasks_completed
();
}
void
...
...
@@ -5788,7 +5790,7 @@ initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
DefNewGeneration
*
dng
=
(
DefNewGeneration
*
)
_young_gen
;
// Eden space
{
if
(
!
dng
->
eden
()
->
is_empty
())
{
SequentialSubTasksDone
*
pst
=
dng
->
eden
()
->
par_seq_tasks
();
assert
(
!
pst
->
valid
(),
"Clobbering existing data?"
);
// Each valid entry in [0, _eden_chunk_index) represents a task.
...
...
src/share/vm/gc_implementation/g1/concurrentMark.cpp
浏览文件 @
c09be8a2
...
...
@@ -4529,7 +4529,7 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
_total_prev_live_bytes
(
0
),
_total_next_live_bytes
(
0
),
_hum_used_bytes
(
0
),
_hum_capacity_bytes
(
0
),
_hum_prev_live_bytes
(
0
),
_hum_next_live_bytes
(
0
),
_total_remset_bytes
(
0
)
{
_total_remset_bytes
(
0
)
,
_total_strong_code_roots_bytes
(
0
)
{
G1CollectedHeap
*
g1h
=
G1CollectedHeap
::
heap
();
MemRegion
g1_committed
=
g1h
->
g1_committed
();
MemRegion
g1_reserved
=
g1h
->
g1_reserved
();
...
...
@@ -4553,9 +4553,11 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
G1PPRL_BYTE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
G1PPRL_DOUBLE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
,
"type"
,
"address-range"
,
"used"
,
"prev-live"
,
"next-live"
,
"gc-eff"
,
"remset"
);
"used"
,
"prev-live"
,
"next-live"
,
"gc-eff"
,
"remset"
,
"code-roots"
);
_out
->
print_cr
(
G1PPRL_LINE_PREFIX
G1PPRL_TYPE_H_FORMAT
G1PPRL_ADDR_BASE_H_FORMAT
...
...
@@ -4563,9 +4565,11 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
G1PPRL_BYTE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
G1PPRL_DOUBLE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
,
""
,
""
,
"(bytes)"
,
"(bytes)"
,
"(bytes)"
,
"(bytes/ms)"
,
"(bytes)"
);
"(bytes)"
,
"(bytes)"
,
"(bytes)"
,
"(bytes/ms)"
,
"(bytes)"
,
"(bytes)"
);
}
// It takes as a parameter a reference to one of the _hum_* fields, it
...
...
@@ -4608,6 +4612,8 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
size_t
next_live_bytes
=
r
->
next_live_bytes
();
double
gc_eff
=
r
->
gc_efficiency
();
size_t
remset_bytes
=
r
->
rem_set
()
->
mem_size
();
size_t
strong_code_roots_bytes
=
r
->
rem_set
()
->
strong_code_roots_mem_size
();
if
(
r
->
used
()
==
0
)
{
type
=
"FREE"
;
}
else
if
(
r
->
is_survivor
())
{
...
...
@@ -4642,6 +4648,7 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
_total_prev_live_bytes
+=
prev_live_bytes
;
_total_next_live_bytes
+=
next_live_bytes
;
_total_remset_bytes
+=
remset_bytes
;
_total_strong_code_roots_bytes
+=
strong_code_roots_bytes
;
// Print a line for this particular region.
_out
->
print_cr
(
G1PPRL_LINE_PREFIX
...
...
@@ -4651,9 +4658,11 @@ bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) {
G1PPRL_BYTE_FORMAT
G1PPRL_BYTE_FORMAT
G1PPRL_DOUBLE_FORMAT
G1PPRL_BYTE_FORMAT
G1PPRL_BYTE_FORMAT
,
type
,
bottom
,
end
,
used_bytes
,
prev_live_bytes
,
next_live_bytes
,
gc_eff
,
remset_bytes
);
used_bytes
,
prev_live_bytes
,
next_live_bytes
,
gc_eff
,
remset_bytes
,
strong_code_roots_bytes
);
return
false
;
}
...
...
@@ -4669,7 +4678,8 @@ G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
G1PPRL_SUM_MB_PERC_FORMAT
(
"used"
)
G1PPRL_SUM_MB_PERC_FORMAT
(
"prev-live"
)
G1PPRL_SUM_MB_PERC_FORMAT
(
"next-live"
)
G1PPRL_SUM_MB_FORMAT
(
"remset"
),
G1PPRL_SUM_MB_FORMAT
(
"remset"
)
G1PPRL_SUM_MB_FORMAT
(
"code-roots"
),
bytes_to_mb
(
_total_capacity_bytes
),
bytes_to_mb
(
_total_used_bytes
),
perc
(
_total_used_bytes
,
_total_capacity_bytes
),
...
...
@@ -4677,6 +4687,7 @@ G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
perc
(
_total_prev_live_bytes
,
_total_capacity_bytes
),
bytes_to_mb
(
_total_next_live_bytes
),
perc
(
_total_next_live_bytes
,
_total_capacity_bytes
),
bytes_to_mb
(
_total_remset_bytes
));
bytes_to_mb
(
_total_remset_bytes
),
bytes_to_mb
(
_total_strong_code_roots_bytes
));
_out
->
cr
();
}
src/share/vm/gc_implementation/g1/concurrentMark.hpp
浏览文件 @
c09be8a2
...
...
@@ -1257,6 +1257,9 @@ private:
// Accumulator for the remembered set size
size_t
_total_remset_bytes
;
// Accumulator for strong code roots memory size
size_t
_total_strong_code_roots_bytes
;
static
double
perc
(
size_t
val
,
size_t
total
)
{
if
(
total
==
0
)
{
return
0.0
;
...
...
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
浏览文件 @
c09be8a2
此差异已折叠。
点击以展开。
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
浏览文件 @
c09be8a2
...
...
@@ -46,6 +46,7 @@
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.
// Forward declarations
class
HeapRegion
;
class
HRRSCleanupTask
;
class
GenerationSpec
;
...
...
@@ -69,6 +70,7 @@ class STWGCTimer;
class
G1NewTracer
;
class
G1OldTracer
;
class
EvacuationFailedInfo
;
class
nmethod
;
typedef
OverflowTaskQueue
<
StarTask
,
mtGC
>
RefToScanQueue
;
typedef
GenericTaskQueueSet
<
RefToScanQueue
,
mtGC
>
RefToScanQueueSet
;
...
...
@@ -163,18 +165,6 @@ public:
:
G1AllocRegion
(
"Mutator Alloc Region"
,
false
/* bot_updates */
)
{
}
};
// The G1 STW is alive closure.
// An instance is embedded into the G1CH and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also extensively used during
// reference processing during STW evacuation pauses.
class
G1STWIsAliveClosure
:
public
BoolObjectClosure
{
G1CollectedHeap
*
_g1
;
public:
G1STWIsAliveClosure
(
G1CollectedHeap
*
g1
)
:
_g1
(
g1
)
{}
bool
do_object_b
(
oop
p
);
};
class
SurvivorGCAllocRegion
:
public
G1AllocRegion
{
protected:
virtual
HeapRegion
*
allocate_new_region
(
size_t
word_size
,
bool
force
);
...
...
@@ -193,6 +183,18 @@ public:
:
G1AllocRegion
(
"Old GC Alloc Region"
,
true
/* bot_updates */
)
{
}
};
// The G1 STW is alive closure.
// An instance is embedded into the G1CH and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also extensively used during
// reference processing during STW evacuation pauses.
class
G1STWIsAliveClosure
:
public
BoolObjectClosure
{
G1CollectedHeap
*
_g1
;
public:
G1STWIsAliveClosure
(
G1CollectedHeap
*
g1
)
:
_g1
(
g1
)
{}
bool
do_object_b
(
oop
p
);
};
class
RefineCardTableEntryClosure
;
class
G1CollectedHeap
:
public
SharedHeap
{
...
...
@@ -1549,42 +1551,6 @@ public:
virtual
jlong
millis_since_last_gc
();
// Perform any cleanup actions necessary before allowing a verification.
virtual
void
prepare_for_verify
();
// Perform verification.
// vo == UsePrevMarking -> use "prev" marking information,
// vo == UseNextMarking -> use "next" marking information
// vo == UseMarkWord -> use the mark word in the object header
//
// NOTE: Only the "prev" marking information is guaranteed to be
// consistent most of the time, so most calls to this should use
// vo == UsePrevMarking.
// Currently, there is only one case where this is called with
// vo == UseNextMarking, which is to verify the "next" marking
// information at the end of remark.
// Currently there is only one place where this is called with
// vo == UseMarkWord, which is to verify the marking during a
// full GC.
void
verify
(
bool
silent
,
VerifyOption
vo
);
// Override; it uses the "prev" marking information
virtual
void
verify
(
bool
silent
);
virtual
void
print_on
(
outputStream
*
st
)
const
;
virtual
void
print_extended_on
(
outputStream
*
st
)
const
;
virtual
void
print_on_error
(
outputStream
*
st
)
const
;
virtual
void
print_gc_threads_on
(
outputStream
*
st
)
const
;
virtual
void
gc_threads_do
(
ThreadClosure
*
tc
)
const
;
// Override
void
print_tracing_info
()
const
;
// The following two methods are helpful for debugging RSet issues.
void
print_cset_rsets
()
PRODUCT_RETURN
;
void
print_all_rsets
()
PRODUCT_RETURN
;
// Convenience function to be used in situations where the heap type can be
// asserted to be this type.
...
...
@@ -1661,13 +1627,86 @@ public:
else
return
is_obj_ill
(
obj
,
hr
);
}
bool
allocated_since_marking
(
oop
obj
,
HeapRegion
*
hr
,
VerifyOption
vo
);
HeapWord
*
top_at_mark_start
(
HeapRegion
*
hr
,
VerifyOption
vo
);
bool
is_marked
(
oop
obj
,
VerifyOption
vo
);
const
char
*
top_at_mark_start_str
(
VerifyOption
vo
);
ConcurrentMark
*
concurrent_mark
()
const
{
return
_cm
;
}
// Refinement
ConcurrentG1Refine
*
concurrent_g1_refine
()
const
{
return
_cg1r
;
}
// The dirty cards region list is used to record a subset of regions
// whose cards need clearing. The list if populated during the
// remembered set scanning and drained during the card table
// cleanup. Although the methods are reentrant, population/draining
// phases must not overlap. For synchronization purposes the last
// element on the list points to itself.
HeapRegion
*
_dirty_cards_region_list
;
void
push_dirty_cards_region
(
HeapRegion
*
hr
);
HeapRegion
*
pop_dirty_cards_region
();
// Optimized nmethod scanning support routines
// Register the given nmethod with the G1 heap
virtual
void
register_nmethod
(
nmethod
*
nm
);
// Unregister the given nmethod from the G1 heap
virtual
void
unregister_nmethod
(
nmethod
*
nm
);
// Migrate the nmethods in the code root lists of the regions
// in the collection set to regions in to-space. In the event
// of an evacuation failure, nmethods that reference objects
// that were not successfullly evacuated are not migrated.
void
migrate_strong_code_roots
();
// During an initial mark pause, mark all the code roots that
// point into regions *not* in the collection set.
void
mark_strong_code_roots
(
uint
worker_id
);
// Rebuild the stong code root lists for each region
// after a full GC
void
rebuild_strong_code_roots
();
// Verification
// The following is just to alert the verification code
// that a full collection has occurred and that the
// remembered sets are no longer up to date.
bool
_full_collection
;
void
set_full_collection
()
{
_full_collection
=
true
;}
void
clear_full_collection
()
{
_full_collection
=
false
;}
bool
full_collection
()
{
return
_full_collection
;}
// Perform any cleanup actions necessary before allowing a verification.
virtual
void
prepare_for_verify
();
// Perform verification.
// vo == UsePrevMarking -> use "prev" marking information,
// vo == UseNextMarking -> use "next" marking information
// vo == UseMarkWord -> use the mark word in the object header
//
// NOTE: Only the "prev" marking information is guaranteed to be
// consistent most of the time, so most calls to this should use
// vo == UsePrevMarking.
// Currently, there is only one case where this is called with
// vo == UseNextMarking, which is to verify the "next" marking
// information at the end of remark.
// Currently there is only one place where this is called with
// vo == UseMarkWord, which is to verify the marking during a
// full GC.
void
verify
(
bool
silent
,
VerifyOption
vo
);
// Override; it uses the "prev" marking information
virtual
void
verify
(
bool
silent
);
// The methods below are here for convenience and dispatch the
// appropriate method depending on value of the given VerifyOption
// parameter. The options for that parameter are:
//
// vo == UsePrevMarking -> use "prev" marking information,
// vo == UseNextMarking -> use "next" marking information,
// vo == UseMarkWord -> use mark word from object header
// parameter. The values for that parameter, and their meanings,
// are the same as those above.
bool
is_obj_dead_cond
(
const
oop
obj
,
const
HeapRegion
*
hr
,
...
...
@@ -1692,31 +1731,21 @@ public:
return
false
;
// keep some compilers happy
}
bool
allocated_since_marking
(
oop
obj
,
HeapRegion
*
hr
,
VerifyOption
vo
);
HeapWord
*
top_at_mark_start
(
HeapRegion
*
hr
,
VerifyOption
vo
);
bool
is_marked
(
oop
obj
,
VerifyOption
vo
);
const
char
*
top_at_mark_start_str
(
VerifyOption
vo
);
// Printing
// The following is just to alert the verification code
// that a full collection has occurred and that the
// remembered sets are no longer up to date.
bool
_full_collection
;
void
set_full_collection
()
{
_full_collection
=
true
;}
void
clear_full_collection
()
{
_full_collection
=
false
;}
bool
full_collection
()
{
return
_full_collection
;}
virtual
void
print_on
(
outputStream
*
st
)
const
;
virtual
void
print_extended_on
(
outputStream
*
st
)
const
;
virtual
void
print_on_error
(
outputStream
*
st
)
const
;
ConcurrentMark
*
concurrent_mark
()
const
{
return
_cm
;
}
ConcurrentG1Refine
*
concurrent_g1_refine
()
const
{
return
_cg1r
;
}
virtual
void
print_gc_threads_on
(
outputStream
*
st
)
const
;
virtual
void
gc_threads_do
(
ThreadClosure
*
tc
)
const
;
// The dirty cards region list is used to record a subset of regions
// whose cards need clearing. The list if populated during the
// remembered set scanning and drained during the card table
// cleanup. Although the methods are reentrant, population/draining
// phases must not overlap. For synchronization purposes the last
// element on the list points to itself.
HeapRegion
*
_dirty_cards_region_list
;
void
push_dirty_cards_region
(
HeapRegion
*
hr
);
HeapRegion
*
pop_dirty_cards_region
();
// Override
void
print_tracing_info
()
const
;
// The following two methods are helpful for debugging RSet issues.
void
print_cset_rsets
()
PRODUCT_RETURN
;
void
print_all_rsets
()
PRODUCT_RETURN
;
public:
void
stop_conc_gc_threads
();
...
...
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
浏览文件 @
c09be8a2
...
...
@@ -161,6 +161,8 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_last_update_rs_times_ms
(
_max_gc_threads
,
"%.1lf"
),
_last_update_rs_processed_buffers
(
_max_gc_threads
,
"%d"
),
_last_scan_rs_times_ms
(
_max_gc_threads
,
"%.1lf"
),
_last_strong_code_root_scan_times_ms
(
_max_gc_threads
,
"%.1lf"
),
_last_strong_code_root_mark_times_ms
(
_max_gc_threads
,
"%.1lf"
),
_last_obj_copy_times_ms
(
_max_gc_threads
,
"%.1lf"
),
_last_termination_times_ms
(
_max_gc_threads
,
"%.1lf"
),
_last_termination_attempts
(
_max_gc_threads
,
SIZE_FORMAT
),
...
...
@@ -182,6 +184,8 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
_last_update_rs_times_ms
.
reset
();
_last_update_rs_processed_buffers
.
reset
();
_last_scan_rs_times_ms
.
reset
();
_last_strong_code_root_scan_times_ms
.
reset
();
_last_strong_code_root_mark_times_ms
.
reset
();
_last_obj_copy_times_ms
.
reset
();
_last_termination_times_ms
.
reset
();
_last_termination_attempts
.
reset
();
...
...
@@ -197,6 +201,8 @@ void G1GCPhaseTimes::note_gc_end() {
_last_update_rs_times_ms
.
verify
();
_last_update_rs_processed_buffers
.
verify
();
_last_scan_rs_times_ms
.
verify
();
_last_strong_code_root_scan_times_ms
.
verify
();
_last_strong_code_root_mark_times_ms
.
verify
();
_last_obj_copy_times_ms
.
verify
();
_last_termination_times_ms
.
verify
();
_last_termination_attempts
.
verify
();
...
...
@@ -210,6 +216,8 @@ void G1GCPhaseTimes::note_gc_end() {
_last_satb_filtering_times_ms
.
get
(
i
)
+
_last_update_rs_times_ms
.
get
(
i
)
+
_last_scan_rs_times_ms
.
get
(
i
)
+
_last_strong_code_root_scan_times_ms
.
get
(
i
)
+
_last_strong_code_root_mark_times_ms
.
get
(
i
)
+
_last_obj_copy_times_ms
.
get
(
i
)
+
_last_termination_times_ms
.
get
(
i
);
...
...
@@ -239,6 +247,9 @@ double G1GCPhaseTimes::accounted_time_ms() {
// Now subtract the time taken to fix up roots in generated code
misc_time_ms
+=
_cur_collection_code_root_fixup_time_ms
;
// Strong code root migration time
misc_time_ms
+=
_cur_strong_code_root_migration_time_ms
;
// Subtract the time taken to clean the card table from the
// current value of "other time"
misc_time_ms
+=
_cur_clear_ct_time_ms
;
...
...
@@ -257,9 +268,13 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
if
(
_last_satb_filtering_times_ms
.
sum
()
>
0.0
)
{
_last_satb_filtering_times_ms
.
print
(
2
,
"SATB Filtering (ms)"
);
}
if
(
_last_strong_code_root_mark_times_ms
.
sum
()
>
0.0
)
{
_last_strong_code_root_mark_times_ms
.
print
(
2
,
"Code Root Marking (ms)"
);
}
_last_update_rs_times_ms
.
print
(
2
,
"Update RS (ms)"
);
_last_update_rs_processed_buffers
.
print
(
3
,
"Processed Buffers"
);
_last_scan_rs_times_ms
.
print
(
2
,
"Scan RS (ms)"
);
_last_strong_code_root_scan_times_ms
.
print
(
2
,
"Code Root Scanning (ms)"
);
_last_obj_copy_times_ms
.
print
(
2
,
"Object Copy (ms)"
);
_last_termination_times_ms
.
print
(
2
,
"Termination (ms)"
);
if
(
G1Log
::
finest
())
{
...
...
@@ -273,12 +288,17 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
if
(
_last_satb_filtering_times_ms
.
sum
()
>
0.0
)
{
_last_satb_filtering_times_ms
.
print
(
1
,
"SATB Filtering (ms)"
);
}
if
(
_last_strong_code_root_mark_times_ms
.
sum
()
>
0.0
)
{
_last_strong_code_root_mark_times_ms
.
print
(
1
,
"Code Root Marking (ms)"
);
}
_last_update_rs_times_ms
.
print
(
1
,
"Update RS (ms)"
);
_last_update_rs_processed_buffers
.
print
(
2
,
"Processed Buffers"
);
_last_scan_rs_times_ms
.
print
(
1
,
"Scan RS (ms)"
);
_last_strong_code_root_scan_times_ms
.
print
(
1
,
"Code Root Scanning (ms)"
);
_last_obj_copy_times_ms
.
print
(
1
,
"Object Copy (ms)"
);
}
print_stats
(
1
,
"Code Root Fixup"
,
_cur_collection_code_root_fixup_time_ms
);
print_stats
(
1
,
"Code Root Migration"
,
_cur_strong_code_root_migration_time_ms
);
print_stats
(
1
,
"Clear CT"
,
_cur_clear_ct_time_ms
);
double
misc_time_ms
=
pause_time_sec
*
MILLIUNITS
-
accounted_time_ms
();
print_stats
(
1
,
"Other"
,
misc_time_ms
);
...
...
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
浏览文件 @
c09be8a2
...
...
@@ -119,6 +119,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
WorkerDataArray
<
double
>
_last_update_rs_times_ms
;
WorkerDataArray
<
int
>
_last_update_rs_processed_buffers
;
WorkerDataArray
<
double
>
_last_scan_rs_times_ms
;
WorkerDataArray
<
double
>
_last_strong_code_root_scan_times_ms
;
WorkerDataArray
<
double
>
_last_strong_code_root_mark_times_ms
;
WorkerDataArray
<
double
>
_last_obj_copy_times_ms
;
WorkerDataArray
<
double
>
_last_termination_times_ms
;
WorkerDataArray
<
size_t
>
_last_termination_attempts
;
...
...
@@ -128,6 +130,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double
_cur_collection_par_time_ms
;
double
_cur_collection_code_root_fixup_time_ms
;
double
_cur_strong_code_root_migration_time_ms
;
double
_cur_clear_ct_time_ms
;
double
_cur_ref_proc_time_ms
;
...
...
@@ -179,6 +182,14 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_last_scan_rs_times_ms
.
set
(
worker_i
,
ms
);
}
void
record_strong_code_root_scan_time
(
uint
worker_i
,
double
ms
)
{
_last_strong_code_root_scan_times_ms
.
set
(
worker_i
,
ms
);
}
void
record_strong_code_root_mark_time
(
uint
worker_i
,
double
ms
)
{
_last_strong_code_root_mark_times_ms
.
set
(
worker_i
,
ms
);
}
void
record_obj_copy_time
(
uint
worker_i
,
double
ms
)
{
_last_obj_copy_times_ms
.
set
(
worker_i
,
ms
);
}
...
...
@@ -208,6 +219,10 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_cur_collection_code_root_fixup_time_ms
=
ms
;
}
void
record_strong_code_root_migration_time
(
double
ms
)
{
_cur_strong_code_root_migration_time_ms
=
ms
;
}
void
record_ref_proc_time
(
double
ms
)
{
_cur_ref_proc_time_ms
=
ms
;
}
...
...
@@ -294,6 +309,14 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
return
_last_scan_rs_times_ms
.
average
();
}
double
average_last_strong_code_root_scan_time
(){
return
_last_strong_code_root_scan_times_ms
.
average
();
}
double
average_last_strong_code_root_mark_time
(){
return
_last_strong_code_root_mark_times_ms
.
average
();
}
double
average_last_obj_copy_time
()
{
return
_last_obj_copy_times_ms
.
average
();
}
...
...
src/share/vm/gc_implementation/g1/g1RemSet.cpp
浏览文件 @
c09be8a2
...
...
@@ -104,15 +104,25 @@ void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
class
ScanRSClosure
:
public
HeapRegionClosure
{
size_t
_cards_done
,
_cards
;
G1CollectedHeap
*
_g1h
;
OopsInHeapRegionClosure
*
_oc
;
CodeBlobToOopClosure
*
_code_root_cl
;
G1BlockOffsetSharedArray
*
_bot_shared
;
CardTableModRefBS
*
_ct_bs
;
int
_worker_i
;
int
_block_size
;
bool
_try_claimed
;
double
_strong_code_root_scan_time_sec
;
int
_worker_i
;
int
_block_size
;
bool
_try_claimed
;
public:
ScanRSClosure
(
OopsInHeapRegionClosure
*
oc
,
int
worker_i
)
:
ScanRSClosure
(
OopsInHeapRegionClosure
*
oc
,
CodeBlobToOopClosure
*
code_root_cl
,
int
worker_i
)
:
_oc
(
oc
),
_code_root_cl
(
code_root_cl
),
_strong_code_root_scan_time_sec
(
0.0
),
_cards
(
0
),
_cards_done
(
0
),
_worker_i
(
worker_i
),
...
...
@@ -160,6 +170,12 @@ public:
card_start
,
card_start
+
G1BlockOffsetSharedArray
::
N_words
);
}
void
scan_strong_code_roots
(
HeapRegion
*
r
)
{
double
scan_start
=
os
::
elapsedTime
();
r
->
strong_code_roots_do
(
_code_root_cl
);
_strong_code_root_scan_time_sec
+=
(
os
::
elapsedTime
()
-
scan_start
);
}
bool
doHeapRegion
(
HeapRegion
*
r
)
{
assert
(
r
->
in_collection_set
(),
"should only be called on elements of CS."
);
HeapRegionRemSet
*
hrrs
=
r
->
rem_set
();
...
...
@@ -173,6 +189,7 @@ public:
// _try_claimed || r->claim_iter()
// is true: either we're supposed to work on claimed-but-not-complete
// regions, or we successfully claimed the region.
HeapRegionRemSetIterator
iter
(
hrrs
);
size_t
card_index
;
...
...
@@ -205,30 +222,43 @@ public:
}
}
if
(
!
_try_claimed
)
{
// Scan the strong code root list attached to the current region
scan_strong_code_roots
(
r
);
hrrs
->
set_iter_complete
();
}
return
false
;
}
double
strong_code_root_scan_time_sec
()
{
return
_strong_code_root_scan_time_sec
;
}
size_t
cards_done
()
{
return
_cards_done
;}
size_t
cards_looked_up
()
{
return
_cards
;}
};
void
G1RemSet
::
scanRS
(
OopsInHeapRegionClosure
*
oc
,
int
worker_i
)
{
void
G1RemSet
::
scanRS
(
OopsInHeapRegionClosure
*
oc
,
CodeBlobToOopClosure
*
code_root_cl
,
int
worker_i
)
{
double
rs_time_start
=
os
::
elapsedTime
();
HeapRegion
*
startRegion
=
_g1
->
start_cset_region_for_worker
(
worker_i
);
ScanRSClosure
scanRScl
(
oc
,
worker_i
);
ScanRSClosure
scanRScl
(
oc
,
code_root_cl
,
worker_i
);
_g1
->
collection_set_iterate_from
(
startRegion
,
&
scanRScl
);
scanRScl
.
set_try_claimed
();
_g1
->
collection_set_iterate_from
(
startRegion
,
&
scanRScl
);
double
scan_rs_time_sec
=
os
::
elapsedTime
()
-
rs_time_start
;
double
scan_rs_time_sec
=
(
os
::
elapsedTime
()
-
rs_time_start
)
-
scanRScl
.
strong_code_root_scan_time_sec
();
assert
(
_cards_scanned
!=
NULL
,
"invariant"
);
assert
(
_cards_scanned
!=
NULL
,
"invariant"
);
_cards_scanned
[
worker_i
]
=
scanRScl
.
cards_done
();
_g1p
->
phase_times
()
->
record_scan_rs_time
(
worker_i
,
scan_rs_time_sec
*
1000.0
);
_g1p
->
phase_times
()
->
record_strong_code_root_scan_time
(
worker_i
,
scanRScl
.
strong_code_root_scan_time_sec
()
*
1000.0
);
}
// Closure used for updating RSets and recording references that
...
...
@@ -288,7 +318,8 @@ void G1RemSet::cleanupHRRS() {
}
void
G1RemSet
::
oops_into_collection_set_do
(
OopsInHeapRegionClosure
*
oc
,
int
worker_i
)
{
CodeBlobToOopClosure
*
code_root_cl
,
int
worker_i
)
{
#if CARD_REPEAT_HISTO
ct_freq_update_histo_and_reset
();
#endif
...
...
@@ -328,7 +359,7 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
_g1p
->
phase_times
()
->
record_update_rs_time
(
worker_i
,
0.0
);
}
if
(
G1UseParallelRSetScanning
||
(
worker_i
==
0
))
{
scanRS
(
oc
,
worker_i
);
scanRS
(
oc
,
code_root_cl
,
worker_i
);
}
else
{
_g1p
->
phase_times
()
->
record_scan_rs_time
(
worker_i
,
0.0
);
}
...
...
src/share/vm/gc_implementation/g1/g1RemSet.hpp
浏览文件 @
c09be8a2
...
...
@@ -81,14 +81,23 @@ public:
G1RemSet
(
G1CollectedHeap
*
g1
,
CardTableModRefBS
*
ct_bs
);
~
G1RemSet
();
// Invoke "blk->do_oop" on all pointers into the CS in objects in regions
// outside the CS (having invoked "blk->set_region" to set the "from"
// region correctly beforehand.) The "worker_i" param is for the
// parallel case where the number of the worker thread calling this
// function can be helpful in partitioning the work to be done. It
// should be the same as the "i" passed to the calling thread's
// work(i) function. In the sequential case this param will be ingored.
void
oops_into_collection_set_do
(
OopsInHeapRegionClosure
*
blk
,
int
worker_i
);
// Invoke "blk->do_oop" on all pointers into the collection set
// from objects in regions outside the collection set (having
// invoked "blk->set_region" to set the "from" region correctly
// beforehand.)
//
// Invoke code_root_cl->do_code_blob on the unmarked nmethods
// on the strong code roots list for each region in the
// collection set.
//
// The "worker_i" param is for the parallel case where the id
// of the worker thread calling this function can be helpful in
// partitioning the work to be done. It should be the same as
// the "i" passed to the calling thread's work(i) function.
// In the sequential case this param will be ignored.
void
oops_into_collection_set_do
(
OopsInHeapRegionClosure
*
blk
,
CodeBlobToOopClosure
*
code_root_cl
,
int
worker_i
);
// Prepare for and cleanup after an oops_into_collection_set_do
// call. Must call each of these once before and after (in sequential
...
...
@@ -98,7 +107,10 @@ public:
void
prepare_for_oops_into_collection_set_do
();
void
cleanup_after_oops_into_collection_set_do
();
void
scanRS
(
OopsInHeapRegionClosure
*
oc
,
int
worker_i
);
void
scanRS
(
OopsInHeapRegionClosure
*
oc
,
CodeBlobToOopClosure
*
code_root_cl
,
int
worker_i
);
void
updateRS
(
DirtyCardQueue
*
into_cset_dcq
,
int
worker_i
);
CardTableModRefBS
*
ct_bs
()
{
return
_ct_bs
;
}
...
...
src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp
浏览文件 @
c09be8a2
...
...
@@ -127,32 +127,55 @@ void G1RemSetSummary::subtract_from(G1RemSetSummary* other) {
class
HRRSStatsIter
:
public
HeapRegionClosure
{
size_t
_occupied
;
size_t
_total_mem_sz
;
size_t
_max_mem_sz
;
HeapRegion
*
_max_mem_sz_region
;
size_t
_total_rs_mem_sz
;
size_t
_max_rs_mem_sz
;
HeapRegion
*
_max_rs_mem_sz_region
;
size_t
_total_code_root_mem_sz
;
size_t
_max_code_root_mem_sz
;
HeapRegion
*
_max_code_root_mem_sz_region
;
public:
HRRSStatsIter
()
:
_occupied
(
0
),
_total_mem_sz
(
0
),
_max_mem_sz
(
0
),
_max_mem_sz_region
(
NULL
)
_total_rs_mem_sz
(
0
),
_max_rs_mem_sz
(
0
),
_max_rs_mem_sz_region
(
NULL
),
_total_code_root_mem_sz
(
0
),
_max_code_root_mem_sz
(
0
),
_max_code_root_mem_sz_region
(
NULL
)
{}
bool
doHeapRegion
(
HeapRegion
*
r
)
{
size_t
mem_sz
=
r
->
rem_set
()
->
mem_size
();
if
(
mem_sz
>
_max_mem_sz
)
{
_max_mem_sz
=
mem_sz
;
_max_mem_sz_region
=
r
;
HeapRegionRemSet
*
hrrs
=
r
->
rem_set
();
// HeapRegionRemSet::mem_size() includes the
// size of the strong code roots
size_t
rs_mem_sz
=
hrrs
->
mem_size
();
if
(
rs_mem_sz
>
_max_rs_mem_sz
)
{
_max_rs_mem_sz
=
rs_mem_sz
;
_max_rs_mem_sz_region
=
r
;
}
_total_rs_mem_sz
+=
rs_mem_sz
;
size_t
code_root_mem_sz
=
hrrs
->
strong_code_roots_mem_size
();
if
(
code_root_mem_sz
>
_max_code_root_mem_sz
)
{
_max_code_root_mem_sz
=
code_root_mem_sz
;
_max_code_root_mem_sz_region
=
r
;
}
_total_mem_sz
+=
mem_sz
;
size_t
occ
=
r
->
rem_set
()
->
occupied
();
_total_code_root_mem_sz
+=
code_root_mem_sz
;
size_t
occ
=
hrrs
->
occupied
();
_occupied
+=
occ
;
return
false
;
}
size_t
total_mem_sz
()
{
return
_total_mem_sz
;
}
size_t
max_mem_sz
()
{
return
_max_mem_sz
;
}
size_t
total_rs_mem_sz
()
{
return
_total_rs_mem_sz
;
}
size_t
max_rs_mem_sz
()
{
return
_max_rs_mem_sz
;
}
HeapRegion
*
max_rs_mem_sz_region
()
{
return
_max_rs_mem_sz_region
;
}
size_t
total_code_root_mem_sz
()
{
return
_total_code_root_mem_sz
;
}
size_t
max_code_root_mem_sz
()
{
return
_max_code_root_mem_sz
;
}
HeapRegion
*
max_code_root_mem_sz_region
()
{
return
_max_code_root_mem_sz_region
;
}
size_t
occupied
()
{
return
_occupied
;
}
HeapRegion
*
max_mem_sz_region
()
{
return
_max_mem_sz_region
;
}
};
double
calc_percentage
(
size_t
numerator
,
size_t
denominator
)
{
...
...
@@ -184,22 +207,33 @@ void G1RemSetSummary::print_on(outputStream* out) {
HRRSStatsIter
blk
;
G1CollectedHeap
::
heap
()
->
heap_region_iterate
(
&
blk
);
// RemSet stats
out
->
print_cr
(
" Total heap region rem set sizes = "
SIZE_FORMAT
"K."
" Max = "
SIZE_FORMAT
"K."
,
blk
.
total_
mem_sz
()
/
K
,
blk
.
max
_mem_sz
()
/
K
);
blk
.
total_
rs_mem_sz
()
/
K
,
blk
.
max_rs
_mem_sz
()
/
K
);
out
->
print_cr
(
" Static structures = "
SIZE_FORMAT
"K,"
" free_lists = "
SIZE_FORMAT
"K."
,
HeapRegionRemSet
::
static_mem_size
()
/
K
,
HeapRegionRemSet
::
fl_mem_size
()
/
K
);
out
->
print_cr
(
" "
SIZE_FORMAT
" occupied cards represented."
,
blk
.
occupied
());
HeapRegion
*
max_
mem_sz_region
=
blk
.
max
_mem_sz_region
();
HeapRegionRemSet
*
rem_set
=
max
_mem_sz_region
->
rem_set
();
HeapRegion
*
max_
rs_mem_sz_region
=
blk
.
max_rs
_mem_sz_region
();
HeapRegionRemSet
*
max_rs_rem_set
=
max_rs
_mem_sz_region
->
rem_set
();
out
->
print_cr
(
" Max size region = "
HR_FORMAT
", "
"size = "
SIZE_FORMAT
"K, occupied = "
SIZE_FORMAT
"K."
,
HR_FORMAT_PARAMS
(
max_mem_sz_region
),
(
rem_set
->
mem_size
()
+
K
-
1
)
/
K
,
(
rem_set
->
occupied
()
+
K
-
1
)
/
K
);
HR_FORMAT_PARAMS
(
max_rs_mem_sz_region
),
(
max_rs_rem_set
->
mem_size
()
+
K
-
1
)
/
K
,
(
max_rs_rem_set
->
occupied
()
+
K
-
1
)
/
K
);
out
->
print_cr
(
" Did %d coarsenings."
,
num_coarsenings
());
// Strong code root stats
out
->
print_cr
(
" Total heap region code-root set sizes = "
SIZE_FORMAT
"K."
" Max = "
SIZE_FORMAT
"K."
,
blk
.
total_code_root_mem_sz
()
/
K
,
blk
.
max_code_root_mem_sz
()
/
K
);
HeapRegion
*
max_code_root_mem_sz_region
=
blk
.
max_code_root_mem_sz_region
();
HeapRegionRemSet
*
max_code_root_rem_set
=
max_code_root_mem_sz_region
->
rem_set
();
out
->
print_cr
(
" Max size region = "
HR_FORMAT
", "
"size = "
SIZE_FORMAT
"K, num_elems = "
SIZE_FORMAT
"."
,
HR_FORMAT_PARAMS
(
max_code_root_mem_sz_region
),
(
max_code_root_rem_set
->
strong_code_roots_mem_size
()
+
K
-
1
)
/
K
,
(
max_code_root_rem_set
->
strong_code_roots_list_length
()));
}
src/share/vm/gc_implementation/g1/g1_globals.hpp
浏览文件 @
c09be8a2
...
...
@@ -319,7 +319,10 @@
\
diagnostic(bool, G1VerifyRSetsDuringFullGC, false, \
"If true, perform verification of each heap region's " \
"remembered set when verifying the heap during a full GC.")
"remembered set when verifying the heap during a full GC.") \
\
diagnostic(bool, G1VerifyHeapRegionCodeRoots, false, \
"Verify the code root lists attached to each heap region.")
G1_FLAGS
(
DECLARE_DEVELOPER_FLAG
,
DECLARE_PD_DEVELOPER_FLAG
,
DECLARE_PRODUCT_FLAG
,
DECLARE_PD_PRODUCT_FLAG
,
DECLARE_DIAGNOSTIC_FLAG
,
DECLARE_EXPERIMENTAL_FLAG
,
DECLARE_NOTPRODUCT_FLAG
,
DECLARE_MANAGEABLE_FLAG
,
DECLARE_PRODUCT_RW_FLAG
)
...
...
src/share/vm/gc_implementation/g1/heapRegion.cpp
浏览文件 @
c09be8a2
...
...
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
...
...
@@ -50,144 +51,6 @@ FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
OopClosure
*
oc
)
:
_r_bottom
(
r
->
bottom
()),
_r_end
(
r
->
end
()),
_oc
(
oc
)
{
}
class
VerifyLiveClosure
:
public
OopClosure
{
private:
G1CollectedHeap
*
_g1h
;
CardTableModRefBS
*
_bs
;
oop
_containing_obj
;
bool
_failures
;
int
_n_failures
;
VerifyOption
_vo
;
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
VerifyLiveClosure
(
G1CollectedHeap
*
g1h
,
VerifyOption
vo
)
:
_g1h
(
g1h
),
_bs
(
NULL
),
_containing_obj
(
NULL
),
_failures
(
false
),
_n_failures
(
0
),
_vo
(
vo
)
{
BarrierSet
*
bs
=
_g1h
->
barrier_set
();
if
(
bs
->
is_a
(
BarrierSet
::
CardTableModRef
))
_bs
=
(
CardTableModRefBS
*
)
bs
;
}
void
set_containing_obj
(
oop
obj
)
{
_containing_obj
=
obj
;
}
bool
failures
()
{
return
_failures
;
}
int
n_failures
()
{
return
_n_failures
;
}
virtual
void
do_oop
(
narrowOop
*
p
)
{
do_oop_work
(
p
);
}
virtual
void
do_oop
(
oop
*
p
)
{
do_oop_work
(
p
);
}
void
print_object
(
outputStream
*
out
,
oop
obj
)
{
#ifdef PRODUCT
Klass
*
k
=
obj
->
klass
();
const
char
*
class_name
=
InstanceKlass
::
cast
(
k
)
->
external_name
();
out
->
print_cr
(
"class name %s"
,
class_name
);
#else // PRODUCT
obj
->
print_on
(
out
);
#endif // PRODUCT
}
template
<
class
T
>
void
do_oop_work
(
T
*
p
)
{
assert
(
_containing_obj
!=
NULL
,
"Precondition"
);
assert
(
!
_g1h
->
is_obj_dead_cond
(
_containing_obj
,
_vo
),
"Precondition"
);
T
heap_oop
=
oopDesc
::
load_heap_oop
(
p
);
if
(
!
oopDesc
::
is_null
(
heap_oop
))
{
oop
obj
=
oopDesc
::
decode_heap_oop_not_null
(
heap_oop
);
bool
failed
=
false
;
if
(
!
_g1h
->
is_in_closed_subset
(
obj
)
||
_g1h
->
is_obj_dead_cond
(
obj
,
_vo
))
{
MutexLockerEx
x
(
ParGCRareEvent_lock
,
Mutex
::
_no_safepoint_check_flag
);
if
(
!
_failures
)
{
gclog_or_tty
->
print_cr
(
""
);
gclog_or_tty
->
print_cr
(
"----------"
);
}
if
(
!
_g1h
->
is_in_closed_subset
(
obj
))
{
HeapRegion
*
from
=
_g1h
->
heap_region_containing
((
HeapWord
*
)
p
);
gclog_or_tty
->
print_cr
(
"Field "
PTR_FORMAT
" of live obj "
PTR_FORMAT
" in region "
"["
PTR_FORMAT
", "
PTR_FORMAT
")"
,
p
,
(
void
*
)
_containing_obj
,
from
->
bottom
(),
from
->
end
());
print_object
(
gclog_or_tty
,
_containing_obj
);
gclog_or_tty
->
print_cr
(
"points to obj "
PTR_FORMAT
" not in the heap"
,
(
void
*
)
obj
);
}
else
{
HeapRegion
*
from
=
_g1h
->
heap_region_containing
((
HeapWord
*
)
p
);
HeapRegion
*
to
=
_g1h
->
heap_region_containing
((
HeapWord
*
)
obj
);
gclog_or_tty
->
print_cr
(
"Field "
PTR_FORMAT
" of live obj "
PTR_FORMAT
" in region "
"["
PTR_FORMAT
", "
PTR_FORMAT
")"
,
p
,
(
void
*
)
_containing_obj
,
from
->
bottom
(),
from
->
end
());
print_object
(
gclog_or_tty
,
_containing_obj
);
gclog_or_tty
->
print_cr
(
"points to dead obj "
PTR_FORMAT
" in region "
"["
PTR_FORMAT
", "
PTR_FORMAT
")"
,
(
void
*
)
obj
,
to
->
bottom
(),
to
->
end
());
print_object
(
gclog_or_tty
,
obj
);
}
gclog_or_tty
->
print_cr
(
"----------"
);
gclog_or_tty
->
flush
();
_failures
=
true
;
failed
=
true
;
_n_failures
++
;
}
if
(
!
_g1h
->
full_collection
()
||
G1VerifyRSetsDuringFullGC
)
{
HeapRegion
*
from
=
_g1h
->
heap_region_containing
((
HeapWord
*
)
p
);
HeapRegion
*
to
=
_g1h
->
heap_region_containing
(
obj
);
if
(
from
!=
NULL
&&
to
!=
NULL
&&
from
!=
to
&&
!
to
->
isHumongous
())
{
jbyte
cv_obj
=
*
_bs
->
byte_for_const
(
_containing_obj
);
jbyte
cv_field
=
*
_bs
->
byte_for_const
(
p
);
const
jbyte
dirty
=
CardTableModRefBS
::
dirty_card_val
();
bool
is_bad
=
!
(
from
->
is_young
()
||
to
->
rem_set
()
->
contains_reference
(
p
)
||
!
G1HRRSFlushLogBuffersOnVerify
&&
// buffers were not flushed
(
_containing_obj
->
is_objArray
()
?
cv_field
==
dirty
:
cv_obj
==
dirty
||
cv_field
==
dirty
));
if
(
is_bad
)
{
MutexLockerEx
x
(
ParGCRareEvent_lock
,
Mutex
::
_no_safepoint_check_flag
);
if
(
!
_failures
)
{
gclog_or_tty
->
print_cr
(
""
);
gclog_or_tty
->
print_cr
(
"----------"
);
}
gclog_or_tty
->
print_cr
(
"Missing rem set entry:"
);
gclog_or_tty
->
print_cr
(
"Field "
PTR_FORMAT
" "
"of obj "
PTR_FORMAT
", "
"in region "
HR_FORMAT
,
p
,
(
void
*
)
_containing_obj
,
HR_FORMAT_PARAMS
(
from
));
_containing_obj
->
print_on
(
gclog_or_tty
);
gclog_or_tty
->
print_cr
(
"points to obj "
PTR_FORMAT
" "
"in region "
HR_FORMAT
,
(
void
*
)
obj
,
HR_FORMAT_PARAMS
(
to
));
obj
->
print_on
(
gclog_or_tty
);
gclog_or_tty
->
print_cr
(
"Obj head CTE = %d, field CTE = %d."
,
cv_obj
,
cv_field
);
gclog_or_tty
->
print_cr
(
"----------"
);
gclog_or_tty
->
flush
();
_failures
=
true
;
if
(
!
failed
)
_n_failures
++
;
}
}
}
}
}
};
template
<
class
ClosureType
>
HeapWord
*
walk_mem_region_loop
(
ClosureType
*
cl
,
G1CollectedHeap
*
g1h
,
HeapRegion
*
hr
,
...
...
@@ -368,7 +231,7 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
if
(
!
par
)
{
// If this is parallel, this will be done later.
HeapRegionRemSet
*
hrrs
=
rem_set
();
if
(
hrrs
!=
NULL
)
hrrs
->
clear
();
hrrs
->
clear
();
_claimed
=
InitialClaimValue
;
}
zero_marked_bytes
();
...
...
@@ -505,6 +368,7 @@ HeapRegion::HeapRegion(uint hrs_index,
_rem_set
(
NULL
),
_recorded_rs_length
(
0
),
_predicted_elapsed_time_ms
(
0
),
_predicted_bytes_to_copy
(
0
)
{
_rem_set
=
new
HeapRegionRemSet
(
sharedOffsetArray
,
this
);
_orig_end
=
mr
.
end
();
// Note that initialize() will set the start of the unmarked area of the
// region.
...
...
@@ -512,8 +376,6 @@ HeapRegion::HeapRegion(uint hrs_index,
set_top
(
bottom
());
set_saved_mark
();
_rem_set
=
new
HeapRegionRemSet
(
sharedOffsetArray
,
this
);
assert
(
HeapRegionRemSet
::
num_par_rem_sets
()
>
0
,
"Invariant."
);
}
...
...
@@ -733,6 +595,160 @@ oops_on_card_seq_iterate_careful(MemRegion mr,
return
NULL
;
}
// Code roots support
void
HeapRegion
::
add_strong_code_root
(
nmethod
*
nm
)
{
HeapRegionRemSet
*
hrrs
=
rem_set
();
hrrs
->
add_strong_code_root
(
nm
);
}
void
HeapRegion
::
remove_strong_code_root
(
nmethod
*
nm
)
{
HeapRegionRemSet
*
hrrs
=
rem_set
();
hrrs
->
remove_strong_code_root
(
nm
);
}
void
HeapRegion
::
migrate_strong_code_roots
()
{
assert
(
in_collection_set
(),
"only collection set regions"
);
assert
(
!
isHumongous
(),
"not humongous regions"
);
HeapRegionRemSet
*
hrrs
=
rem_set
();
hrrs
->
migrate_strong_code_roots
();
}
void
HeapRegion
::
strong_code_roots_do
(
CodeBlobClosure
*
blk
)
const
{
HeapRegionRemSet
*
hrrs
=
rem_set
();
hrrs
->
strong_code_roots_do
(
blk
);
}
class
VerifyStrongCodeRootOopClosure
:
public
OopClosure
{
const
HeapRegion
*
_hr
;
nmethod
*
_nm
;
bool
_failures
;
bool
_has_oops_in_region
;
template
<
class
T
>
void
do_oop_work
(
T
*
p
)
{
T
heap_oop
=
oopDesc
::
load_heap_oop
(
p
);
if
(
!
oopDesc
::
is_null
(
heap_oop
))
{
oop
obj
=
oopDesc
::
decode_heap_oop_not_null
(
heap_oop
);
// Note: not all the oops embedded in the nmethod are in the
// current region. We only look at those which are.
if
(
_hr
->
is_in
(
obj
))
{
// Object is in the region. Check that its less than top
if
(
_hr
->
top
()
<=
(
HeapWord
*
)
obj
)
{
// Object is above top
gclog_or_tty
->
print_cr
(
"Object "
PTR_FORMAT
" in region "
"["
PTR_FORMAT
", "
PTR_FORMAT
") is above "
"top "
PTR_FORMAT
,
obj
,
_hr
->
bottom
(),
_hr
->
end
(),
_hr
->
top
());
_failures
=
true
;
return
;
}
// Nmethod has at least one oop in the current region
_has_oops_in_region
=
true
;
}
}
}
public:
VerifyStrongCodeRootOopClosure
(
const
HeapRegion
*
hr
,
nmethod
*
nm
)
:
_hr
(
hr
),
_failures
(
false
),
_has_oops_in_region
(
false
)
{}
void
do_oop
(
narrowOop
*
p
)
{
do_oop_work
(
p
);
}
void
do_oop
(
oop
*
p
)
{
do_oop_work
(
p
);
}
bool
failures
()
{
return
_failures
;
}
bool
has_oops_in_region
()
{
return
_has_oops_in_region
;
}
};
class
VerifyStrongCodeRootCodeBlobClosure
:
public
CodeBlobClosure
{
const
HeapRegion
*
_hr
;
bool
_failures
;
public:
VerifyStrongCodeRootCodeBlobClosure
(
const
HeapRegion
*
hr
)
:
_hr
(
hr
),
_failures
(
false
)
{}
void
do_code_blob
(
CodeBlob
*
cb
)
{
nmethod
*
nm
=
(
cb
==
NULL
)
?
NULL
:
cb
->
as_nmethod_or_null
();
if
(
nm
!=
NULL
)
{
// Verify that the nemthod is live
if
(
!
nm
->
is_alive
())
{
gclog_or_tty
->
print_cr
(
"region ["
PTR_FORMAT
","
PTR_FORMAT
"] has dead nmethod "
PTR_FORMAT
" in its strong code roots"
,
_hr
->
bottom
(),
_hr
->
end
(),
nm
);
_failures
=
true
;
}
else
{
VerifyStrongCodeRootOopClosure
oop_cl
(
_hr
,
nm
);
nm
->
oops_do
(
&
oop_cl
);
if
(
!
oop_cl
.
has_oops_in_region
())
{
gclog_or_tty
->
print_cr
(
"region ["
PTR_FORMAT
","
PTR_FORMAT
"] has nmethod "
PTR_FORMAT
" in its strong code roots "
"with no pointers into region"
,
_hr
->
bottom
(),
_hr
->
end
(),
nm
);
_failures
=
true
;
}
else
if
(
oop_cl
.
failures
())
{
gclog_or_tty
->
print_cr
(
"region ["
PTR_FORMAT
","
PTR_FORMAT
"] has other "
"failures for nmethod "
PTR_FORMAT
,
_hr
->
bottom
(),
_hr
->
end
(),
nm
);
_failures
=
true
;
}
}
}
}
bool
failures
()
{
return
_failures
;
}
};
void
HeapRegion
::
verify_strong_code_roots
(
VerifyOption
vo
,
bool
*
failures
)
const
{
if
(
!
G1VerifyHeapRegionCodeRoots
)
{
// We're not verifying code roots.
return
;
}
if
(
vo
==
VerifyOption_G1UseMarkWord
)
{
// Marking verification during a full GC is performed after class
// unloading, code cache unloading, etc so the strong code roots
// attached to each heap region are in an inconsistent state. They won't
// be consistent until the strong code roots are rebuilt after the
// actual GC. Skip verifying the strong code roots in this particular
// time.
assert
(
VerifyDuringGC
,
"only way to get here"
);
return
;
}
HeapRegionRemSet
*
hrrs
=
rem_set
();
int
strong_code_roots_length
=
hrrs
->
strong_code_roots_list_length
();
// if this region is empty then there should be no entries
// on its strong code root list
if
(
is_empty
())
{
if
(
strong_code_roots_length
>
0
)
{
gclog_or_tty
->
print_cr
(
"region ["
PTR_FORMAT
","
PTR_FORMAT
"] is empty "
"but has "
INT32_FORMAT
" code root entries"
,
bottom
(),
end
(),
strong_code_roots_length
);
*
failures
=
true
;
}
return
;
}
// An H-region should have an empty strong code root list
if
(
isHumongous
())
{
if
(
strong_code_roots_length
>
0
)
{
gclog_or_tty
->
print_cr
(
"region ["
PTR_FORMAT
","
PTR_FORMAT
"] is humongous "
"but has "
INT32_FORMAT
" code root entries"
,
bottom
(),
end
(),
strong_code_roots_length
);
*
failures
=
true
;
}
return
;
}
VerifyStrongCodeRootCodeBlobClosure
cb_cl
(
this
);
strong_code_roots_do
(
&
cb_cl
);
if
(
cb_cl
.
failures
())
{
*
failures
=
true
;
}
}
void
HeapRegion
::
print
()
const
{
print_on
(
gclog_or_tty
);
}
void
HeapRegion
::
print_on
(
outputStream
*
st
)
const
{
if
(
isHumongous
())
{
...
...
@@ -761,10 +777,143 @@ void HeapRegion::print_on(outputStream* st) const {
G1OffsetTableContigSpace
::
print_on
(
st
);
}
void
HeapRegion
::
verify
()
const
{
bool
dummy
=
false
;
verify
(
VerifyOption_G1UsePrevMarking
,
/* failures */
&
dummy
);
}
class
VerifyLiveClosure
:
public
OopClosure
{
private:
G1CollectedHeap
*
_g1h
;
CardTableModRefBS
*
_bs
;
oop
_containing_obj
;
bool
_failures
;
int
_n_failures
;
VerifyOption
_vo
;
public:
// _vo == UsePrevMarking -> use "prev" marking information,
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseMarkWord -> use mark word from object header.
VerifyLiveClosure
(
G1CollectedHeap
*
g1h
,
VerifyOption
vo
)
:
_g1h
(
g1h
),
_bs
(
NULL
),
_containing_obj
(
NULL
),
_failures
(
false
),
_n_failures
(
0
),
_vo
(
vo
)
{
BarrierSet
*
bs
=
_g1h
->
barrier_set
();
if
(
bs
->
is_a
(
BarrierSet
::
CardTableModRef
))
_bs
=
(
CardTableModRefBS
*
)
bs
;
}
void
set_containing_obj
(
oop
obj
)
{
_containing_obj
=
obj
;
}
bool
failures
()
{
return
_failures
;
}
int
n_failures
()
{
return
_n_failures
;
}
virtual
void
do_oop
(
narrowOop
*
p
)
{
do_oop_work
(
p
);
}
virtual
void
do_oop
(
oop
*
p
)
{
do_oop_work
(
p
);
}
void
print_object
(
outputStream
*
out
,
oop
obj
)
{
#ifdef PRODUCT
Klass
*
k
=
obj
->
klass
();
const
char
*
class_name
=
InstanceKlass
::
cast
(
k
)
->
external_name
();
out
->
print_cr
(
"class name %s"
,
class_name
);
#else // PRODUCT
obj
->
print_on
(
out
);
#endif // PRODUCT
}
template
<
class
T
>
void
do_oop_work
(
T
*
p
)
{
assert
(
_containing_obj
!=
NULL
,
"Precondition"
);
assert
(
!
_g1h
->
is_obj_dead_cond
(
_containing_obj
,
_vo
),
"Precondition"
);
T
heap_oop
=
oopDesc
::
load_heap_oop
(
p
);
if
(
!
oopDesc
::
is_null
(
heap_oop
))
{
oop
obj
=
oopDesc
::
decode_heap_oop_not_null
(
heap_oop
);
bool
failed
=
false
;
if
(
!
_g1h
->
is_in_closed_subset
(
obj
)
||
_g1h
->
is_obj_dead_cond
(
obj
,
_vo
))
{
MutexLockerEx
x
(
ParGCRareEvent_lock
,
Mutex
::
_no_safepoint_check_flag
);
if
(
!
_failures
)
{
gclog_or_tty
->
print_cr
(
""
);
gclog_or_tty
->
print_cr
(
"----------"
);
}
if
(
!
_g1h
->
is_in_closed_subset
(
obj
))
{
HeapRegion
*
from
=
_g1h
->
heap_region_containing
((
HeapWord
*
)
p
);
gclog_or_tty
->
print_cr
(
"Field "
PTR_FORMAT
" of live obj "
PTR_FORMAT
" in region "
"["
PTR_FORMAT
", "
PTR_FORMAT
")"
,
p
,
(
void
*
)
_containing_obj
,
from
->
bottom
(),
from
->
end
());
print_object
(
gclog_or_tty
,
_containing_obj
);
gclog_or_tty
->
print_cr
(
"points to obj "
PTR_FORMAT
" not in the heap"
,
(
void
*
)
obj
);
}
else
{
HeapRegion
*
from
=
_g1h
->
heap_region_containing
((
HeapWord
*
)
p
);
HeapRegion
*
to
=
_g1h
->
heap_region_containing
((
HeapWord
*
)
obj
);
gclog_or_tty
->
print_cr
(
"Field "
PTR_FORMAT
" of live obj "
PTR_FORMAT
" in region "
"["
PTR_FORMAT
", "
PTR_FORMAT
")"
,
p
,
(
void
*
)
_containing_obj
,
from
->
bottom
(),
from
->
end
());
print_object
(
gclog_or_tty
,
_containing_obj
);
gclog_or_tty
->
print_cr
(
"points to dead obj "
PTR_FORMAT
" in region "
"["
PTR_FORMAT
", "
PTR_FORMAT
")"
,
(
void
*
)
obj
,
to
->
bottom
(),
to
->
end
());
print_object
(
gclog_or_tty
,
obj
);
}
gclog_or_tty
->
print_cr
(
"----------"
);
gclog_or_tty
->
flush
();
_failures
=
true
;
failed
=
true
;
_n_failures
++
;
}
if
(
!
_g1h
->
full_collection
()
||
G1VerifyRSetsDuringFullGC
)
{
HeapRegion
*
from
=
_g1h
->
heap_region_containing
((
HeapWord
*
)
p
);
HeapRegion
*
to
=
_g1h
->
heap_region_containing
(
obj
);
if
(
from
!=
NULL
&&
to
!=
NULL
&&
from
!=
to
&&
!
to
->
isHumongous
())
{
jbyte
cv_obj
=
*
_bs
->
byte_for_const
(
_containing_obj
);
jbyte
cv_field
=
*
_bs
->
byte_for_const
(
p
);
const
jbyte
dirty
=
CardTableModRefBS
::
dirty_card_val
();
bool
is_bad
=
!
(
from
->
is_young
()
||
to
->
rem_set
()
->
contains_reference
(
p
)
||
!
G1HRRSFlushLogBuffersOnVerify
&&
// buffers were not flushed
(
_containing_obj
->
is_objArray
()
?
cv_field
==
dirty
:
cv_obj
==
dirty
||
cv_field
==
dirty
));
if
(
is_bad
)
{
MutexLockerEx
x
(
ParGCRareEvent_lock
,
Mutex
::
_no_safepoint_check_flag
);
if
(
!
_failures
)
{
gclog_or_tty
->
print_cr
(
""
);
gclog_or_tty
->
print_cr
(
"----------"
);
}
gclog_or_tty
->
print_cr
(
"Missing rem set entry:"
);
gclog_or_tty
->
print_cr
(
"Field "
PTR_FORMAT
" "
"of obj "
PTR_FORMAT
", "
"in region "
HR_FORMAT
,
p
,
(
void
*
)
_containing_obj
,
HR_FORMAT_PARAMS
(
from
));
_containing_obj
->
print_on
(
gclog_or_tty
);
gclog_or_tty
->
print_cr
(
"points to obj "
PTR_FORMAT
" "
"in region "
HR_FORMAT
,
(
void
*
)
obj
,
HR_FORMAT_PARAMS
(
to
));
obj
->
print_on
(
gclog_or_tty
);
gclog_or_tty
->
print_cr
(
"Obj head CTE = %d, field CTE = %d."
,
cv_obj
,
cv_field
);
gclog_or_tty
->
print_cr
(
"----------"
);
gclog_or_tty
->
flush
();
_failures
=
true
;
if
(
!
failed
)
_n_failures
++
;
}
}
}
}
}
};
// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.
...
...
@@ -904,6 +1053,13 @@ void HeapRegion::verify(VerifyOption vo,
*
failures
=
true
;
return
;
}
verify_strong_code_roots
(
vo
,
failures
);
}
void
HeapRegion
::
verify
()
const
{
bool
dummy
=
false
;
verify
(
VerifyOption_G1UsePrevMarking
,
/* failures */
&
dummy
);
}
// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
...
...
src/share/vm/gc_implementation/g1/heapRegion.hpp
浏览文件 @
c09be8a2
...
...
@@ -52,6 +52,7 @@ class HeapRegionRemSet;
class
HeapRegionRemSetIterator
;
class
HeapRegion
;
class
HeapRegionSetBase
;
class
nmethod
;
#define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(_hr_) \
...
...
@@ -371,7 +372,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
RebuildRSClaimValue
=
5
,
ParEvacFailureClaimValue
=
6
,
AggregateCountClaimValue
=
7
,
VerifyCountClaimValue
=
8
VerifyCountClaimValue
=
8
,
ParMarkRootClaimValue
=
9
};
inline
HeapWord
*
par_allocate_no_bot_updates
(
size_t
word_size
)
{
...
...
@@ -796,6 +798,25 @@ class HeapRegion: public G1OffsetTableContigSpace {
virtual
void
reset_after_compaction
();
// Routines for managing a list of code roots (attached to the
// this region's RSet) that point into this heap region.
void
add_strong_code_root
(
nmethod
*
nm
);
void
remove_strong_code_root
(
nmethod
*
nm
);
// During a collection, migrate the successfully evacuated
// strong code roots that referenced into this region to the
// new regions that they now point into. Unsuccessfully
// evacuated code roots are not migrated.
void
migrate_strong_code_roots
();
// Applies blk->do_code_blob() to each of the entries in
// the strong code roots list for this region
void
strong_code_roots_do
(
CodeBlobClosure
*
blk
)
const
;
// Verify that the entries on the strong code root list for this
// region are live and include at least one pointer into this region.
void
verify_strong_code_roots
(
VerifyOption
vo
,
bool
*
failures
)
const
;
void
print
()
const
;
void
print_on
(
outputStream
*
st
)
const
;
...
...
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
浏览文件 @
c09be8a2
...
...
@@ -33,6 +33,7 @@
#include "oops/oop.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
class
PerRegionTable
:
public
CHeapObj
<
mtGC
>
{
friend
class
OtherRegionsTable
;
...
...
@@ -849,7 +850,7 @@ int HeapRegionRemSet::num_par_rem_sets() {
HeapRegionRemSet
::
HeapRegionRemSet
(
G1BlockOffsetSharedArray
*
bosa
,
HeapRegion
*
hr
)
:
_bosa
(
bosa
),
_other_regions
(
hr
)
{
:
_bosa
(
bosa
),
_
strong_code_roots_list
(
NULL
),
_
other_regions
(
hr
)
{
reset_for_par_iteration
();
}
...
...
@@ -908,6 +909,12 @@ void HeapRegionRemSet::cleanup() {
}
void
HeapRegionRemSet
::
clear
()
{
if
(
_strong_code_roots_list
!=
NULL
)
{
delete
_strong_code_roots_list
;
}
_strong_code_roots_list
=
new
(
ResourceObj
::
C_HEAP
,
mtGC
)
GrowableArray
<
nmethod
*>
(
10
,
0
,
NULL
,
true
);
_other_regions
.
clear
();
assert
(
occupied
()
==
0
,
"Should be clear."
);
reset_for_par_iteration
();
...
...
@@ -925,6 +932,121 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
_other_regions
.
scrub
(
ctbs
,
region_bm
,
card_bm
);
}
// Code roots support
void
HeapRegionRemSet
::
add_strong_code_root
(
nmethod
*
nm
)
{
assert
(
nm
!=
NULL
,
"sanity"
);
// Search for the code blob from the RHS to avoid
// duplicate entries as much as possible
if
(
_strong_code_roots_list
->
find_from_end
(
nm
)
<
0
)
{
// Code blob isn't already in the list
_strong_code_roots_list
->
push
(
nm
);
}
}
void
HeapRegionRemSet
::
remove_strong_code_root
(
nmethod
*
nm
)
{
assert
(
nm
!=
NULL
,
"sanity"
);
int
idx
=
_strong_code_roots_list
->
find
(
nm
);
if
(
idx
>=
0
)
{
_strong_code_roots_list
->
remove_at
(
idx
);
}
// Check that there were no duplicates
guarantee
(
_strong_code_roots_list
->
find
(
nm
)
<
0
,
"duplicate entry found"
);
}
class
NMethodMigrationOopClosure
:
public
OopClosure
{
G1CollectedHeap
*
_g1h
;
HeapRegion
*
_from
;
nmethod
*
_nm
;
uint
_num_self_forwarded
;
template
<
class
T
>
void
do_oop_work
(
T
*
p
)
{
T
heap_oop
=
oopDesc
::
load_heap_oop
(
p
);
if
(
!
oopDesc
::
is_null
(
heap_oop
))
{
oop
obj
=
oopDesc
::
decode_heap_oop_not_null
(
heap_oop
);
if
(
_from
->
is_in
(
obj
))
{
// Reference still points into the source region.
// Since roots are immediately evacuated this means that
// we must have self forwarded the object
assert
(
obj
->
is_forwarded
(),
err_msg
(
"code roots should be immediately evacuated. "
"Ref: "
PTR_FORMAT
", "
"Obj: "
PTR_FORMAT
", "
"Region: "
HR_FORMAT
,
p
,
(
void
*
)
obj
,
HR_FORMAT_PARAMS
(
_from
)));
assert
(
obj
->
forwardee
()
==
obj
,
err_msg
(
"not self forwarded? obj = "
PTR_FORMAT
,
(
void
*
)
obj
));
// The object has been self forwarded.
// Note, if we're during an initial mark pause, there is
// no need to explicitly mark object. It will be marked
// during the regular evacuation failure handling code.
_num_self_forwarded
++
;
}
else
{
// The reference points into a promotion or to-space region
HeapRegion
*
to
=
_g1h
->
heap_region_containing
(
obj
);
to
->
rem_set
()
->
add_strong_code_root
(
_nm
);
}
}
}
public:
NMethodMigrationOopClosure
(
G1CollectedHeap
*
g1h
,
HeapRegion
*
from
,
nmethod
*
nm
)
:
_g1h
(
g1h
),
_from
(
from
),
_nm
(
nm
),
_num_self_forwarded
(
0
)
{}
void
do_oop
(
narrowOop
*
p
)
{
do_oop_work
(
p
);
}
void
do_oop
(
oop
*
p
)
{
do_oop_work
(
p
);
}
uint
retain
()
{
return
_num_self_forwarded
>
0
;
}
};
void
HeapRegionRemSet
::
migrate_strong_code_roots
()
{
assert
(
hr
()
->
in_collection_set
(),
"only collection set regions"
);
assert
(
!
hr
()
->
isHumongous
(),
"not humongous regions"
);
ResourceMark
rm
;
// List of code blobs to retain for this region
GrowableArray
<
nmethod
*>
to_be_retained
(
10
);
G1CollectedHeap
*
g1h
=
G1CollectedHeap
::
heap
();
while
(
_strong_code_roots_list
->
is_nonempty
())
{
nmethod
*
nm
=
_strong_code_roots_list
->
pop
();
if
(
nm
!=
NULL
)
{
NMethodMigrationOopClosure
oop_cl
(
g1h
,
hr
(),
nm
);
nm
->
oops_do
(
&
oop_cl
);
if
(
oop_cl
.
retain
())
{
to_be_retained
.
push
(
nm
);
}
}
}
// Now push any code roots we need to retain
assert
(
to_be_retained
.
is_empty
()
||
hr
()
->
evacuation_failed
(),
"Retained nmethod list must be empty or "
"evacuation of this region failed"
);
while
(
to_be_retained
.
is_nonempty
())
{
nmethod
*
nm
=
to_be_retained
.
pop
();
assert
(
nm
!=
NULL
,
"sanity"
);
add_strong_code_root
(
nm
);
}
}
void
HeapRegionRemSet
::
strong_code_roots_do
(
CodeBlobClosure
*
blk
)
const
{
for
(
int
i
=
0
;
i
<
_strong_code_roots_list
->
length
();
i
+=
1
)
{
nmethod
*
nm
=
_strong_code_roots_list
->
at
(
i
);
blk
->
do_code_blob
(
nm
);
}
}
size_t
HeapRegionRemSet
::
strong_code_roots_mem_size
()
{
return
sizeof
(
GrowableArray
<
nmethod
*>
)
+
_strong_code_roots_list
->
max_length
()
*
sizeof
(
nmethod
*
);
}
//-------------------- Iteration --------------------
HeapRegionRemSetIterator
::
HeapRegionRemSetIterator
(
const
HeapRegionRemSet
*
hrrs
)
:
...
...
src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
浏览文件 @
c09be8a2
...
...
@@ -37,6 +37,7 @@ class HeapRegion;
class
HeapRegionRemSetIterator
;
class
PerRegionTable
;
class
SparsePRT
;
class
nmethod
;
// Essentially a wrapper around SparsePRTCleanupTask. See
// sparsePRT.hpp for more details.
...
...
@@ -191,6 +192,10 @@ private:
G1BlockOffsetSharedArray
*
_bosa
;
G1BlockOffsetSharedArray
*
bosa
()
const
{
return
_bosa
;
}
// A list of code blobs (nmethods) whose code contains pointers into
// the region that owns this RSet.
GrowableArray
<
nmethod
*>*
_strong_code_roots_list
;
OtherRegionsTable
_other_regions
;
enum
ParIterState
{
Unclaimed
,
Claimed
,
Complete
};
...
...
@@ -282,11 +287,13 @@ public:
}
// The actual # of bytes this hr_remset takes up.
// Note also includes the strong code root set.
size_t
mem_size
()
{
return
_other_regions
.
mem_size
()
// This correction is necessary because the above includes the second
// part.
+
sizeof
(
this
)
-
sizeof
(
OtherRegionsTable
);
+
(
sizeof
(
this
)
-
sizeof
(
OtherRegionsTable
))
+
strong_code_roots_mem_size
();
}
// Returns the memory occupancy of all static data structures associated
...
...
@@ -304,6 +311,37 @@ public:
bool
contains_reference
(
OopOrNarrowOopStar
from
)
const
{
return
_other_regions
.
contains_reference
(
from
);
}
// Routines for managing the list of code roots that point into
// the heap region that owns this RSet.
void
add_strong_code_root
(
nmethod
*
nm
);
void
remove_strong_code_root
(
nmethod
*
nm
);
// During a collection, migrate the successfully evacuated strong
// code roots that referenced into the region that owns this RSet
// to the RSets of the new regions that they now point into.
// Unsuccessfully evacuated code roots are not migrated.
void
migrate_strong_code_roots
();
// Applies blk->do_code_blob() to each of the entries in
// the strong code roots list
void
strong_code_roots_do
(
CodeBlobClosure
*
blk
)
const
;
// Returns the number of elements in the strong code roots list
int strong_code_roots_list_length() {
  return _strong_code_roots_list->length();
}
// Returns true if the strong code roots contains the given
// nmethod. Linear scan of the backing GrowableArray.
bool strong_code_roots_list_contains(nmethod* nm) {
  return _strong_code_roots_list->contains(nm);
}
// Returns the amount of memory, in bytes, currently
// consumed by the strong code roots.
size_t
strong_code_roots_mem_size
();
void
print
()
const
;
// Called during a stop-world phase to perform any deferred cleanups.
...
...
src/share/vm/gc_interface/collectedHeap.cpp
浏览文件 @
c09be8a2
...
...
@@ -118,6 +118,14 @@ void CollectedHeap::print_heap_after_gc() {
}
}
// Default implementation: only asserts the locking protocol (caller holds
// the CodeCache_lock or is at a safepoint) and does nothing else.
// Specialized heaps override this with their own registration mechanism.
void CollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}
// Default implementation: only asserts the locking protocol (caller holds
// the CodeCache_lock or is at a safepoint) and does nothing else.
// Specialized heaps override this with their own unregistration mechanism.
void CollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}
void
CollectedHeap
::
trace_heap
(
GCWhen
::
Type
when
,
GCTracer
*
gc_tracer
)
{
const
GCHeapSummary
&
heap_summary
=
create_heap_summary
();
const
MetaspaceSummary
&
metaspace_summary
=
create_metaspace_summary
();
...
...
src/share/vm/gc_interface/collectedHeap.hpp
浏览文件 @
c09be8a2
...
...
@@ -49,6 +49,7 @@ class MetaspaceSummary;
class
Thread
;
class
ThreadClosure
;
class
VirtualSpaceSummary
;
class
nmethod
;
class
GCMessage
:
public
FormatBuffer
<
1024
>
{
public:
...
...
@@ -603,6 +604,11 @@ class CollectedHeap : public CHeapObj<mtInternal> {
void
print_heap_before_gc
();
void
print_heap_after_gc
();
// Registering and unregistering an nmethod (compiled code) with the heap.
// Override with specific mechanism for each specialized heap type.
virtual
void
register_nmethod
(
nmethod
*
nm
);
virtual
void
unregister_nmethod
(
nmethod
*
nm
);
void
trace_heap_before_gc
(
GCTracer
*
gc_tracer
);
void
trace_heap_after_gc
(
GCTracer
*
gc_tracer
);
...
...
src/share/vm/memory/iterator.cpp
浏览文件 @
c09be8a2
...
...
@@ -64,7 +64,7 @@ void MarkingCodeBlobClosure::do_code_blob(CodeBlob* cb) {
}
void
CodeBlobToOopClosure
::
do_newly_marked_nmethod
(
nmethod
*
nm
)
{
nm
->
oops_do
(
_cl
,
/*
do_strong_roots_only=*/
tru
e
);
nm
->
oops_do
(
_cl
,
/*
allow_zombie=*/
fals
e
);
}
void
CodeBlobToOopClosure
::
do_code_blob
(
CodeBlob
*
cb
)
{
...
...
src/share/vm/oops/klass.hpp
浏览文件 @
c09be8a2
...
...
@@ -352,7 +352,8 @@ class Klass : public Metadata {
static
int
layout_helper_log2_element_size
(
jint
lh
)
{
assert
(
lh
<
(
jint
)
_lh_neutral_value
,
"must be array"
);
int
l2esz
=
(
lh
>>
_lh_log2_element_size_shift
)
&
_lh_log2_element_size_mask
;
assert
(
l2esz
<=
LogBitsPerLong
,
"sanity"
);
assert
(
l2esz
<=
LogBitsPerLong
,
err_msg
(
"sanity. l2esz: 0x%x for lh: 0x%x"
,
(
uint
)
l2esz
,
(
uint
)
lh
));
return
l2esz
;
}
static
jint
array_layout_helper
(
jint
tag
,
int
hsize
,
BasicType
etype
,
int
log2_esize
)
{
...
...
src/share/vm/runtime/sweeper.hpp
浏览文件 @
c09be8a2
...
...
@@ -83,6 +83,7 @@ class NMethodSweeper : public AllStatic {
static
jlong
peak_disconnect_time
()
{
return
_peak_disconnect_time
;
}
#ifdef ASSERT
// True iff 'which' is the nmethod the sweeper is currently positioned on
// (i.e. it equals _current). Debug-only: declared under #ifdef ASSERT.
static bool is_sweeping(nmethod* which) { return _current == which; }
// Keep track of sweeper activity in the ring buffer
static
void
record_sweep
(
nmethod
*
nm
,
int
line
);
static
void
report_events
(
int
id
,
address
entry
);
...
...
src/share/vm/services/memoryPool.cpp
浏览文件 @
c09be8a2
...
...
@@ -268,11 +268,11 @@ MemoryUsage MetaspacePool::get_memory_usage() {
}
size_t
MetaspacePool
::
used_in_bytes
()
{
return
MetaspaceAux
::
allocated_used_bytes
(
Metaspace
::
NonClassType
);
return
MetaspaceAux
::
allocated_used_bytes
();
}
size_t
MetaspacePool
::
capacity_in_bytes
()
const
{
return
MetaspaceAux
::
allocated_capacity_bytes
(
Metaspace
::
NonClassType
);
return
MetaspaceAux
::
allocated_capacity_bytes
();
}
size_t
MetaspacePool
::
calculate_max_size
()
const
{
...
...
src/share/vm/utilities/growableArray.hpp
浏览文件 @
c09be8a2
...
...
@@ -194,6 +194,7 @@ template<class E> class GrowableArray : public GenericGrowableArray {
// Logically empties the array: resets the length to zero without
// shrinking or freeing the backing storage (_max is untouched).
void clear() { _len = 0; }
// Current number of elements in the array.
int length() const { return _len; }
// Current capacity of the backing storage, in elements.
int max_length() const { return _max; }
// Shrinks the logical length to l; l must not exceed the current length.
// Capacity is unchanged.
void trunc_to(int l) {
  assert(l <= _len, "cannot increase length");
  _len = l;
}
// True iff the array holds no elements.
bool is_empty() const { return _len == 0; }
// True iff the array holds at least one element.
bool is_nonempty() const { return _len != 0; }
...
...
src/share/vm/utilities/taskqueue.hpp
浏览文件 @
c09be8a2
/*
* Copyright (c) 2001, 201
2
, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 201
3
, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -132,6 +132,8 @@ void TaskQueueStats::reset() {
}
#endif // TASKQUEUE_STATS
// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.
template
<
unsigned
int
N
,
MEMFLAGS
F
>
class
TaskQueueSuper
:
public
CHeapObj
<
F
>
{
protected:
...
...
@@ -249,7 +251,36 @@ public:
TASKQUEUE_STATS_ONLY
(
TaskQueueStats
stats
;)
};
//
// GenericTaskQueue implements an ABP, Aurora-Blumofe-Plaxton, double-
// ended-queue (deque), intended for use in work stealing. Queue operations
// are non-blocking.
//
// A queue owner thread performs push() and pop_local() operations on one end
// of the queue, while other threads may steal work using the pop_global()
// method.
//
// The main difference to the original algorithm is that this
// implementation allows wrap-around at the end of its allocated
// storage, which is an array.
//
// The original paper is:
//
// Arora, N. S., Blumofe, R. D., and Plaxton, C. G.
// Thread scheduling for multiprogrammed multiprocessors.
// Theory of Computing Systems 34, 2 (2001), 115-144.
//
// The following paper provides a correctness proof and an
// implementation for weakly ordered memory models including (pseudo-)
// code containing memory barriers for a Chase-Lev deque. Chase-Lev is
// similar to ABP, with the main difference that it allows resizing of the
// underlying storage:
//
// Le, N. M., Pop, A., Cohen A., and Nardelli, F. Z.
// Correct and efficient work-stealing for weak memory models
// Proceedings of the 18th ACM SIGPLAN symposium on Principles and
// practice of parallel programming (PPoPP 2013), 69-80
//
template
<
class
E
,
MEMFLAGS
F
,
unsigned
int
N
=
TASKQUEUE_SIZE
>
class
GenericTaskQueue
:
public
TaskQueueSuper
<
N
,
F
>
{
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录