openanolis / dragonwell8_hotspot

Commit 1c7f20f7
Merge, authored on Jul 06, 2011 by jcoomes
Parents: e02ba138 cf62aa47

Showing 69 changed files with 2,898 additions and 1,625 deletions (+2898 -1625)
Changed files:

src/share/vm/classfile/javaClasses.cpp  +1 -28
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp  +0 -2
src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp  +0 -6
src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp  +15 -2
src/share/vm/gc_implementation/g1/concurrentMark.cpp  +446 -394
src/share/vm/gc_implementation/g1/concurrentMark.hpp  +37 -44
src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp  +156 -0
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp  +431 -258
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp  +94 -64
src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp  +8 -10
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp  +43 -47
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp  +6 -0
src/share/vm/gc_implementation/g1/g1HRPrinter.cpp  +112 -0
src/share/vm/gc_implementation/g1/g1HRPrinter.hpp  +182 -0
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp  +23 -5
src/share/vm/gc_implementation/g1/g1OopClosures.hpp  +15 -40
src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp  +15 -23
src/share/vm/gc_implementation/g1/g1RemSet.cpp  +2 -79
src/share/vm/gc_implementation/g1/g1RemSet.hpp  +0 -2
src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp  +0 -6
src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp  +3 -5
src/share/vm/gc_implementation/g1/heapRegion.cpp  +24 -28
src/share/vm/gc_implementation/g1/heapRegion.hpp  +25 -17
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp  +19 -13
src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp  +5 -3
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp  +204 -243
src/share/vm/gc_implementation/g1/heapRegionSeq.hpp  +113 -62
src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp  +33 -14
src/share/vm/gc_implementation/g1/heapRegionSet.hpp  +1 -1
src/share/vm/gc_implementation/g1/heapRegionSets.cpp  +11 -0
src/share/vm/gc_implementation/g1/heapRegionSets.hpp  +2 -1
src/share/vm/gc_implementation/g1/sparsePRT.cpp  +5 -4
src/share/vm/gc_implementation/g1/vm_operations_g1.cpp  +12 -0
src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp  +25 -9
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp  +16 -26
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp  +7 -8
src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp  +5 -6
src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp  +3 -4
src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp  +2 -2
src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp  +1 -1
src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp  +2 -3
src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp  +2 -4
src/share/vm/gc_implementation/shared/allocationStats.hpp  +9 -7
src/share/vm/gc_implementation/shared/concurrentGCThread.cpp  +0 -11
src/share/vm/gc_implementation/shared/concurrentGCThread.hpp  +0 -2
src/share/vm/gc_interface/collectedHeap.hpp  +7 -12
src/share/vm/gc_interface/collectedHeap.inline.hpp  +5 -21
src/share/vm/memory/collectorPolicy.cpp  +0 -4
src/share/vm/memory/collectorPolicy.hpp  +1 -4
src/share/vm/memory/genCollectedHeap.cpp  +5 -13
src/share/vm/memory/genCollectedHeap.hpp  +2 -6
src/share/vm/memory/universe.cpp  +1 -1
src/share/vm/memory/universe.hpp  +10 -1
src/share/vm/oops/methodOop.cpp  +21 -58
src/share/vm/oops/typeArrayKlass.cpp  +1 -5
src/share/vm/prims/jni.cpp  +14 -0
src/share/vm/runtime/arguments.cpp  +26 -0
src/share/vm/runtime/atomic.cpp  +10 -0
src/share/vm/runtime/atomic.hpp  +2 -0
src/share/vm/runtime/globals.hpp  +17 -0
src/share/vm/runtime/java.cpp  +4 -6
src/share/vm/runtime/safepoint.cpp  +5 -0
src/share/vm/runtime/thread.cpp  +8 -0
src/share/vm/utilities/bitMap.hpp  +4 -4
src/share/vm/utilities/ostream.cpp  +87 -4
src/share/vm/utilities/ostream.hpp  +18 -2
src/share/vm/utilities/quickSort.cpp  +218 -0
src/share/vm/utilities/quickSort.hpp  +138 -0
test/gc/6941923/test6941923.sh  +179 -0
src/share/vm/classfile/javaClasses.cpp

@@ -1258,7 +1258,6 @@ class BacktraceBuilder: public StackObj {
   objArrayOop             _methods;
   typeArrayOop            _bcis;
   int                     _index;
-  bool                    _dirty;
   No_Safepoint_Verifier   _nsv;

  public:
@@ -1272,37 +1271,13 @@ class BacktraceBuilder: public StackObj {
   };

   // constructor for new backtrace
-  BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _dirty(false) {
+  BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL) {
     expand(CHECK);
     _backtrace = _head;
     _index = 0;
   }

-  void flush() {
-    // The following appears to have been an optimization to save from
-    // doing a barrier for each individual store into the _methods array,
-    // but rather to do it for the entire array after the series of writes.
-    // That optimization seems to have been lost when compressed oops was
-    // implemented. However, the extra card-marks below was left in place,
-    // but is now redundant because the individual stores into the
-    // _methods array already execute the barrier code. CR 6918185 has
-    // been filed so the original code may be restored by deferring the
-    // barriers until after the entire sequence of stores, thus re-enabling
-    // the intent of the original optimization. In the meantime the redundant
-    // card mark below is now disabled.
-    if (_dirty && _methods != NULL) {
-#if 0
-      BarrierSet* bs = Universe::heap()->barrier_set();
-      assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-      bs->write_ref_array((HeapWord*)_methods->base(), _methods->length());
-#endif
-      _dirty = false;
-    }
-  }
-
   void expand(TRAPS) {
-    flush();
     objArrayHandle old_head(THREAD, _head);
     Pause_No_Safepoint_Verifier pnsv(&_nsv);
@@ -1328,7 +1303,6 @@ class BacktraceBuilder: public StackObj {
   }

   oop backtrace() {
-    flush();
     return _backtrace();
   }
@@ -1342,7 +1316,6 @@ class BacktraceBuilder: public StackObj {
     _methods->obj_at_put(_index, method);
     _bcis->ushort_at_put(_index, bci);
     _index++;
-    _dirty = true;
   }

   methodOop current_method() {
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

@@ -1833,8 +1833,6 @@ CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
     }
   )
   _indexedFreeList[size].removeChunk(fc);
-  debug_only(fc->clearNext());
-  debug_only(fc->clearPrev());
   NOT_PRODUCT(
     if (FLSVerifyIndexTable) {
       verifyIndexedFreeList(size);
src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp

@@ -114,17 +114,11 @@ class FreeChunk VALUE_OBJ_CLASS_SPEC {
     linkNext(ptr);
     if (ptr != NULL) ptr->linkPrev(this);
   }
-  void linkAfterNonNull(FreeChunk* ptr) {
-    assert(ptr != NULL, "precondition violation");
-    linkNext(ptr);
-    ptr->linkPrev(this);
-  }
   void linkNext(FreeChunk* ptr) { _next = ptr; }
   void linkPrev(FreeChunk* ptr) {
     LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
     _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
   }
-  void clearPrev()              { _prev = NULL; }
   void clearNext()              { _next = NULL; }
   void markNotFree() {
    // Set _prev (klass) to null before (if) clearing the mark word below
src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp

@@ -300,8 +300,21 @@ void FreeList::verify_stats() const {
   // dictionary for example, this might be the first block and
   // in that case there would be no place that we could record
   // the stats (which are kept in the block itself).
-  assert(_allocation_stats.prevSweep() + _allocation_stats.splitBirths() + 1   // Total Stock + 1
-          >= _allocation_stats.splitDeaths() + (ssize_t)count(), "Conservation Principle");
+  assert((_allocation_stats.prevSweep() + _allocation_stats.splitBirths()
+          + _allocation_stats.coalBirths() + 1)   // Total Production Stock + 1
+         >= (_allocation_stats.splitDeaths() + _allocation_stats.coalDeaths()
+             + (ssize_t)count()),                // Total Current Stock + depletion
+         err_msg("FreeList " PTR_FORMAT " of size " SIZE_FORMAT
+                 " violates Conservation Principle: "
+                 "prevSweep(" SIZE_FORMAT ")"
+                 " + splitBirths(" SIZE_FORMAT ")"
+                 " + coalBirths(" SIZE_FORMAT ") + 1 >= "
+                 " splitDeaths(" SIZE_FORMAT ")"
+                 " coalDeaths(" SIZE_FORMAT ")"
+                 " + count(" SSIZE_FORMAT ")",
+                 this, _size, _allocation_stats.prevSweep(),
+                 _allocation_stats.splitBirths(), _allocation_stats.splitBirths(),
+                 _allocation_stats.splitDeaths(), _allocation_stats.coalDeaths(),
+                 count()));
 }

 void FreeList::assert_proper_lock_protection_work() const {
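As a quick illustration of the conservation principle the new assert encodes (the numbers below are invented, not from this change): with prevSweep = 10, splitBirths = 4 and coalBirths = 2, the production stock plus one is 10 + 4 + 2 + 1 = 17, which must cover splitDeaths + coalDeaths + count, for example 3 + 1 + 12 = 16, so the assert holds; if the current count were 14 instead, the inequality 17 >= 18 would fail and the err_msg above would report each term.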
src/share/vm/gc_implementation/g1/concurrentMark.cpp
(Large diff collapsed on this page; +446 -394, not shown.)
src/share/vm/gc_implementation/g1/concurrentMark.hpp

@@ -131,22 +131,22 @@ class CMBitMap : public CMBitMapRO {
   void mark(HeapWord* addr) {
     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
            "outside underlying space?");
-    _bm.at_put(heapWordToOffset(addr), true);
+    _bm.set_bit(heapWordToOffset(addr));
   }
   void clear(HeapWord* addr) {
     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
            "outside underlying space?");
-    _bm.at_put(heapWordToOffset(addr), false);
+    _bm.clear_bit(heapWordToOffset(addr));
   }
   bool parMark(HeapWord* addr) {
     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
            "outside underlying space?");
-    return _bm.par_at_put(heapWordToOffset(addr), true);
+    return _bm.par_set_bit(heapWordToOffset(addr));
   }
   bool parClear(HeapWord* addr) {
     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
            "outside underlying space?");
-    return _bm.par_at_put(heapWordToOffset(addr), false);
+    return _bm.par_clear_bit(heapWordToOffset(addr));
   }
   void markRange(MemRegion mr);
   void clearAll();

@@ -605,10 +605,10 @@ public:
   void mark_stack_pop(oop* arr, int max, int* n) {
     _markStack.par_pop_arr(arr, max, n);
   }
   size_t mark_stack_size()                { return _markStack.size(); }
   size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
   bool mark_stack_overflow()              { return _markStack.overflow(); }
   bool mark_stack_empty()                 { return _markStack.isEmpty(); }
   (whitespace-only realignment of mark_stack_size(), mark_stack_overflow() and
   mark_stack_empty(); the bodies are unchanged)

   // (Lock-free) Manipulation of the region stack
   bool region_stack_push_lock_free(MemRegion mr) {

@@ -736,12 +736,14 @@ public:
   // will dump the contents of its reference fields, as well as
   // liveness information for the object and its referents. The dump
   // will be written to a file with the following name:
-  // G1PrintReachableBaseFile + "." + str. use_prev_marking decides
-  // whether the prev (use_prev_marking == true) or next
-  // (use_prev_marking == false) marking information will be used to
-  // determine the liveness of each object / referent. If all is true,
-  // all objects in the heap will be dumped, otherwise only the live
-  // ones. In the dump the following symbols / abbreviations are used:
+  // G1PrintReachableBaseFile + "." + str.
+  // vo decides whether the prev (vo == UsePrevMarking), the next
+  // (vo == UseNextMarking) marking information, or the mark word
+  // (vo == UseMarkWord) will be used to determine the liveness of
+  // each object / referent.
+  // If all is true, all objects in the heap will be dumped, otherwise
+  // only the live ones. In the dump the following symbols / abbreviations
+  // are used:
   //   M : an explicitly live object (its bitmap bit is set)
   //   > : an implicitly live object (over tams)
   //   O : an object outside the G1 heap (typically: in the perm gen)

@@ -749,7 +751,7 @@ public:
   //   AND MARKED : indicates that an object is both explicitly and
   //   implicitly live (it should be one or the other, not both)
   void print_reachable(const char* str,
-                       bool use_prev_marking, bool all) PRODUCT_RETURN;
+                       VerifyOption vo, bool all) PRODUCT_RETURN;

   // Clear the next marking bitmap (will be called concurrently).
   void clearNextBitmap();

@@ -831,8 +833,9 @@ public:
   // _min_finger then we need to gray objects.
   // This routine is like registerCSetRegion but for an entire
   // collection of regions.
-    if (max_finger > _min_finger)
+    if (max_finger > _min_finger) {
       _should_gray_objects = true;
+    }
   }

   // Returns "true" if at least one mark has been completed.

@@ -878,14 +881,18 @@ public:
   // The following indicate whether a given verbose level has been
   // set. Notice that anything above stats is conditional to
   // _MARKING_VERBOSE_ having been set to 1
   bool verbose_stats()  { return _verbose_level >= stats_verbose; }
   bool verbose_low()    { return _MARKING_VERBOSE_ && _verbose_level >= low_verbose; }
   bool verbose_medium() { return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose; }
   bool verbose_high()   { return _MARKING_VERBOSE_ && _verbose_level >= high_verbose; }
   (whitespace-only reformatting of these four accessors; the bodies are unchanged)
 };

 // A class representing a marking task.

@@ -928,7 +935,7 @@ private:
   double                      _start_time_ms;

   // the oop closure used for iterations over oops
-  OopClosure*                 _oop_closure;
+  G1CMOopClosure*             _cm_oop_closure;

   // the region this task is scanning, NULL if we're not scanning any
   HeapRegion*                 _curr_region;

@@ -1061,8 +1068,9 @@ private:
   // respective limit and calls reached_limit() if they have
   void check_limits() {
     if (_words_scanned >= _words_scanned_limit ||
-        _refs_reached >= _refs_reached_limit)
+        _refs_reached >= _refs_reached_limit) {
       reached_limit();
+    }
   }

   // this is supposed to be called regularly during a marking step as
   // it checks a bunch of conditions that might cause the marking step

@@ -1122,32 +1130,17 @@ public:
   // Clears any recorded partially scanned region
   void clear_aborted_region() { set_aborted_region(MemRegion()); }

-  void set_oop_closure(OopClosure* oop_closure) {
-    _oop_closure = oop_closure;
-  }
+  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

   // It grays the object by marking it and, if necessary, pushing it
   // on the local queue
-  void deal_with_reference(oop obj);
+  inline void deal_with_reference(oop obj);

   // It scans an object and visits its children.
-  void scan_object(oop obj) {
-    assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
-
-    if (_cm->verbose_high())
-      gclog_or_tty->print_cr("[%d] we're scanning object " PTR_FORMAT,
-                             _task_id, (void*) obj);
-
-    size_t obj_size = obj->size();
-    _words_scanned += obj_size;
-
-    obj->oop_iterate(_oop_closure);
-    statsOnly( ++_objs_scanned );
-    check_limits();
-  }
+  void scan_object(oop obj);

   // It pushes an object on the local queue.
-  void push(oop obj);
+  inline void push(oop obj);

   // These two move entries to/from the global stack.
   void move_entries_to_global_stack();
src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp  (new file, mode 100644)

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"

inline void CMTask::push(oop obj) {
  HeapWord* objAddr = (HeapWord*) obj;
  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
  assert(!_g1h->is_on_master_free_list(
              _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
  assert(!_g1h->is_obj_ill(obj), "invariant");
  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");

  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%d] pushing " PTR_FORMAT, _task_id, (void*) obj);
  }

  if (!_task_queue->push(obj)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.
    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%d] task queue overflow, "
                             "moving entries to the global stack",
                             _task_id);
    }
    move_entries_to_global_stack();

    // this should succeed since, even if we overflow the global
    // stack, we should have definitely removed some entries from the
    // local queue. So, there must be space on it.
    bool success = _task_queue->push(obj);
    assert(success, "invariant");
  }

  statsOnly( int tmp_size = _task_queue->size();
             if (tmp_size > _local_max_size) {
               _local_max_size = tmp_size;
             }
             ++_local_pushes );
}

// This determines whether the method below will check both the local
// and global fingers when determining whether to push on the stack a
// gray object (value 1) or whether it will only check the global one
// (value 0). The tradeoffs are that the former will be a bit more
// accurate and possibly push less on the stack, but it might also be
// a little bit slower.

#define _CHECK_BOTH_FINGERS_      1

inline void CMTask::deal_with_reference(oop obj) {
  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%d] we're dealing with reference = " PTR_FORMAT,
                           _task_id, (void*) obj);
  }

  ++_refs_reached;

  HeapWord* objAddr = (HeapWord*) obj;
  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
  if (_g1h->is_in_g1_reserved(objAddr)) {
    assert(obj != NULL, "null check is implicit");
    if (!_nextMarkBitMap->isMarked(objAddr)) {
      // Only get the containing region if the object is not marked on the
      // bitmap (otherwise, it's a waste of time since we won't do
      // anything with it).
      HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
      if (!hr->obj_allocated_since_next_marking(obj)) {
        if (_cm->verbose_high()) {
          gclog_or_tty->print_cr("[%d] " PTR_FORMAT " is not considered marked",
                                 _task_id, (void*) obj);
        }

        // we need to mark it first
        if (_nextMarkBitMap->parMark(objAddr)) {
          // No OrderAccess:store_load() is needed. It is implicit in the
          // CAS done in parMark(objAddr) above
          HeapWord* global_finger = _cm->finger();

#if _CHECK_BOTH_FINGERS_
          // we will check both the local and global fingers

          if (_finger != NULL && objAddr < _finger) {
            if (_cm->verbose_high()) {
              gclog_or_tty->print_cr("[%d] below the local finger (" PTR_FORMAT "), "
                                     "pushing it", _task_id, _finger);
            }
            push(obj);
          } else if (_curr_region != NULL && objAddr < _region_limit) {
            // do nothing
          } else if (objAddr < global_finger) {
            // Notice that the global finger might be moving forward
            // concurrently. This is not a problem. In the worst case, we
            // mark the object while it is above the global finger and, by
            // the time we read the global finger, it has moved forward
            // passed this object. In this case, the object will probably
            // be visited when a task is scanning the region and will also
            // be pushed on the stack. So, some duplicate work, but no
            // correctness problems.
            if (_cm->verbose_high()) {
              gclog_or_tty->print_cr("[%d] below the global finger "
                                     "(" PTR_FORMAT "), pushing it",
                                     _task_id, global_finger);
            }
            push(obj);
          } else {
            // do nothing
          }
#else // _CHECK_BOTH_FINGERS_
          // we will only check the global finger
          if (objAddr < global_finger) {
            // see long comment above
            if (_cm->verbose_high()) {
              gclog_or_tty->print_cr("[%d] below the global finger "
                                     "(" PTR_FORMAT "), pushing it",
                                     _task_id, global_finger);
            }
            push(obj);
          }
#endif // _CHECK_BOTH_FINGERS_
        }
      }
    }
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
(Large diff collapsed on this page; +431 -258, not shown.)
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

@@ -27,8 +27,10 @@
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/g1/g1HRPrinter.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"

@@ -42,7 +44,6 @@
 // heap subsets that will yield large amounts of garbage.

 class HeapRegion;
-class HeapRegionSeq;
 class HRRSCleanupTask;
 class PermanentGenerationSpec;
 class GenerationSpec;

@@ -103,6 +104,19 @@ public:
   size_t length() { return _length; }
   size_t survivor_length() { return _survivor_length; }

+  // Currently we do not keep track of the used byte sum for the
+  // young list and the survivors and it'd be quite a lot of work to
+  // do so. When we'll eventually replace the young list with
+  // instances of HeapRegionLinkedList we'll get that for free. So,
+  // we'll report the more accurate information then.
+  size_t eden_used_bytes() {
+    assert(length() >= survivor_length(), "invariant");
+    return (length() - survivor_length()) * HeapRegion::GrainBytes;
+  }
+  size_t survivor_used_bytes() {
+    return survivor_length() * HeapRegion::GrainBytes;
+  }
+
   void rs_length_sampling_init();
   bool rs_length_sampling_more();
   void rs_length_sampling_next();

@@ -183,9 +197,6 @@ private:
   // The part of _g1_storage that is currently committed.
   MemRegion _g1_committed;

-  // The maximum part of _g1_storage that has ever been committed.
-  MemRegion _g1_max_committed;
-
   // The master free list. It will satisfy all new region allocations.
   MasterFreeRegionList _free_list;

@@ -209,7 +220,7 @@ private:
   void rebuild_region_lists();

   // The sequence of all heap regions in the heap.
-  HeapRegionSeq* _hrs;
+  HeapRegionSeq _hrs;

   // Alloc region used to satisfy mutator allocation requests.
   MutatorAllocRegion _mutator_alloc_region;

@@ -288,6 +299,8 @@ private:
   size_t* _surviving_young_words;

+  G1HRPrinter _hr_printer;
+
   void setup_surviving_young_words();
   void update_surviving_young_words(size_t* surv_young_words);
   void cleanup_surviving_young_words();

@@ -408,13 +421,15 @@ protected:
   // Attempt to satisfy a humongous allocation request of the given
   // size by finding a contiguous set of free regions of num_regions
   // length and remove them from the master free list. Return the
-  // index of the first region or -1 if the search was unsuccessful.
-  int humongous_obj_allocate_find_first(size_t num_regions,
-                                        size_t word_size);
+  // index of the first region or G1_NULL_HRS_INDEX if the search
+  // was unsuccessful.
+  size_t humongous_obj_allocate_find_first(size_t num_regions,
+                                           size_t word_size);

   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
   // humongous region.
-  HeapWord* humongous_obj_allocate_initialize_regions(int first,
+  HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
                                                       size_t num_regions,
                                                       size_t word_size);

@@ -434,8 +449,7 @@ protected:
   // * All allocation requests for new TLABs should go to
   //   allocate_new_tlab().
   //
-  // * All non-TLAB allocation requests should go to mem_allocate()
-  //   and mem_allocate() should never be called with is_tlab == true.
+  // * All non-TLAB allocation requests should go to mem_allocate().
   //
   // * If either call cannot satisfy the allocation request using the
   //   current allocating region, they will try to get a new one. If

@@ -455,8 +469,6 @@ protected:
   virtual HeapWord* allocate_new_tlab(size_t word_size);

   virtual HeapWord* mem_allocate(size_t word_size,
-                                 bool   is_noref,
-                                 bool   is_tlab, /* expected to be false */
                                  bool*  gc_overhead_limit_was_exceeded);

   // The following three methods take a gc_count_before_ret

@@ -574,8 +586,8 @@ public:
   void register_region_with_in_cset_fast_test(HeapRegion* r) {
     assert(_in_cset_fast_test_base != NULL, "sanity");
     assert(r->in_collection_set(), "invariant");
-    int index = r->hrs_index();
-    assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
+    size_t index = r->hrs_index();
+    assert(index < _in_cset_fast_test_length, "invariant");
     assert(!_in_cset_fast_test_base[index], "invariant");
     _in_cset_fast_test_base[index] = true;
   }

@@ -626,6 +638,8 @@ public:
     return _full_collections_completed;
   }

+  G1HRPrinter* hr_printer() { return &_hr_printer; }
+
 protected:

   // Shrink the garbage-first heap by at most the given size (in bytes!).

@@ -741,6 +755,11 @@ protected:
                                    HumongousRegionSet* humongous_proxy_set,
                                    bool par);

+  // Notifies all the necessary spaces that the committed space has
+  // been updated (either expanded or shrunk). It should be called
+  // after _g1_storage is updated.
+  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
+
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmThread;

@@ -803,7 +822,6 @@ protected:
   oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
   void handle_evacuation_failure_common(oop obj, markOop m);

   // Ensure that the relevant gc_alloc regions are set.
   void get_gc_alloc_regions();
   // We're done with GC alloc regions. We are going to tear down the

@@ -954,15 +972,13 @@ public:
   }

   // The total number of regions in the heap.
-  size_t n_regions();
+  size_t n_regions() { return _hrs.length(); }

-  // The number of regions that are completely free.
-  size_t max_regions();
+  // The max number of regions in the heap.
+  size_t max_regions() { return _hrs.max_length(); }

   // The number of regions that are completely free.
   size_t free_regions() { return _free_list.length(); }

   // The number of regions that are not completely free.
   size_t used_regions() { return n_regions() - free_regions(); }

@@ -970,6 +986,10 @@ public:
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }

+  // Factory method for HeapRegion instances. It will return NULL if
+  // the allocation fails.
+  HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
+
   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;

@@ -1131,17 +1151,15 @@ public:
   // Iterate over heap regions, in address order, terminating the
   // iteration early if the "doHeapRegion" method returns "true".
-  void heap_region_iterate(HeapRegionClosure* blk);
+  void heap_region_iterate(HeapRegionClosure* blk) const;

   // Iterate over heap regions starting with r (or the first region if "r"
   // is NULL), in address order, terminating early if the "doHeapRegion"
   // method returns "true".
-  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk);
-
-  // As above but starting from the region at index idx.
-  void heap_region_iterate_from(int idx, HeapRegionClosure* blk);
+  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;

-  HeapRegion* region_at(size_t idx);
+  // Return the region with the given index. It assumes the index is valid.
+  HeapRegion* region_at(size_t index) const { return _hrs.at(index); }

   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some

@@ -1182,12 +1200,14 @@ public:
   // A G1CollectedHeap will contain some number of heap regions. This
   // finds the region containing a given address, or else returns NULL.
-  HeapRegion* heap_region_containing(const void* addr) const;
+  template <class T>
+  inline HeapRegion* heap_region_containing(const T addr) const;

   // Like the above, but requires "addr" to be in the heap (to avoid a
   // null-check), and unlike the above, may return an continuing humongous
   // region.
-  HeapRegion* heap_region_containing_raw(const void* addr) const;
+  template <class T>
+  inline HeapRegion* heap_region_containing_raw(const T addr) const;

   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
   // each address in the (reserved) heap is a member of exactly

@@ -1249,7 +1269,7 @@ public:
     return true;
   }

-  bool is_in_young(oop obj) {
+  bool is_in_young(const oop obj) {
     HeapRegion* hr = heap_region_containing(obj);
     return hr != NULL && hr->is_young();
   }

@@ -1286,10 +1306,6 @@ public:
     return true;
   }

-  // The boundary between a "large" and "small" array of primitives, in
-  // words.
-  virtual size_t large_typearray_limit();
-
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
     // Note this has to be strictly greater-than as the TLABs

@@ -1329,14 +1345,20 @@ public:
   // Perform verification.

-  // use_prev_marking == true -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
+  // vo == UsePrevMarking -> use "prev" marking information,
+  // vo == UseNextMarking -> use "next" marking information
+  // vo == UseMarkWord    -> use the mark word in the object header
+  //
   // NOTE: Only the "prev" marking information is guaranteed to be
   // consistent most of the time, so most calls to this should use
-  // use_prev_marking == true. Currently, there is only one case where
-  // this is called with use_prev_marking == false, which is to verify
-  // the "next" marking information at the end of remark.
-  void verify(bool allow_dirty, bool silent, bool use_prev_marking);
+  // vo == UsePrevMarking.
+  // Currently, there is only one case where this is called with
+  // vo == UseNextMarking, which is to verify the "next" marking
+  // information at the end of remark.
+  // Currently there is only one place where this is called with
+  // vo == UseMarkWord, which is to verify the marking during a
+  // full GC.
+  void verify(bool allow_dirty, bool silent, VerifyOption vo);

   // Override; it uses the "prev" marking information
   virtual void verify(bool allow_dirty, bool silent);

@@ -1355,10 +1377,9 @@ public:
   // Override
   void print_tracing_info() const;

-  // If "addr" is a pointer into the (reserved?) heap, returns a positive
-  // number indicating the "arena" within the heap in which "addr" falls.
-  // Or else returns 0.
-  virtual int addr_to_arena_id(void* addr) const;
+  // The following two methods are helpful for debugging RSet issues.
+  void print_cset_rsets() PRODUCT_RETURN;
+  void print_all_rsets() PRODUCT_RETURN;

   // Convenience function to be used in situations where the heap type can be
   // asserted to be this type.

@@ -1389,24 +1410,27 @@ public:
   // bitmap off to the side.
   void doConcurrentMark();

-  // This is called from the marksweep collector which then does
-  // a concurrent mark and verifies that the results agree with
-  // the stop the world marking.
-  void checkConcurrentMark();
   // Do a full concurrent marking, synchronously.
   void do_sync_mark();

   bool isMarkedPrev(oop obj) const;
   bool isMarkedNext(oop obj) const;

-  // use_prev_marking == true -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
+  // vo == UsePrevMarking -> use "prev" marking information,
+  // vo == UseNextMarking -> use "next" marking information,
+  // vo == UseMarkWord    -> use mark word from object header
   bool is_obj_dead_cond(const oop obj,
                         const HeapRegion* hr,
-                        const bool use_prev_marking) const {
-    if (use_prev_marking) {
-      return is_obj_dead(obj, hr);
-    } else {
-      return is_obj_ill(obj, hr);
+                        const VerifyOption vo) const {
+    switch (vo) {
+      case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
+      case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
+      default:
+        assert(vo == VerifyOption_G1UseMarkWord, "must be");
+        return !obj->is_gc_marked();
     }
   }

@@ -1447,18 +1471,24 @@ public:
   // Added if it is in permanent gen it isn't dead.
   // Added if it is NULL it isn't dead.

-  // use_prev_marking == true -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
+  // vo == UsePrevMarking -> use "prev" marking information,
+  // vo == UseNextMarking -> use "next" marking information,
+  // vo == UseMarkWord    -> use mark word from object header
   bool is_obj_dead_cond(const oop obj,
-                        const bool use_prev_marking) {
-    if (use_prev_marking) {
-      return is_obj_dead(obj);
-    } else {
-      return is_obj_ill(obj);
+                        const VerifyOption vo) const {
+    switch (vo) {
+      case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
+      case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
+      default:
+        assert(vo == VerifyOption_G1UseMarkWord, "must be");
+        return !obj->is_gc_marked();
     }
   }

-  bool is_obj_dead(const oop obj) {
+  bool is_obj_dead(const oop obj) const {
     const HeapRegion* hr = heap_region_containing(obj);
     if (hr == NULL) {
       if (Universe::heap()->is_in_permanent(obj))

@@ -1469,7 +1499,7 @@ public:
     else return is_obj_dead(obj, hr);
   }

-  bool is_obj_ill(const oop obj) {
+  bool is_obj_ill(const oop obj) const {
     const HeapRegion* hr = heap_region_containing(obj);
     if (hr == NULL) {
       if (Universe::heap()->is_in_permanent(obj))
src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

@@ -34,9 +34,10 @@
 // Inline functions for G1CollectedHeap

+template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const void* addr) const {
-  HeapRegion* hr = _hrs->addr_to_region(addr);
+G1CollectedHeap::heap_region_containing(const T addr) const {
+  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
   // hr can be null if addr in perm_gen
   if (hr != NULL && hr->continuesHumongous()) {
     hr = hr->humongous_start_region();

@@ -44,19 +45,16 @@ G1CollectedHeap::heap_region_containing(const void* addr) const {
   return hr;
 }

+template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
-  assert(_g1_reserved.contains(addr), "invariant");
-  size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
-                                        >> HeapRegion::LogOfHRGrainBytes;
-  HeapRegion* res = _hrs->at(index);
-  assert(res == _hrs->addr_to_region(addr), "sanity");
+G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+  assert(_g1_reserved.contains((const void*) addr), "invariant");
+  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
   return res;
 }

 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
-  HeapRegion* r = _hrs->addr_to_region(obj);
+  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
   return r != NULL && r->in_collection_set();
 }
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

@@ -239,6 +239,10 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _should_revert_to_full_young_gcs(false),
   _last_full_young_gc(false),

+  _eden_bytes_before_gc(0),
+  _survivor_bytes_before_gc(0),
+  _capacity_before_gc(0),
+
   _prev_collection_pause_used_at_end_bytes(0),

   _collection_set(NULL),

@@ -897,6 +901,11 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
   _bytes_in_to_space_after_gc = 0;
   _bytes_in_collection_set_before_gc = 0;

+  YoungList* young_list = _g1->young_list();
+  _eden_bytes_before_gc = young_list->eden_used_bytes();
+  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
+  _capacity_before_gc = _g1->capacity();
+
 #ifdef DEBUG
   // initialise these to something well known so that we can spot
   // if they are not set properly

@@ -1460,14 +1469,6 @@ void G1CollectorPolicy::record_collection_pause_end() {
     }
   }

-  if (PrintGCDetails)
-    gclog_or_tty->print(" [");
-  if (PrintGC || PrintGCDetails)
-    _g1->print_size_transition(gclog_or_tty,
-                               _cur_collection_pause_used_at_start_bytes,
-                               _g1->used(), _g1->capacity());
-  if (PrintGCDetails)
-    gclog_or_tty->print_cr("]");
-
   _all_pause_times_ms->add(elapsed_ms);
   if (update_stats) {

@@ -1672,6 +1673,40 @@
   // </NEW PREDICTION>
 }

+#define EXT_SIZE_FORMAT "%d%s"
+#define EXT_SIZE_PARAMS(bytes)            \
+  byte_size_in_proper_unit((bytes)),      \
+  proper_unit_for_byte_size((bytes))
+
+void G1CollectorPolicy::print_heap_transition() {
+  if (PrintGCDetails) {
+    YoungList* young_list = _g1->young_list();
+    size_t eden_bytes = young_list->eden_used_bytes();
+    size_t survivor_bytes = young_list->survivor_used_bytes();
+    size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
+    size_t used = _g1->used();
+    size_t capacity = _g1->capacity();
+
+    gclog_or_tty->print_cr(
+         "   [Eden: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
+             "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
+             "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
+             EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
+         EXT_SIZE_PARAMS(_eden_bytes_before_gc),
+         EXT_SIZE_PARAMS(eden_bytes),
+         EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
+         EXT_SIZE_PARAMS(survivor_bytes),
+         EXT_SIZE_PARAMS(used_before_gc),
+         EXT_SIZE_PARAMS(_capacity_before_gc),
+         EXT_SIZE_PARAMS(used),
+         EXT_SIZE_PARAMS(capacity));
+  } else if (PrintGC) {
+    _g1->print_size_transition(gclog_or_tty,
+                               _cur_collection_pause_used_at_start_bytes,
+                               _g1->used(), _g1->capacity());
+  }
+}
+
 // <NEW PREDICTION>

 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
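For orientation only (this note is not part of the diff): with -XX:+PrintGCDetails the new print_heap_transition() emits a single line in roughly the following shape, where each quantity is rendered by EXT_SIZE_PARAMS as a value plus unit; the numbers here are invented for illustration.

   [Eden: 8192K->0K Survivors: 0K->1024K Heap: 30M(64M)->22M(64M)]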
@@ -2435,21 +2470,6 @@ record_collection_pause_start(double start_time_sec, size_t start_used) {
   G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
 }

-class NextNonCSElemFinder: public HeapRegionClosure {
-  HeapRegion* _res;
-public:
-  NextNonCSElemFinder(): _res(NULL) {}
-  bool doHeapRegion(HeapRegion* r) {
-    if (!r->in_collection_set()) {
-      _res = r;
-      return true;
-    } else {
-      return false;
-    }
-  }
-  HeapRegion* res() { return _res; }
-};
-
 class KnownGarbageClosure: public HeapRegionClosure {
   CollectionSetChooser* _hrSorted;

@@ -2618,14 +2638,6 @@ add_to_collection_set(HeapRegion* hr) {
   assert(_inc_cset_build_state == Active, "Precondition");
   assert(!hr->is_young(), "non-incremental add of young region");

-  if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr("added region to cset "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
-  }
-
   if (_g1->mark_in_progress())
     _g1->concurrent_mark()->registerCSetRegion(hr);

@@ -2791,14 +2803,6 @@ void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
     _inc_cset_tail->set_next_in_collection_set(hr);
   }
   _inc_cset_tail = hr;

-  if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", young %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), (hr->is_young()) ? "YES" : "NO");
-  }
 }

 // Add the region to the LHS of the incremental cset

@@ -2816,14 +2820,6 @@ void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
     _inc_cset_tail = hr;
   }
   _inc_cset_head = hr;

-  if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", young %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), (hr->is_young()) ? "YES" : "NO");
-  }
 }

 #ifndef PRODUCT
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

@@ -891,6 +891,7 @@ public:
   virtual void record_collection_pause_end_G1_strong_roots();
   virtual void record_collection_pause_end();
+  void print_heap_transition();

   // Record the fact that a full collection occurred.
   virtual void record_full_collection_start();

@@ -1179,6 +1180,11 @@ protected:
   // The limit on the number of regions allocated for survivors.
   size_t _max_survivor_regions;

+  // For reporting purposes.
+  size_t _eden_bytes_before_gc;
+  size_t _survivor_bytes_before_gc;
+  size_t _capacity_before_gc;
+
   // The amount of survor regions after a collection.
   size_t _recorded_survivor_regions;
   // List of survivor regions.
src/share/vm/gc_implementation/g1/g1HRPrinter.cpp  (new file, mode 100644)

/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (standard GPLv2 license header, identical to the one shown above)
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "utilities/ostream.hpp"

const char* G1HRPrinter::action_name(ActionType action) {
  switch(action) {
    case Alloc:          return "ALLOC";
    case AllocForce:     return "ALLOC-FORCE";
    case Retire:         return "RETIRE";
    case Reuse:          return "REUSE";
    case CSet:           return "CSET";
    case EvacFailure:    return "EVAC-FAILURE";
    case Cleanup:        return "CLEANUP";
    case PostCompaction: return "POST-COMPACTION";
    case Commit:         return "COMMIT";
    case Uncommit:       return "UNCOMMIT";
    default:             ShouldNotReachHere();
  }
  // trying to keep the Windows compiler happy
  return NULL;
}

const char* G1HRPrinter::region_type_name(RegionType type) {
  switch (type) {
    case Unset:              return NULL;
    case Eden:               return "Eden";
    case Survivor:           return "Survivor";
    case Old:                return "Old";
    case SingleHumongous:    return "SingleH";
    case StartsHumongous:    return "StartsH";
    case ContinuesHumongous: return "ContinuesH";
    default:                 ShouldNotReachHere();
  }
  // trying to keep the Windows compiler happy
  return NULL;
}

const char* G1HRPrinter::phase_name(PhaseType phase) {
  switch (phase) {
    case StartGC:     return "StartGC";
    case EndGC:       return "EndGC";
    case StartFullGC: return "StartFullGC";
    case EndFullGC:   return "EndFullGC";
    default:          ShouldNotReachHere();
  }
  // trying to keep the Windows compiler happy
  return NULL;
}

#define G1HR_PREFIX " G1HR"

void G1HRPrinter::print(ActionType action, RegionType type,
                        HeapRegion* hr, HeapWord* top) {
  const char* action_str = action_name(action);
  const char* type_str   = region_type_name(type);
  HeapWord* bottom = hr->bottom();

  if (type_str != NULL) {
    if (top != NULL) {
      gclog_or_tty->print_cr(G1HR_PREFIX" %s(%s) "PTR_FORMAT" "PTR_FORMAT,
                             action_str, type_str, bottom, top);
    } else {
      gclog_or_tty->print_cr(G1HR_PREFIX" %s(%s) "PTR_FORMAT,
                             action_str, type_str, bottom);
    }
  } else {
    if (top != NULL) {
      gclog_or_tty->print_cr(G1HR_PREFIX" %s "PTR_FORMAT" "PTR_FORMAT,
                             action_str, bottom, top);
    } else {
      gclog_or_tty->print_cr(G1HR_PREFIX" %s "PTR_FORMAT,
                             action_str, bottom);
    }
  }
}

void G1HRPrinter::print(ActionType action, HeapWord* bottom, HeapWord* end) {
  const char* action_str = action_name(action);

  gclog_or_tty->print_cr(G1HR_PREFIX" %s ["PTR_FORMAT","PTR_FORMAT"]",
                         action_str, bottom, end);
}

void G1HRPrinter::print(PhaseType phase, size_t phase_num) {
  const char* phase_str = phase_name(phase);
  gclog_or_tty->print_cr(G1HR_PREFIX" #%s "SIZE_FORMAT, phase_str, phase_num);
}
src/share/vm/gc_implementation/g1/g1HRPrinter.hpp  (new file, mode 100644)

/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * (standard GPLv2 license header, identical to the one shown above)
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP

#include "memory/allocation.hpp"
#include "gc_implementation/g1/heapRegion.hpp"

#define SKIP_RETIRED_FULL_REGIONS 1

class G1HRPrinter VALUE_OBJ_CLASS_SPEC {
public:
  typedef enum {
    Alloc,
    AllocForce,
    Retire,
    Reuse,
    CSet,
    EvacFailure,
    Cleanup,
    PostCompaction,
    Commit,
    Uncommit
  } ActionType;

  typedef enum {
    Unset,
    Eden,
    Survivor,
    Old,
    SingleHumongous,
    StartsHumongous,
    ContinuesHumongous
  } RegionType;

  typedef enum {
    StartGC,
    EndGC,
    StartFullGC,
    EndFullGC
  } PhaseType;

private:
  bool _active;

  static const char* action_name(ActionType action);
  static const char* region_type_name(RegionType type);
  static const char* phase_name(PhaseType phase);

  // Print an action event. This version is used in most scenarios and
  // only prints the region's bottom. The parameters type and top are
  // optional (the "not set" values are Unset and NULL).
  static void print(ActionType action, RegionType type,
                    HeapRegion* hr, HeapWord* top);

  // Print an action event. This version prints both the region's
  // bottom and end. Used for Commit / Uncommit events.
  static void print(ActionType action, HeapWord* bottom, HeapWord* end);

  // Print a phase event.
  static void print(PhaseType phase, size_t phase_num);

public:
  // In some places we iterate over a list in order to generate output
  // for the list's elements. By exposing this we can avoid this
  // iteration if the printer is not active.
  const bool is_active() { return _active; }

  // Have to set this explicitly as we have to do this during the
  // heap's initialize() method, not in the constructor.
  void set_active(bool active) { _active = active; }

  // The methods below are convenient wrappers for the print() methods.

  void alloc(HeapRegion* hr, RegionType type, bool force = false) {
    if (is_active()) {
      print((!force) ? Alloc : AllocForce, type, hr, NULL);
    }
  }

  void alloc(RegionType type, HeapRegion* hr, HeapWord* top) {
    if (is_active()) {
      print(Alloc, type, hr, top);
    }
  }

  void retire(HeapRegion* hr) {
    if (is_active()) {
      if (!SKIP_RETIRED_FULL_REGIONS || hr->top() < hr->end()) {
        print(Retire, Unset, hr, hr->top());
      }
    }
  }

  void reuse(HeapRegion* hr) {
    if (is_active()) {
      print(Reuse, Unset, hr, NULL);
    }
  }

  void cset(HeapRegion* hr) {
    if (is_active()) {
      print(CSet, Unset, hr, NULL);
    }
  }

  void evac_failure(HeapRegion* hr) {
    if (is_active()) {
      print(EvacFailure, Unset, hr, NULL);
    }
  }

  void cleanup(HeapRegion* hr) {
    if (is_active()) {
      print(Cleanup, Unset, hr, NULL);
    }
  }

  void post_compaction(HeapRegion* hr, RegionType type) {
    if (is_active()) {
      print(PostCompaction, type, hr, hr->top());
    }
  }

  void commit(HeapWord* bottom, HeapWord* end) {
    if (is_active()) {
      print(Commit, bottom, end);
    }
  }

  void uncommit(HeapWord* bottom, HeapWord* end) {
    if (is_active()) {
      print(Uncommit, bottom, end);
    }
  }

  void start_gc(bool full, size_t gc_num) {
    if (is_active()) {
      if (!full) {
        print(StartGC, gc_num);
      } else {
        print(StartFullGC, gc_num);
      }
    }
  }

  void end_gc(bool full, size_t gc_num) {
    if (is_active()) {
      if (!full) {
        print(EndGC, gc_num);
      } else {
        print(EndFullGC, gc_num);
      }
    }
  }

  G1HRPrinter() : _active(false) { }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HRPRINTER_HPP
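A minimal usage sketch, illustrative only and not code from this changeset: the wrappers above are reached through the hr_printer() accessor that this commit adds to G1CollectedHeap, and they only emit output after set_active(true) has been called (which, per the comment above, happens during heap initialization). Assuming a G1CollectedHeap* named g1h and a HeapRegion* named hr are in scope:

  // Illustrative sketch only; names g1h and hr are assumed, not from the diff.
  G1HRPrinter* printer = g1h->hr_printer();  // accessor added in g1CollectedHeap.hpp
  printer->set_active(true);                 // normally done once while the heap initializes
  printer->alloc(hr, G1HRPrinter::Eden);     // prints " G1HR ALLOC(Eden) <bottom>"
  printer->retire(hr);                       // prints " G1HR RETIRE <bottom> <top>";
                                             // skipped for completely full regions when
                                             // SKIP_RETIRED_FULL_REGIONS is set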
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp

@@ -84,11 +84,6 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
   mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

-  if (VerifyDuringGC) {
-    G1CollectedHeap* g1h = G1CollectedHeap::heap();
-    g1h->checkConcurrentMark();
-  }
-
   mark_sweep_phase2();

   // Don't add any more derived pointers during phase3

@@ -179,6 +174,29 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
   assert(GenMarkSweep::_marking_stack.is_empty(), "stack should be empty by now");

+  if (VerifyDuringGC) {
+    HandleMark hm;  // handle scope
+    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
+    gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
+    Universe::heap()->prepare_for_verify();
+    // Note: we can verify only the heap here. When an object is
+    // marked, the previous value of the mark word (including
+    // identity hash values, ages, etc) is preserved, and the mark
+    // word is set to markOop::marked_value - effectively removing
+    // any hash values from the mark word. These hash values are
+    // used when verifying the dictionaries and so removing them
+    // from the mark word can make verification of the dictionaries
+    // fail. At the end of the GC, the orginal mark word values
+    // (including hash values) are restored to the appropriate
+    // objects.
+    Universe::heap()->verify(/* allow dirty */ true,
+                             /* silent      */ false,
+                             /* option      */ VerifyOption_G1UseMarkWord);
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    gclog_or_tty->print_cr("]");
+  }
 }

 class G1PrepareCompactClosure: public HeapRegionClosure {
src/share/vm/gc_implementation/g1/g1OopClosures.hpp

 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -33,6 +33,7 @@ class DirtyCardToOopClosure;
 class CMBitMap;
 class CMMarkStack;
 class G1ParScanThreadState;
+class CMTask;

 // A class that scans oops in a given heap region (much as OopsInGenClosure
 // scans oops in a generation.)

@@ -40,7 +41,7 @@ class OopsInHeapRegionClosure: public OopsInGenClosure {
 protected:
   HeapRegion* _from;
 public:
-  virtual void set_region(HeapRegion* from) { _from = from; }
+  void set_region(HeapRegion* from) { _from = from; }
 };

 class G1ParClosureSuper : public OopsInHeapRegionClosure {

@@ -161,44 +162,6 @@ public:
   bool do_header() { return false; }
 };

-class FilterInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure {
-  G1CollectedHeap* _g1;
-  OopsInHeapRegionClosure* _oc;
-public:
-  FilterInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1,
-                                     OopsInHeapRegionClosure* oc) :
-    _g1(g1), _oc(oc) {}
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(oop* p) { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-  bool apply_to_weak_ref_discovered_field() { return true; }
-  bool do_header() { return false; }
-  void set_region(HeapRegion* from) {
-    _oc->set_region(from);
-  }
-};
-
-class FilterAndMarkInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure {
-  G1CollectedHeap* _g1;
-  ConcurrentMark* _cm;
-  OopsInHeapRegionClosure* _oc;
-public:
-  FilterAndMarkInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1,
-                                            OopsInHeapRegionClosure* oc,
-                                            ConcurrentMark* cm) :
-    _g1(g1), _oc(oc), _cm(cm) { }
-  template <class T> void do_oop_nv(T* p);
-  virtual void do_oop(oop* p) { do_oop_nv(p); }
-  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-  bool apply_to_weak_ref_discovered_field() { return true; }
-  bool do_header() { return false; }
-  void set_region(HeapRegion* from) {
-    _oc->set_region(from);
-  }
-};
-
 class FilterOutOfRegionClosure: public OopClosure {
   HeapWord* _r_bottom;
   HeapWord* _r_end;

@@ -214,4 +177,16 @@ public:
   int out_of_region() { return _out_of_region; }
 };

+// Closure for iterating over object fields during concurrent marking
+class G1CMOopClosure : public OopClosure {
+  G1CollectedHeap*   _g1h;
+  ConcurrentMark*    _cm;
+  CMTask*            _task;
+public:
+  G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(      oop* p) { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+};
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
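G1CMOopClosure replaces the two removed filter-and-mark closures: both do_oop overloads funnel into one templated do_oop_nv(), which hands the loaded reference to the owning CMTask. The sketch below shows only that shape, with simplified stand-in types; oop, narrowOop and the task here are placeholders, not the HotSpot definitions.

// Standalone sketch of the closure shape declared above (stand-in types).
#include <cstdio>
#include <cstdint>

typedef unsigned int narrowOop;   // stand-ins for the HotSpot typedefs
typedef void*        oop;

struct CMTaskSketch {
  int id;
  void deal_with_reference(oop obj) { std::printf("[%d] ref %p\n", id, obj); }
};

class G1CMOopClosureSketch {
  CMTaskSketch* _task;
public:
  explicit G1CMOopClosureSketch(CMTaskSketch* task) : _task(task) { }

  // One templated worker, two entry points: the same layout the header
  // above declares for G1CMOopClosure.
  template <class T> void do_oop_nv(T* p) {
    oop obj = (oop) (uintptr_t) *p;   // "load and decode" stand-in
    _task->deal_with_reference(obj);
  }
  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }
};

int main() {
  CMTaskSketch task = { 0 };
  G1CMOopClosureSketch cl(&task);
  int dummy;
  oop slot = &dummy;
  cl.do_oop(&slot);   // forwards the reference held in 'slot' to the task
  return 0;
}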
src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp

 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP

-#include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/concurrentMark.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1OopClosures.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"

@@ -66,27 +66,6 @@ template <class T> inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
   }
 }

-template <class T> inline void FilterInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop) &&
-      _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop)))
-    _oc->do_oop(p);
-}
-
-template <class T> inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    HeapRegion* hr = _g1->heap_region_containing((HeapWord*) obj);
-    if (hr != NULL) {
-      if (hr->in_collection_set())
-        _oc->do_oop(p);
-      else if (!hr->is_young())
-        _cm->grayRoot(obj);
-    }
-  }
-}
-
 // This closure is applied to the fields of the objects that have just been copied.
 template <class T> inline void G1ParScanClosure::do_oop_nv(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);

@@ -129,5 +108,18 @@ template <class T> inline void G1ParPushHeapRSClosure::do_oop_nv(T* p) {
   }
 }

+template <class T> inline void G1CMOopClosure::do_oop_nv(T* p) {
+  assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
+  assert(!_g1h->is_on_master_free_list(
+              _g1h->heap_region_containing((HeapWord*) p)), "invariant");
+
+  oop obj = oopDesc::load_decode_heap_oop(p);
+  if (_cm->verbose_high()) {
+    gclog_or_tty->print_cr("[%d] we're looking at location "
+                           "*" PTR_FORMAT " = " PTR_FORMAT,
+                           _task->task_id(), p, (void*) obj);
+  }
+  _task->deal_with_reference(obj);
+}
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_INLINE_HPP
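The include switch from concurrentMark.hpp to concurrentMark.inline.hpp follows from the new G1CMOopClosure::do_oop_nv(): it calls CMTask::deal_with_reference(), and a caller that is expected to inline such a call must include the file that defines it, not just the one that declares it. Below is a self-contained illustration of that rule, with the file boundaries shown as comments and entirely hypothetical names.

// Standalone sketch of the include-what-you-call rule behind the
// concurrentMark.inline.hpp change (file boundaries shown as comments).

// --- task.hpp (declaration only) -------------------------------------------
struct TaskSketch {
  int visited;
  inline void deal_with_reference(void* obj);   // defined in task.inline.hpp
};

// --- task.inline.hpp (inline definition) -----------------------------------
inline void TaskSketch::deal_with_reference(void* obj) {
  if (obj != 0) visited++;
}

// --- closures.inline.hpp (caller) ------------------------------------------
// Because the call below must see the *definition* to inline it, this file
// has to include task.inline.hpp, not just task.hpp - the same reason
// g1OopClosures.inline.hpp now includes concurrentMark.inline.hpp.
template <class T> inline void visit_field(TaskSketch* task, T* p) {
  task->deal_with_reference((void*) *p);
}

// --- quick check ------------------------------------------------------------
#include <cassert>
int main() {
  TaskSketch task = { 0 };
  int dummy;
  void* slot = &dummy;
  visit_field(&task, &slot);
  assert(task.visited == 1);
  return 0;
}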
src/share/vm/gc_implementation/g1/g1RemSet.cpp

@@ -66,41 +66,6 @@ void ct_freq_update_histo_and_reset() {
 }
 #endif

-class IntoCSOopClosure: public OopsInHeapRegionClosure {
-  OopsInHeapRegionClosure* _blk;
-  G1CollectedHeap* _g1;
-public:
-  IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
-    _g1(g1), _blk(blk) {}
-  void set_region(HeapRegion* from) {
-    _blk->set_region(from);
-  }
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-  template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
-    if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
-  }
-  bool apply_to_weak_ref_discovered_field() { return true; }
-  bool idempotent() { return true; }
-};
-
-class VerifyRSCleanCardOopClosure: public OopClosure {
-  G1CollectedHeap* _g1;
-public:
-  VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
-  template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
-    HeapRegion* to = _g1->heap_region_containing(obj);
-    guarantee(to == NULL || !to->in_collection_set(),
-              "Missed a rem set member.");
-  }
-};
-
 G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
   : _g1(g1), _conc_refine_cards(0),
     _ct_bs(ct_bs), _g1p(_g1->g1_policy()),

@@ -332,31 +297,6 @@ void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
   _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
 }

-#ifndef PRODUCT
-class PrintRSClosure : public HeapRegionClosure {
-  int _count;
-public:
-  PrintRSClosure() : _count(0) {}
-  bool doHeapRegion(HeapRegion* r) {
-    HeapRegionRemSet* hrrs = r->rem_set();
-    _count += (int) hrrs->occupied();
-    if (hrrs->occupied() == 0) {
-      gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") "
-                          "has no remset entries\n",
-                          r->bottom(), r->end());
-    } else {
-      gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n",
-                          r->bottom(), r->end());
-      r->print();
-      hrrs->print();
-      gclog_or_tty->print("\nDone printing rem set\n");
-    }
-    return false;
-  }
-  int occupied() {return _count;}
-};
-#endif
-
 class CountRSSizeClosure: public HeapRegionClosure {
   size_t _n;
   size_t _tot;

@@ -482,10 +422,6 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
 }

 void G1RemSet::prepare_for_oops_into_collection_set_do() {
-#if G1_REM_SET_LOGGING
-  PrintRSClosure cl;
-  _g1->collection_set_iterate(&cl);
-#endif
   cleanupHRRS();
   ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
   _g1->set_refine_cte_cl_concurrency(false);

@@ -504,14 +440,6 @@ void G1RemSet::prepare_for_oops_into_collection_set_do() {
 }

-class cleanUpIteratorsClosure : public HeapRegionClosure {
-  bool doHeapRegion(HeapRegion* r) {
-    HeapRegionRemSet* hrrs = r->rem_set();
-    hrrs->init_for_par_iteration();
-    return false;
-  }
-};
-
 // This closure, applied to a DirtyCardQueueSet, is used to immediately
 // update the RSets for the regions in the CSet. For each card it iterates
 // through the oops which coincide with that card. It scans the reference

@@ -572,18 +500,13 @@ public:
 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee( _cards_scanned != NULL, "invariant" );
   _total_cards_scanned = 0;
-  for (uint i = 0; i < n_workers(); ++i)
+  for (uint i = 0; i < n_workers(); ++i) {
     _total_cards_scanned += _cards_scanned[i];
+  }
   FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
   _cards_scanned = NULL;
   // Cleanup after copy
-#if G1_REM_SET_LOGGING
-  PrintRSClosure cl;
-  _g1->heap_region_iterate(&cl);
-#endif
   _g1->set_refine_cte_cl_concurrency(true);
-  cleanUpIteratorsClosure iterClosure;
-  _g1->collection_set_iterate(&iterClosure);
   // Set all cards back to clean.
   _g1->cleanUpCardTable();
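Several of the deleted blocks (PrintRSClosure, cleanUpIteratorsClosure) were HeapRegionClosure subclasses driven by collection_set_iterate() or heap_region_iterate(), where doHeapRegion() returns true to stop the walk early. The following standalone sketch shows that iteration contract with simplified region and closure types; none of these names are the HotSpot ones.

// Standalone sketch of the HeapRegionClosure iteration pattern (not HotSpot code):
// doHeapRegion() returns true to terminate the iteration early.
#include <cstdio>
#include <cstddef>
#include <vector>

struct RegionSketch { int index; size_t rs_occupied; };

struct RegionClosureSketch {
  virtual bool doHeapRegion(RegionSketch* r) = 0;
  virtual ~RegionClosureSketch() { }
};

struct CountRSClosureSketch : public RegionClosureSketch {
  size_t total;
  CountRSClosureSketch() : total(0) { }
  virtual bool doHeapRegion(RegionSketch* r) {
    total += r->rs_occupied;
    return false;              // false == keep iterating
  }
};

static void iterate(std::vector<RegionSketch>& regions, RegionClosureSketch* cl) {
  for (size_t i = 0; i < regions.size(); i++) {
    if (cl->doHeapRegion(&regions[i])) break;
  }
}

int main() {
  std::vector<RegionSketch> cset;
  RegionSketch a = { 0, 10 }, b = { 1, 32 };
  cset.push_back(a);
  cset.push_back(b);

  CountRSClosureSketch cl;
  iterate(cset, &cl);
  std::printf("total remset entries: %zu\n", cl.total);   // 42
  return 0;
}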
src/share/vm/gc_implementation/g1/g1RemSet.hpp

@@ -142,8 +142,6 @@ public:
   virtual void prepare_for_verify();
 };

-#define G1_REM_SET_LOGGING 0
-
 class CountNonCleanMemRegionClosure: public MemRegionClosure {
   G1CollectedHeap* _g1;
   int _n;
src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp

@@ -65,12 +65,6 @@ inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
   HeapRegion* to = _g1->heap_region_containing(obj);
   if (to != NULL && from != to) {
-#if G1_REM_SET_LOGGING
-    gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
-                           " for region [" PTR_FORMAT ", " PTR_FORMAT ")",
-                           p, obj,
-                           to->bottom(), to->end());
-#endif
     assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
     to->rem_set()->add_reference(p, tid);
   }
src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp

 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -45,8 +45,7 @@ typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
 class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
-class FilterInHeapRegionAndIntoCSClosure;
-class FilterAndMarkInHeapRegionAndIntoCSClosure;
+class G1CMOopClosure;

 #ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES
 #error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined."

@@ -58,8 +57,7 @@ class FilterAndMarkInHeapRegionAndIntoCSClosure;
       f(G1ParPushHeapRSClosure,_nv)                 \
       f(FilterIntoCSClosure,_nv)                    \
       f(FilterOutOfRegionClosure,_nv)               \
-      f(FilterInHeapRegionAndIntoCSClosure,_nv)     \
-      f(FilterAndMarkInHeapRegionAndIntoCSClosure,_nv)
+      f(G1CMOopClosure,_nv)

 #ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES
 #error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined."
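The closure list above is an X-macro: the list is written once and expanded with different f arguments to generate the non-virtual ("_nv") dispatch plumbing for each closure, which is why G1CMOopClosure is added here and the two removed closures are dropped from the list. A small standalone example of the same expansion trick follows, with made-up closure names rather than the real list.

// Standalone sketch of the X-macro pattern behind the specialized-closure
// list (hypothetical names): one list macro, expanded several times.
#include <cstdio>

#define MY_CLOSURES(f)     \
      f(FilterIntoCS)      \
      f(FilterOutOfRegion) \
      f(ConcurrentMarkVisit)

// Expansion 1: an enum tag per closure.
#define MAKE_TAG(name) name##_tag,
enum ClosureTag { MY_CLOSURES(MAKE_TAG) num_closure_tags };

// Expansion 2: a printable name per closure.
#define MAKE_NAME(name) #name,
static const char* const closure_names[] = { MY_CLOSURES(MAKE_NAME) };

int main() {
  for (int i = 0; i < num_closure_tags; i++) {
    std::printf("specialized closure %d: %s\n", i, closure_names[i]);
  }
  return 0;
}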
src/share/vm/gc_implementation/g1/heapRegion.cpp

@@ -60,13 +60,14 @@ private:
   oop _containing_obj;
   bool _failures;
   int _n_failures;
-  bool _use_prev_marking;
+  VerifyOption _vo;
 public:
-  // use_prev_marking == true -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
-  VerifyLiveClosure(G1CollectedHeap* g1h, bool use_prev_marking) :
+  // _vo == UsePrevMarking -> use "prev" marking information,
+  // _vo == UseNextMarking -> use "next" marking information,
+  // _vo == UseMarkWord    -> use mark word from object header.
+  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
     _g1h(g1h), _bs(NULL), _containing_obj(NULL),
-    _failures(false), _n_failures(0), _use_prev_marking(use_prev_marking)
+    _failures(false), _n_failures(0), _vo(vo)
   {
     BarrierSet* bs = _g1h->barrier_set();
     if (bs->is_a(BarrierSet::CardTableModRef))

@@ -95,14 +96,14 @@ public:
   template <class T> void do_oop_work(T* p) {
     assert(_containing_obj != NULL, "Precondition");
-    assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
+    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
            "Precondition");
     T heap_oop = oopDesc::load_heap_oop(p);
     if (!oopDesc::is_null(heap_oop)) {
       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       bool failed = false;
       if (!_g1h->is_in_closed_subset(obj) ||
-          _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
+          _g1h->is_obj_dead_cond(obj, _vo)) {
         if (!_failures) {
           gclog_or_tty->print_cr("");
           gclog_or_tty->print_cr("----------");

@@ -159,20 +160,16 @@ public:
           gclog_or_tty->print_cr("----------");
         }
         gclog_or_tty->print_cr("Missing rem set entry:");
-        gclog_or_tty->print_cr("Field " PTR_FORMAT
-                               " of obj " PTR_FORMAT ", "
-                               "in region %d [" PTR_FORMAT ", " PTR_FORMAT "),",
-                               p, (void*) _containing_obj,
-                               from->hrs_index(),
-                               from->bottom(), from->end());
+        gclog_or_tty->print_cr("Field " PTR_FORMAT " "
+                               "of obj " PTR_FORMAT ", "
+                               "in region " HR_FORMAT,
+                               p, (void*) _containing_obj,
+                               HR_FORMAT_PARAMS(from));
         _containing_obj->print_on(gclog_or_tty);
-        gclog_or_tty->print_cr("points to obj " PTR_FORMAT
-                               " in region %d [" PTR_FORMAT ", " PTR_FORMAT ").",
-                               (void*) obj, to->hrs_index(),
-                               to->bottom(), to->end());
+        gclog_or_tty->print_cr("points to obj " PTR_FORMAT " "
+                               "in region " HR_FORMAT,
+                               (void*) obj,
+                               HR_FORMAT_PARAMS(to));
         obj->print_on(gclog_or_tty);
         gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                cv_obj, cv_field);

@@ -484,11 +481,10 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
-HeapRegion::HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
-                       MemRegion mr, bool is_zeroed)
+HeapRegion::HeapRegion(size_t hrs_index,
+                       G1BlockOffsetSharedArray* sharedOffsetArray,
+                       MemRegion mr, bool is_zeroed)
   : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
-    _next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(-1),
+    _next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(hrs_index),
     _humongous_type(NotHumongous), _humongous_start_region(NULL),
     _in_collection_set(false), _is_gc_alloc_region(false),
     _next_in_special_set(NULL), _orig_end(NULL),

@@ -740,20 +736,20 @@ void HeapRegion::print_on(outputStream* st) const {
 void HeapRegion::verify(bool allow_dirty) const {
   bool dummy = false;
-  verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
+  verify(allow_dirty, VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
 }

 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 // We would need a mechanism to make that code skip dead objects.

 void HeapRegion::verify(bool allow_dirty,
-                        bool use_prev_marking,
+                        VerifyOption vo,
                         bool* failures) const {
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
   *failures = false;
   HeapWord* p = bottom();
   HeapWord* prev_p = NULL;
-  VerifyLiveClosure vl_cl(g1, use_prev_marking);
+  VerifyLiveClosure vl_cl(g1, vo);
   bool is_humongous = isHumongous();
   bool do_bot_verify = !is_young();
   size_t object_num = 0;

@@ -778,7 +774,7 @@ void HeapRegion::verify(bool allow_dirty,
       return;
     }

-    if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
+    if (!g1->is_obj_dead_cond(obj, this, vo)) {
       if (obj->is_oop()) {
         klassOop klass = obj->klass();
         if (!klass->is_perm()) {
src/share/vm/gc_implementation/g1/heapRegion.hpp

@@ -52,9 +52,11 @@ class HeapRegionRemSetIterator;
 class HeapRegion;
 class HeapRegionSetBase;

-#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
-#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \
-                               (_hr_)->top(), (_hr_)->end()
+#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
+#define HR_FORMAT_PARAMS(_hr_)                                                \
+                (_hr_)->hrs_index(),                                          \
+                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
+                (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()

 // A dirty card to oop closure for heap regions.  It
 // knows how to get the G1 heap and how to use the bitmap

@@ -237,9 +239,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

 protected:
-  // If this region is a member of a HeapRegionSeq, the index in that
-  // sequence, otherwise -1.
-  int  _hrs_index;
+  // The index of this region in the heap region sequence.
+  size_t _hrs_index;

   HumongousType _humongous_type;
   // For a humongous region, region in which it starts.

@@ -296,8 +297,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   enum YoungType {
     NotYoung,                   // a region is not young
     Young,                      // a region is young
-    Survivor                    // a region is young and it contains
-                                // survivor
+    Survivor                    // a region is young and it contains survivors
   };

   volatile YoungType _young_type;

@@ -351,7 +351,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
 public:
   // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
-  HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
+  HeapRegion(size_t hrs_index,
+             G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr, bool is_zeroed);

   static int LogOfHRGrainBytes;

@@ -393,8 +394,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // If this region is a member of a HeapRegionSeq, the index in that
   // sequence, otherwise -1.
-  int hrs_index() const { return _hrs_index; }
-  void set_hrs_index(int index) { _hrs_index = index; }
+  size_t hrs_index() const { return _hrs_index; }

   // The number of bytes marked live in the region in the last marking phase.
   size_t marked_bytes()    { return _prev_marked_bytes; }

@@ -579,6 +579,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

+  HeapWord* orig_end() { return _orig_end; }
+
   // Allows logical separation between objects allocated before and after.
   void save_marks();

@@ -853,14 +855,20 @@ class HeapRegion: public G1OffsetTableContigSpace {
   void print() const;
   void print_on(outputStream* st) const;

-  // use_prev_marking == true -> use "prev" marking information,
-  // use_prev_marking == false -> use "next" marking information
+  // vo == UsePrevMarking -> use "prev" marking information,
+  // vo == UseNextMarking -> use "next" marking information
+  // vo == UseMarkWord    -> use the mark word in the object header
+  //
   // NOTE: Only the "prev" marking information is guaranteed to be
   // consistent most of the time, so most calls to this should use
-  // use_prev_marking == true. Currently, there is only one case where
-  // this is called with use_prev_marking == false, which is to verify
-  // the "next" marking information at the end of remark.
-  void verify(bool allow_dirty, bool use_prev_marking, bool* failures) const;
+  // vo == UsePrevMarking.
+  // Currently, there is only one case where this is called with
+  // vo == UseNextMarking, which is to verify the "next" marking
+  // information at the end of remark.
+  // Currently there is only one place where this is called with
+  // vo == UseMarkWord, which is to verify the marking during a
+  // full GC.
+  void verify(bool allow_dirty, VerifyOption vo, bool* failures) const;

   // Override; it uses the "prev" marking information
   virtual void verify(bool allow_dirty) const;
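HR_FORMAT and HR_FORMAT_PARAMS are a matched pair: the format string and the argument list must stay in sync, and the new version prints the region index with SIZE_FORMAT plus an S/E/- young-generation tag. Below is a standalone sketch of that pairing, using %zu and %p in place of SIZE_FORMAT and PTR_FORMAT and a simplified region struct; it is an illustration, not the HotSpot macros.

// Standalone sketch of the HR_FORMAT / HR_FORMAT_PARAMS pairing.
#include <cstdio>
#include <cstddef>

struct RegionSketch {
  size_t index;
  bool   survivor;
  bool   young;
  void*  bottom;
  void*  top;
  void*  end;
};

#define HR_FORMAT_SKETCH "%zu:(%s)[%p,%p,%p]"
#define HR_FORMAT_PARAMS_SKETCH(r)                              \
  (r)->index,                                                   \
  (r)->survivor ? "S" : (r)->young ? "E" : "-",                 \
  (r)->bottom, (r)->top, (r)->end

int main() {
  char heap[4096];
  RegionSketch r = { 7, false, true, heap, heap + 1024, heap + 4096 };
  // Same shape as: gclog_or_tty->print_cr("in region " HR_FORMAT, HR_FORMAT_PARAMS(hr));
  std::printf("in region " HR_FORMAT_SKETCH "\n", HR_FORMAT_PARAMS_SKETCH(&r));
  return 0;
}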
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp

@@ -834,7 +834,7 @@ PosParPRT* OtherRegionsTable::delete_region_table() {
 #endif

   // Set the corresponding coarse bit.
-  int max_hrs_index = max->hr()->hrs_index();
+  size_t max_hrs_index = max->hr()->hrs_index();
   if (!_coarse_map.at(max_hrs_index)) {
     _coarse_map.at_put(max_hrs_index, true);
     _n_coarse_entries++;

@@ -860,7 +860,8 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                               BitMap* region_bm, BitMap* card_bm) {
   // First eliminated garbage regions from the coarse map.
   if (G1RSScrubVerbose)
-    gclog_or_tty->print_cr("Scrubbing region %d:", hr()->hrs_index());
+    gclog_or_tty->print_cr("Scrubbing region " SIZE_FORMAT ":",
+                           hr()->hrs_index());

   assert(_coarse_map.size() == region_bm->size(), "Precondition");
   if (G1RSScrubVerbose)

@@ -878,7 +879,8 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
       PosParPRT* nxt = cur->next();
       // If the entire region is dead, eliminate.
       if (G1RSScrubVerbose)
-        gclog_or_tty->print_cr("  For other region %d:", cur->hr()->hrs_index());
+        gclog_or_tty->print_cr("  For other region " SIZE_FORMAT ":",
+                               cur->hr()->hrs_index());
       if (!region_bm->at(cur->hr()->hrs_index())) {
         *prev = nxt;
         cur->set_next(NULL);

@@ -994,7 +996,7 @@ void OtherRegionsTable::clear() {
 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
-  size_t hrs_ind = (size_t) from_hr->hrs_index();
+  size_t hrs_ind = from_hr->hrs_index();
   size_t ind = hrs_ind & _mod_max_fine_entries_mask;
   if (del_single_region_table(ind, from_hr)) {
     assert(!_coarse_map.at(hrs_ind), "Inv");

@@ -1002,7 +1004,7 @@ void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
     _coarse_map.par_at_put(hrs_ind, 0);
   }
   // Check to see if any of the fcc entries come from here.
-  int hr_ind = hr()->hrs_index();
+  size_t hr_ind = hr()->hrs_index();
   for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
     int fcc_ent = _from_card_cache[tid][hr_ind];
     if (fcc_ent != -1) {

@@ -1083,8 +1085,9 @@ int HeapRegionRemSet::num_par_rem_sets() {
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
-  : _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { }
+  : _bosa(bosa), _other_regions(hr) {
+  reset_for_par_iteration();
+}

 void HeapRegionRemSet::setup_remset_size() {
   // Setup sparse and fine-grain tables sizes.

@@ -1099,10 +1102,6 @@ void HeapRegionRemSet::setup_remset_size() {
   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
 }

-void HeapRegionRemSet::init_for_par_iteration() {
-  _iter_state = Unclaimed;
-}
-
 bool HeapRegionRemSet::claim_iter() {
   if (_iter_state != Unclaimed) return false;
   jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);

@@ -1117,7 +1116,6 @@ bool HeapRegionRemSet::iter_is_complete() {
   return _iter_state == Complete;
 }

-
 void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
   iter->initialize(this);
 }

@@ -1130,7 +1128,7 @@ void HeapRegionRemSet::print() const {
   while (iter.has_next(card_index)) {
     HeapWord* card_start =
       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
-    gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
+    gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
   }
   // XXX
   if (iter.n_yielded() != occupied()) {

@@ -1157,6 +1155,14 @@ void HeapRegionRemSet::par_cleanup() {
 void HeapRegionRemSet::clear() {
   _other_regions.clear();
   assert(occupied() == 0, "Should be clear.");
+  reset_for_par_iteration();
+}
+
+void HeapRegionRemSet::reset_for_par_iteration() {
+  _iter_state = Unclaimed;
+  _iter_claimed = 0;
+  // It's good to check this to make sure that the two methods are in sync.
+  assert(verify_ready_for_par_iteration(), "post-condition");
 }

 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp

@@ -262,8 +262,6 @@ public:
   virtual void cleanup() = 0;
 #endif

-  // Should be called from single-threaded code.
-  void init_for_par_iteration();
   // Attempt to claim the region.  Returns true iff this call caused an
   // atomic transition from Unclaimed to Claimed.
   bool claim_iter();

@@ -273,7 +271,6 @@ public:
   bool iter_is_complete();

   // Support for claiming blocks of cards during iteration
-
   void set_iter_claimed(size_t x) { _iter_claimed = (jlong)x; }
   size_t iter_claimed() const { return (size_t)_iter_claimed; }
   // Claim the next block of cards
   size_t iter_claimed_next(size_t step) {

@@ -284,6 +281,11 @@ public:
     } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
     return current;
   }
+  void reset_for_par_iteration();
+
+  bool verify_ready_for_par_iteration() {
+    return (_iter_state == Unclaimed) && (_iter_claimed == 0);
+  }

   // Initialize the given iterator to iterate over this rem set.
   void init_iterator(HeapRegionRemSetIterator* iter) const;
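reset_for_par_iteration() puts the remembered set back into the Unclaimed state with no cards claimed, which is exactly what verify_ready_for_par_iteration() checks, while iter_claimed_next() lets workers grab blocks of card indices with a CAS loop. The sketch below reproduces that claiming loop with std::atomic standing in for HotSpot's Atomic::cmpxchg; it is an analogue, not the VM code.

// Standalone sketch of the card-block claiming loop shown above.
#include <atomic>
#include <cstdio>
#include <cstddef>

class ParIterStateSketch {
  std::atomic<long long> _iter_claimed;
public:
  ParIterStateSketch() : _iter_claimed(0) { }

  // Mirrors HeapRegionRemSet::reset_for_par_iteration(): claiming starts over.
  void reset_for_par_iteration() { _iter_claimed.store(0); }
  bool verify_ready_for_par_iteration() { return _iter_claimed.load() == 0; }

  // Mirrors iter_claimed_next(step): returns the start of the claimed block.
  size_t iter_claimed_next(size_t step) {
    long long current = _iter_claimed.load();
    long long next;
    do {
      next = current + (long long) step;
      // compare_exchange_weak refreshes 'current' with the latest value on failure.
    } while (!_iter_claimed.compare_exchange_weak(current, next));
    return (size_t) current;
  }
};

int main() {
  ParIterStateSketch rs;
  std::printf("block 1 starts at %zu\n", rs.iter_claimed_next(8));  // 0
  std::printf("block 2 starts at %zu\n", rs.iter_claimed_next(8));  // 8
  rs.reset_for_par_iteration();
  std::printf("ready again: %d\n", rs.verify_ready_for_par_iteration()); // 1
  return 0;
}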
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp (diff collapsed, not shown)
src/share/vm/gc_implementation/g1/heapRegionSeq.hpp (diff collapsed, not shown)
src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp

 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -25,23 +25,42 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP

 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"

-inline HeapRegion* HeapRegionSeq::addr_to_region(const void* addr) {
-  assert(_seq_bottom != NULL, "bad _seq_bottom in addr_to_region");
-  if ((char*) addr >= _seq_bottom) {
-    size_t diff = (size_t) pointer_delta((HeapWord*) addr,
-                                         (HeapWord*) _seq_bottom);
-    int index = (int) (diff >> HeapRegion::LogOfHRGrainWords);
-    assert(index >= 0, "invariant / paranoia");
-    if (index < _regions.length()) {
-      HeapRegion* hr = _regions.at(index);
-      assert(hr->is_in_reserved(addr), "addr_to_region is wrong...");
-      return hr;
-    }
-  }
-  return NULL;
-}
+inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
+  assert(_heap_bottom <= addr && addr < _heap_end,
+         err_msg("addr: " PTR_FORMAT " bottom: " PTR_FORMAT " end: " PTR_FORMAT,
+                 addr, _heap_bottom, _heap_end));
+  size_t index = (size_t) addr >> _region_shift;
+  return index;
+}
+
+inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
+  assert(_heap_bottom <= addr && addr < _heap_end,
+         err_msg("addr: " PTR_FORMAT " bottom: " PTR_FORMAT " end: " PTR_FORMAT,
+                 addr, _heap_bottom, _heap_end));
+  size_t index_biased = addr_to_index_biased(addr);
+  HeapRegion* hr = _regions_biased[index_biased];
+  assert(hr != NULL, "invariant");
+  return hr;
+}
+
+inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
+  if (addr != NULL && addr < _heap_end) {
+    assert(addr >= _heap_bottom,
+           err_msg("addr: " PTR_FORMAT " bottom: " PTR_FORMAT, addr, _heap_bottom));
+    return addr_to_region_unsafe(addr);
+  }
+  return NULL;
+}
+
+inline HeapRegion* HeapRegionSeq::at(size_t index) const {
+  assert(index < length(), "pre-condition");
+  HeapRegion* hr = _regions[index];
+  assert(hr != NULL, "sanity");
+  assert(hr->hrs_index() == index, "sanity");
+  return hr;
+}

 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
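addr_to_index_biased() turns an address into a region index with a single shift because _regions_biased is a biased table: the array pointer is pre-offset by (heap bottom >> shift), so no subtraction of the heap base is needed on the hot path. The standalone sketch below shows the arithmetic with plain ints; note that the biased pointer deliberately points outside the array, which mirrors the VM's trick but is not something to copy into ordinary application code.

// Standalone sketch of the biased region-table lookup (simplified types).
#include <cassert>
#include <cstdio>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t    region_shift = 20;                    // e.g. 1 MB regions
  const uintptr_t heap_bottom  = (uintptr_t) 64 << 20;  // pretend heap base
  const size_t    num_regions  = 16;

  int regions[num_regions];                       // the "_regions" array
  for (size_t i = 0; i < num_regions; i++) regions[i] = (int) i;

  // Bias the array so that (addr >> region_shift) indexes it directly.
  // (Out-of-array pointer arithmetic: fine as an illustration of the trick.)
  size_t bias = heap_bottom >> region_shift;
  int* regions_biased = regions - bias;           // the "_regions_biased" pointer

  uintptr_t addr = heap_bottom + 3 * (1u << region_shift) + 12345;
  size_t index_biased = addr >> region_shift;     // addr_to_index_biased()
  int region = regions_biased[index_biased];      // addr_to_region_unsafe()

  assert(region == 3);
  std::printf("address %#lx falls in region %d\n", (unsigned long) addr, region);
  return 0;
}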
src/share/vm/gc_implementation/g1/heapRegionSet.hpp

 /*
- * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
src/share/vm/gc_implementation/g1/heapRegionSets.cpp

@@ -23,6 +23,7 @@
  */

 #include "precompiled.hpp"
+#include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"

 //////////////////// FreeRegionList ////////////////////

@@ -38,6 +39,16 @@ const char* FreeRegionList::verify_region_extra(HeapRegion* hr) {
 //////////////////// MasterFreeRegionList ////////////////////

+const char* MasterFreeRegionList::verify_region_extra(HeapRegion* hr) {
+  // We should reset the RSet for parallel iteration before we add it
+  // to the master free list so that it is ready when the region is
+  // re-allocated.
+  if (!hr->rem_set()->verify_ready_for_par_iteration()) {
+    return "the region's RSet should be ready for parallel iteration";
+  }
+  return FreeRegionList::verify_region_extra(hr);
+}
+
 bool MasterFreeRegionList::check_mt_safety() {
   // Master Free List MT safety protocol:
   // (a) If we're at a safepoint, operations on the master free list
src/share/vm/gc_implementation/g1/heapRegionSets.hpp

 /*
- * copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -44,6 +44,7 @@ public:
 class MasterFreeRegionList : public FreeRegionList {
 protected:
+  virtual const char* verify_region_extra(HeapRegion* hr);
   virtual bool check_mt_safety();

 public:
src/share/vm/gc_implementation/g1/sparsePRT.cpp

@@ -481,8 +481,9 @@ size_t SparsePRT::mem_size() const {
 bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
 #if SPARSE_PRT_VERBOSE
-  gclog_or_tty->print_cr("  Adding card %d from region %d to region %d sparse.",
-                         card_index, region_id, _hr->hrs_index());
+  gclog_or_tty->print_cr("  Adding card %d from region %d to region "
+                         SIZE_FORMAT " sparse.",
+                         card_index, region_id, _hr->hrs_index());
 #endif
   if (_next->occupied_entries() * 2 > _next->capacity()) {
     expand();

@@ -533,8 +534,8 @@ void SparsePRT::expand() {
   _next = new RSHashTable(last->capacity() * 2);

 #if SPARSE_PRT_VERBOSE
-  gclog_or_tty->print_cr("  Expanded sparse table for %d to %d.",
-                         _hr->hrs_index(), _next->capacity());
+  gclog_or_tty->print_cr("  Expanded sparse table for " SIZE_FORMAT " to %d.",
+                         _hr->hrs_index(), _next->capacity());
 #endif
   for (size_t i = 0; i < last->capacity(); i++) {
     SparsePRTEntry* e = last->entry((int)i);
src/share/vm/gc_implementation/g1/vm_operations_g1.cpp

@@ -99,6 +99,18 @@ void VM_G1IncCollectionPause::doit() {
     // At this point we are supposed to start a concurrent cycle. We
     // will do so if one is not already in progress.
     bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
+
+    // The above routine returns true if we were able to force the
+    // next GC pause to be an initial mark; it returns false if a
+    // marking cycle is already in progress.
+    //
+    // If a marking cycle is already in progress just return and skip
+    // the pause - the requesting thread should block in doit_epilogue
+    // until the marking cycle is complete.
+    if (!res) {
+      assert(_word_size == 0, "ExplicitGCInvokesConcurrent shouldn't be allocating");
+      return;
+    }
   }

   _pause_succeeded =
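The added comment documents the contract of force_initial_mark_if_outside_cycle(): it returns false when a marking cycle is already running, in which case the pause is skipped and the requesting thread waits for that cycle in doit_epilogue(). A standalone sketch of that control flow follows, using hypothetical names rather than the HotSpot API.

// Standalone sketch of the "skip the pause if marking is in progress" flow.
#include <cstdio>

struct PolicySketch {
  bool during_cycle;
  // Analogue of force_initial_mark_if_outside_cycle(): succeeds only when
  // no concurrent marking cycle is in progress.
  bool force_initial_mark_if_outside_cycle() {
    if (during_cycle) return false;
    during_cycle = true;        // the next pause will be an initial mark
    return true;
  }
};

// Analogue of VM_G1IncCollectionPause::doit() for the concurrent-GC case.
static void do_explicit_concurrent_gc(PolicySketch* policy) {
  bool res = policy->force_initial_mark_if_outside_cycle();
  if (!res) {
    std::printf("marking already in progress: skip pause, wait for it\n");
    return;
  }
  std::printf("starting initial-mark pause\n");
}

int main() {
  PolicySketch policy = { false };
  do_explicit_concurrent_gc(&policy);   // starts a cycle
  do_explicit_concurrent_gc(&policy);   // cycle in progress: skipped
  return 0;
}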
src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp (diff collapsed, not shown)
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp (diff collapsed, not shown)
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp (diff collapsed, not shown)
src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp

@@ -182,12 +182,12 @@ size_t PSOldGen::contiguous_available() const {

 // Allocation. We report all successful allocations to the size policy
 // Note that the perm gen does not use this method, and should not!
-HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) {
+HeapWord* PSOldGen::allocate(size_t word_size) {
   assert_locked_or_safepoint(Heap_lock);
-  HeapWord* res = allocate_noexpand(word_size, is_tlab);
+  HeapWord* res = allocate_noexpand(word_size);

   if (res == NULL) {
-    res = expand_and_allocate(word_size, is_tlab);
+    res = expand_and_allocate(word_size);
   }

   // Allocations in the old generation need to be reported

@@ -199,13 +199,12 @@ HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) {
   return res;
 }

-HeapWord* PSOldGen::expand_and_allocate(size_t word_size, bool is_tlab) {
-  assert(!is_tlab, "TLAB's are not supported in PSOldGen");
+HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
   expand(word_size*HeapWordSize);
   if (GCExpandToAllocateDelayMillis > 0) {
     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
   }
-  return allocate_noexpand(word_size, is_tlab);
+  return allocate_noexpand(word_size);
 }

 HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
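PSOldGen never hands out TLABs, so the is_tlab flag was dead weight and the allocate path collapses to: try the bump-pointer allocation, and only on failure expand the generation and retry. Below is a simplified standalone sketch of that shape, using a toy bump-pointer space rather than the HotSpot class.

// Standalone sketch of the allocate / expand-and-retry shape (toy space).
#include <cstdio>
#include <cstddef>
#include <vector>

class OldGenSketch {
  std::vector<size_t> _space;   // the "old gen" backing store, in words
  size_t _used_words;

  // allocate_noexpand(): bump-pointer allocate, fail if it does not fit.
  size_t* allocate_noexpand(size_t word_size) {
    if (_used_words + word_size > _space.size()) return NULL;
    size_t* res = &_space[_used_words];
    _used_words += word_size;
    return res;
  }

  // expand_and_allocate(): grow the generation, then retry once.
  size_t* expand_and_allocate(size_t word_size) {
    _space.resize(_space.size() + word_size);
    return allocate_noexpand(word_size);
  }

public:
  OldGenSketch(size_t capacity_words)
    : _space(capacity_words), _used_words(0) { }

  // Mirrors PSOldGen::allocate(size_t): try in place, expand on failure.
  size_t* allocate(size_t word_size) {
    size_t* res = allocate_noexpand(word_size);
    if (res == NULL) {
      res = expand_and_allocate(word_size);
    }
    return res;
  }
};

int main() {
  OldGenSketch gen(100);
  std::printf("fits:   %s\n", gen.allocate(80) != NULL ? "yes" : "no"); // yes
  std::printf("expand: %s\n", gen.allocate(60) != NULL ? "yes" : "no"); // yes, after expanding
  return 0;
}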
src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp (diff collapsed, not shown)
src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp (diff collapsed, not shown)
src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp (diff collapsed, not shown)
src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp (diff collapsed, not shown)
src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp (diff collapsed, not shown)
src/share/vm/gc_implementation/shared/allocationStats.hpp (diff collapsed, not shown)
src/share/vm/gc_implementation/shared/concurrentGCThread.cpp (diff collapsed, not shown)
src/share/vm/gc_implementation/shared/concurrentGCThread.hpp (diff collapsed, not shown)
src/share/vm/gc_interface/collectedHeap.hpp (diff collapsed, not shown)
src/share/vm/gc_interface/collectedHeap.inline.hpp (diff collapsed, not shown)
src/share/vm/memory/collectorPolicy.cpp (diff collapsed, not shown)
src/share/vm/memory/collectorPolicy.hpp (diff collapsed, not shown)
src/share/vm/memory/genCollectedHeap.cpp (diff collapsed, not shown)
src/share/vm/memory/genCollectedHeap.hpp (diff collapsed, not shown)
src/share/vm/memory/universe.cpp (diff collapsed, not shown)
src/share/vm/memory/universe.hpp (diff collapsed, not shown)
src/share/vm/oops/methodOop.cpp (diff collapsed, not shown)
src/share/vm/oops/typeArrayKlass.cpp (diff collapsed, not shown)
src/share/vm/prims/jni.cpp (diff collapsed, not shown)
src/share/vm/runtime/arguments.cpp (diff collapsed, not shown)
src/share/vm/runtime/atomic.cpp (diff collapsed, not shown)
src/share/vm/runtime/atomic.hpp (diff collapsed, not shown)
src/share/vm/runtime/globals.hpp (diff collapsed, not shown)
src/share/vm/runtime/java.cpp (diff collapsed, not shown)
src/share/vm/runtime/safepoint.cpp (diff collapsed, not shown)
src/share/vm/runtime/thread.cpp (diff collapsed, not shown)
src/share/vm/utilities/bitMap.hpp (diff collapsed, not shown)
src/share/vm/utilities/ostream.cpp (diff collapsed, not shown)
src/share/vm/utilities/ostream.hpp (diff collapsed, not shown)
src/share/vm/utilities/quickSort.cpp (new file; diff collapsed, not shown)
src/share/vm/utilities/quickSort.hpp (new file; diff collapsed, not shown)
test/gc/6941923/test6941923.sh (new file; diff collapsed, not shown)