openanolis / dragonwell8_hotspot

Commit d2f0b980
Authored June 10, 2011 by minqi
Merge of parents e5224dcd and f122f55f

Showing 11 changed files with 561 additions and 557 deletions (+561, -557)
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp         +129 -145
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp         +31  -31
src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp  +8   -10
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp       +6   -15
src/share/vm/gc_implementation/g1/heapRegion.cpp              +12  -17
src/share/vm/gc_implementation/g1/heapRegion.hpp              +13  -11
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp        +7   -5
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp           +204 -243
src/share/vm/gc_implementation/g1/heapRegionSeq.hpp           +113 -62
src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp    +33  -14
src/share/vm/gc_implementation/g1/sparsePRT.cpp               +5   -4
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -578,16 +578,16 @@ HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
   }
   if (res == NULL && do_expand) {
     if (expand(word_size * HeapWordSize)) {
-      // The expansion succeeded and so we should have at least one
-      // region on the free list.
-      res = _free_list.remove_head();
+      // Even though the heap was expanded, it might not have reached
+      // the desired size. So, we cannot assume that the allocation
+      // will succeed.
+      res = _free_list.remove_head_or_null();
     }
   }
   if (res != NULL) {
     if (G1PrintHeapRegions) {
-      gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT","PTR_FORMAT"], "
-                             "top "PTR_FORMAT,
-                             res->hrs_index(), res->bottom(), res->end(), res->top());
+      gclog_or_tty->print_cr("new alloc region "HR_FORMAT, HR_FORMAT_PARAMS(res));
     }
   }
   return res;

@@ -608,12 +608,12 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
   return alloc_region;
 }

-int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
-                                                       size_t word_size) {
+size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
+                                                          size_t word_size) {
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

-  int first = -1;
+  size_t first = G1_NULL_HRS_INDEX;
   if (num_regions == 1) {
     // Only one region to allocate, no need to go through the slower
     // path. The caller will attempt the expansion if this fails, so
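The hunk above is the heart of the change: region indices move from signed int with -1 as the failure value to size_t with G1_NULL_HRS_INDEX, which heapRegionSeq.hpp below defines as ((size_t) -1). A minimal sketch of that unsigned-sentinel pattern; find_first_empty is a hypothetical stand-in, not a HotSpot function:

    #include <cstddef>
    #include <cstdio>

    #define G1_NULL_HRS_INDEX ((size_t) -1)

    // Hypothetical search helper, for illustration only.
    size_t find_first_empty(const bool* empty, size_t len) {
      for (size_t i = 0; i < len; ++i) {
        if (empty[i]) return i;   // found an empty region
      }
      return G1_NULL_HRS_INDEX;   // largest size_t value, never a valid index
    }

    int main() {
      bool regions[4] = { false, false, true, false };
      size_t idx = find_first_empty(regions, 4);
      if (idx != G1_NULL_HRS_INDEX) printf("first empty region: %zu\n", idx);
      return 0;
    }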
@@ -622,7 +622,7 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
     if (hr != NULL) {
       first = hr->hrs_index();
     } else {
-      first = -1;
+      first = G1_NULL_HRS_INDEX;
     }
   } else {
     // We can't allocate humongous regions while cleanupComplete() is

@@ -637,10 +637,10 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
     append_secondary_free_list_if_not_empty_with_lock();

     if (free_regions() >= num_regions) {
-      first = _hrs->find_contiguous(num_regions);
-      if (first != -1) {
-        for (int i = first; i < first + (int) num_regions; ++i) {
-          HeapRegion* hr = _hrs->at(i);
+      first = _hrs.find_contiguous(num_regions);
+      if (first != G1_NULL_HRS_INDEX) {
+        for (size_t i = first; i < first + num_regions; ++i) {
+          HeapRegion* hr = region_at(i);
           assert(hr->is_empty(), "sanity");
           assert(is_on_master_free_list(hr), "sanity");
           hr->set_pending_removal(true);

@@ -653,15 +653,15 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
 }

 HeapWord*
-G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
+G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
                                                            size_t num_regions,
                                                            size_t word_size) {
-  assert(first != -1, "pre-condition");
+  assert(first != G1_NULL_HRS_INDEX, "pre-condition");
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

   // Index of last region in the series + 1.
-  int last = first + (int) num_regions;
+  size_t last = first + num_regions;

   // We need to initialize the region(s) we just discovered. This is
   // a bit tricky given that it can happen concurrently with

@@ -676,7 +676,7 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
   assert(word_size <= word_size_sum, "sanity");

   // This will be the "starts humongous" region.
-  HeapRegion* first_hr = _hrs->at(first);
+  HeapRegion* first_hr = region_at(first);
   // The header of the new object will be placed at the bottom of
   // the first region.
   HeapWord* new_obj = first_hr->bottom();

@@ -711,8 +711,8 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
   HeapRegion* hr = NULL;
-  for (int i = first + 1; i < last; ++i) {
-    hr = _hrs->at(i);
+  for (size_t i = first + 1; i < last; ++i) {
+    hr = region_at(i);
     hr->set_continuesHumongous(first_hr);
   }
   // If we have "continues humongous" regions (hr != NULL), then the

@@ -746,8 +746,8 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
   // last one) is actually used when we will free up the humongous
   // region in free_humongous_region().
   hr = NULL;
-  for (int i = first + 1; i < last; ++i) {
-    hr = _hrs->at(i);
+  for (size_t i = first + 1; i < last; ++i) {
+    hr = region_at(i);
     if ((i + 1) == last) {
       // last continues humongous region
       assert(hr->bottom() < new_top && new_top <= hr->end(),

@@ -783,9 +783,9 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   size_t num_regions =
          round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
   size_t x_size = expansion_regions();
-  size_t fs = _hrs->free_suffix();
-  int first = humongous_obj_allocate_find_first(num_regions, word_size);
-  if (first == -1) {
+  size_t fs = _hrs.free_suffix();
+  size_t first = humongous_obj_allocate_find_first(num_regions, word_size);
+  if (first == G1_NULL_HRS_INDEX) {
     // The only thing we can do now is attempt expansion.
     if (fs + x_size >= num_regions) {
       // If the number of regions we're trying to allocate for this

@@ -799,16 +799,16 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
       assert(num_regions > fs, "earlier allocation should have succeeded");

       if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
+        // Even though the heap was expanded, it might not have
+        // reached the desired size. So, we cannot assume that the
+        // allocation will succeed.
         first = humongous_obj_allocate_find_first(num_regions, word_size);
-        // If the expansion was successful then the allocation
-        // should have been successful.
-        assert(first != -1, "this should have worked");
       }
     }
   }

   HeapWord* result = NULL;
-  if (first != -1) {
+  if (first != G1_NULL_HRS_INDEX) {
     result =
       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
     assert(result != NULL, "it should always return a valid result");
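The computation at the top of humongous_obj_allocate() above sizes the request by rounding it up to a whole number of regions. A small worked example under the assumption of 1 MB regions and 8-byte words (the real values come from HeapRegion::GrainWords):

    #include <cstddef>
    #include <cstdio>

    // Assumed value for illustration: 1 MB regions of 8-byte words.
    static const size_t GrainWords = 131072;

    static size_t round_to(size_t x, size_t unit) {
      return ((x + unit - 1) / unit) * unit;  // round up to a multiple of unit
    }

    int main() {
      size_t word_size = 200000;              // a humongous allocation request
      size_t num_regions = round_to(word_size, GrainWords) / GrainWords;
      printf("%zu words -> %zu regions\n", word_size, num_regions);  // 2 regions
      return 0;
    }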
@@ -1366,6 +1366,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
   // Update the number of full collections that have been completed.
   increment_full_collections_completed(false /* concurrent */);

+  _hrs.verify_optional();
   verify_region_sets_optional();

   if (PrintHeapAtGC) {

@@ -1589,6 +1590,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
   if (expand(expand_bytes)) {
+    _hrs.verify_optional();
     verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
                                  false /* expect_null_mutator_alloc_region */);

@@ -1596,6 +1598,19 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
   return NULL;
 }

+void G1CollectedHeap::update_committed_space(HeapWord* old_end,
+                                             HeapWord* new_end) {
+  assert(old_end != new_end, "don't call this otherwise");
+  assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
+
+  // Update the committed mem region.
+  _g1_committed.set_end(new_end);
+  // Tell the card table about the update.
+  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
+  // Tell the BOT about the update.
+  _bot_shared->resize(_g1_committed.word_size());
+}
+
 bool G1CollectedHeap::expand(size_t expand_bytes) {
   size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
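update_committed_space(), added above, funnels every committed-boundary change through one helper that resizes the card table's covered region and the block offset table. A compile-ready sketch of the pattern, with hypothetical names standing in for the HotSpot structures:

    #include <cstdio>

    // Illustrative only: after the committed boundary moves (either
    // direction), every dependent structure is told once from one helper,
    // instead of each caller repeating the updates.
    struct CommittedSpaceTracker {
      char* committed_end;

      void resize_card_table(char* new_end)   { (void) new_end; /* cover [bottom, new_end) */ }
      void resize_offset_table(char* new_end) { (void) new_end; /* keep the BOT in sync */ }

      void update_committed_space(char* old_end, char* new_end) {
        // The patch asserts old_end != new_end; a real change is expected.
        (void) old_end;
        committed_end = new_end;       // 1. record the new boundary
        resize_card_table(new_end);    // 2. card table must match
        resize_offset_table(new_end);  // 3. offset table must match
      }
    };

    int main() {
      static char heap[1024];
      CommittedSpaceTracker t = { heap };
      t.update_committed_space(heap, heap + 512);        // expansion
      t.update_committed_space(heap + 512, heap + 256);  // shrink uses the same path
      printf("committed ends at offset %ld\n", (long) (t.committed_end - heap));
      return 0;
    }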
@@ -1607,47 +1622,37 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
                            old_mem_size/K, aligned_expand_bytes/K);
   }

-  HeapWord* old_end = (HeapWord*)_g1_storage.high();
+  // First commit the memory.
+  HeapWord* old_end = (HeapWord*) _g1_storage.high();
   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
   if (successful) {
-    HeapWord* new_end = (HeapWord*)_g1_storage.high();
-    // Expand the committed region.
-    _g1_committed.set_end(new_end);
-    // Tell the cardtable about the expansion.
-    Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-    // And the offset table as well.
-    _bot_shared->resize(_g1_committed.word_size());
-    expand_bytes = aligned_expand_bytes;
-    HeapWord* base = old_end;
-    // Create the heap regions for [old_end, new_end)
-    while (expand_bytes > 0) {
-      HeapWord* high = base + HeapRegion::GrainWords;
-      // Create a new HeapRegion.
-      MemRegion mr(base, high);
-      bool is_zeroed = !_g1_max_committed.contains(base);
-      HeapRegion* hr = new HeapRegion(_bot_shared, mr, is_zeroed);
-      // Add it to the HeapRegionSeq.
-      _hrs->insert(hr);
-      _free_list.add_as_tail(hr);
-      // And we used up an expansion region to create it.
-      _expansion_regions--;
-      expand_bytes -= HeapRegion::GrainBytes;
-      base += HeapRegion::GrainWords;
-    }
-    assert(base == new_end, "sanity");
-    // Now update max_committed if necessary.
-    _g1_max_committed.set_end(MAX2(_g1_max_committed.end(), new_end));
+    // Then propagate this update to the necessary data structures.
+    HeapWord* new_end = (HeapWord*) _g1_storage.high();
+    update_committed_space(old_end, new_end);
+
+    FreeRegionList expansion_list("Local Expansion List");
+    MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
+    assert(mr.start() == old_end, "post-condition");
+    // mr might be a smaller region than what was requested if
+    // expand_by() was unable to allocate the HeapRegion instances
+    assert(mr.end() <= new_end, "post-condition");
+
+    size_t actual_expand_bytes = mr.byte_size();
+    assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
+    assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
+           "post-condition");
+    if (actual_expand_bytes < aligned_expand_bytes) {
+      // We could not expand _hrs to the desired size. In this case we
+      // need to shrink the committed space accordingly.
+      assert(mr.end() < new_end, "invariant");
+
+      size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
+      // First uncommit the memory.
+      _g1_storage.shrink_by(diff_bytes);
+      // Then propagate this update to the necessary data structures.
+      update_committed_space(new_end, mr.end());
+    }
+    _free_list.add_as_tail(&expansion_list);
   } else {
     // The expansion of the virtual storage space was unsuccessful.
     // Let's see if it was because we ran out of swap.
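The rewritten expand() above no longer assumes every committed byte gains a HeapRegion: _hrs.expand_by() may cover less than requested if metadata allocation fails, and the committed space is then shrunk back to what was actually covered. A toy model of that flow; grow, shrink, and seq_expand are illustrative stand-ins, not HotSpot calls:

    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-ins for _g1_storage and _hrs.
    static size_t g_committed = 0;

    static void grow(size_t bytes)   { g_committed += bytes; }
    static void shrink(size_t bytes) { g_committed -= bytes; }

    // Pretend region-metadata allocation fails after covering half the request.
    static size_t seq_expand(size_t requested) { return requested / 2; }

    static bool expand(size_t expand_bytes) {
      grow(expand_bytes);                        // 1. commit the memory
      size_t actual = seq_expand(expand_bytes);  // 2. create/reuse HeapRegions
      if (actual < expand_bytes) {
        shrink(expand_bytes - actual);           // 3. uncommit the uncovered tail
      }
      return actual > 0;
    }

    int main() {
      expand(8 << 20);
      printf("committed after partial expansion: %zu bytes\n", g_committed);
      return 0;
    }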
@@ -1667,37 +1672,31 @@ bool G1CollectedHeap::expand(size_t expand_bytes) {
   return successful;
 }

-void G1CollectedHeap::shrink_helper(size_t shrink_bytes)
-{
+void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
   size_t old_mem_size = _g1_storage.committed_size();
-
   size_t aligned_shrink_bytes =
     ReservedSpace::page_align_size_down(shrink_bytes);
   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                          HeapRegion::GrainBytes);
   size_t num_regions_deleted = 0;
-  MemRegion mr = _hrs->shrink_by(aligned_shrink_bytes, num_regions_deleted);
-  assert(mr.end() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
-  if (mr.byte_size() > 0)
+  MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
+  HeapWord* old_end = (HeapWord*) _g1_storage.high();
+  assert(mr.end() == old_end, "post-condition");
+  if (mr.byte_size() > 0) {
     _g1_storage.shrink_by(mr.byte_size());
-  assert(mr.start() == (HeapWord*)_g1_storage.high(), "Bad shrink!");
-
-  _g1_committed.set_end(mr.start());
-  _expansion_regions += num_regions_deleted;
-
-  // Tell the cardtable about it.
-  Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
-
-  // And the offset table as well.
-  _bot_shared->resize(_g1_committed.word_size());
-
-  HeapRegionRemSet::shrink_heap(n_regions());
-
-  if (Verbose && PrintGC) {
-    size_t new_mem_size = _g1_storage.committed_size();
-    gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
-                           old_mem_size/K, aligned_shrink_bytes/K,
-                           new_mem_size/K);
-  }
+    HeapWord* new_end = (HeapWord*) _g1_storage.high();
+    assert(mr.start() == new_end, "post-condition");
+
+    _expansion_regions += num_regions_deleted;
+    update_committed_space(old_end, new_end);
+    HeapRegionRemSet::shrink_heap(n_regions());
+
+    if (Verbose && PrintGC) {
+      size_t new_mem_size = _g1_storage.committed_size();
+      gclog_or_tty->print_cr("Shrinking garbage-first heap from %ldK by %ldK to %ldK",
+                             old_mem_size/K, aligned_shrink_bytes/K,
+                             new_mem_size/K);
+    }
+  }
 }

@@ -1712,6 +1711,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
   shrink_helper(shrink_bytes);
   rebuild_region_lists();

+  _hrs.verify_optional();
   verify_region_sets_optional();
 }
@@ -1890,9 +1890,9 @@ jint G1CollectedHeap::initialize() {
   _g1_storage.initialize(g1_rs, 0);
   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
-  _g1_max_committed = _g1_committed;
-  _hrs = new HeapRegionSeq(_expansion_regions);
-  guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
+  _hrs.initialize((HeapWord*) _g1_reserved.start(),
+                  (HeapWord*) _g1_reserved.end(),
+                  _expansion_regions);

   // 6843694 - ensure that the maximum region index can fit
   // in the remembered set structures.

@@ -1991,8 +1991,9 @@ jint G1CollectedHeap::initialize() {
   // Here we allocate the dummy full region that is required by the
   // G1AllocRegion class. If we don't pass an address in the reserved
   // space here, lots of asserts fire.
-  MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords);
-  HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true);
+
+  HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
+                                             _g1_reserved.start());
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
   // region will complain that it cannot support allocations without
@@ -2100,7 +2101,7 @@ public:
 size_t G1CollectedHeap::recalculate_used() const {
   SumUsedClosure blk;
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   return blk.result();
 }

@@ -2120,7 +2121,7 @@ public:
 size_t G1CollectedHeap::recalculate_used_regions() const {
   SumUsedRegionsClosure blk;
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   return blk.result();
 }
 #endif // PRODUCT

@@ -2285,8 +2286,8 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
 }

 bool G1CollectedHeap::is_in(const void* p) const {
   if (_g1_committed.contains(p)) {
-    HeapRegion* hr = _hrs->addr_to_region(p);
+    HeapRegion* hr = _hrs.addr_to_region((HeapWord*) p);
     if (hr != NULL) {
       return hr->is_in(p);
     } else {
       return _perm_gen->as_gen()->is_in(p);

@@ -2314,7 +2315,7 @@ public:
 void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
   IterateOopClosureRegionClosure blk(_g1_committed, cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   if (do_perm) {
     perm_gen()->oop_iterate(cl);
   }

@@ -2322,7 +2323,7 @@ void G1CollectedHeap::oop_iterate(OopClosure* cl, bool do_perm) {
 void G1CollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm) {
   IterateOopClosureRegionClosure blk(mr, cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   if (do_perm) {
     perm_gen()->oop_iterate(cl);
   }

@@ -2344,7 +2345,7 @@ public:
 void G1CollectedHeap::object_iterate(ObjectClosure* cl, bool do_perm) {
   IterateObjectClosureRegionClosure blk(cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
   if (do_perm) {
     perm_gen()->object_iterate(cl);
   }

@@ -2369,25 +2370,18 @@ public:
 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
   SpaceClosureRegionClosure blk(cl);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
 }

-void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) {
-  _hrs->iterate(cl);
+void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
+  _hrs.iterate(cl);
 }

 void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
-                                               HeapRegionClosure* cl) {
-  _hrs->iterate_from(r, cl);
-}
-
-void G1CollectedHeap::heap_region_iterate_from(int idx, HeapRegionClosure* cl) {
-  _hrs->iterate_from(idx, cl);
-}
-
-HeapRegion* G1CollectedHeap::region_at(size_t idx) {
-  return _hrs->at(idx);
+                                               HeapRegionClosure* cl) const {
+  _hrs.iterate_from(r, cl);
 }

 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
                                                  int worker,
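All of the iterators above share the HeapRegionClosure protocol: doHeapRegion() returning true stops the traversal, and the iterator records the early exit via incomplete(). A self-contained sketch of that protocol with simplified stand-in types:

    #include <cstddef>
    #include <cstdio>

    struct Region { bool interesting; };

    struct RegionClosure {
      bool _incomplete;
      RegionClosure() : _incomplete(false) {}
      virtual ~RegionClosure() {}
      virtual bool doHeapRegion(Region* r) = 0;  // true => stop iterating
      void incomplete() { _incomplete = true; }
    };

    void iterate(Region* regions, size_t len, RegionClosure* blk) {
      for (size_t i = 0; i < len; i += 1) {
        if (blk->doHeapRegion(&regions[i])) {
          blk->incomplete();  // record the early exit, as iterate_from() does
          return;
        }
      }
    }

    struct FindFirst : public RegionClosure {
      Region* found;
      FindFirst() : found(NULL) {}
      bool doHeapRegion(Region* r) {
        if (r->interesting) { found = r; return true; }
        return false;
      }
    };

    int main() {
      Region rs[3] = { {false}, {true}, {false} };
      FindFirst cl;
      iterate(rs, 3, &cl);
      printf("incomplete traversal: %d\n", (int) cl._incomplete);
      return 0;
    }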
@@ -2568,7 +2562,7 @@ void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
 }

 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
-  return _hrs->length() > 0 ? _hrs->at(0) : NULL;
+  return n_regions() > 0 ? region_at(0) : NULL;
 }

@@ -2881,7 +2875,7 @@ void G1CollectedHeap::verify(bool allow_dirty,
            "sanity check");
   } else {
     VerifyRegionClosure blk(allow_dirty, false, use_prev_marking);
-    _hrs->iterate(&blk);
+    heap_region_iterate(&blk);
     if (blk.failures()) {
       failures = true;
     }

@@ -2950,7 +2944,7 @@ void G1CollectedHeap::print_on(outputStream* st, bool extended) const {
 void G1CollectedHeap::print_on_extended(outputStream* st) const {
   PrintRegionClosure blk(st);
-  _hrs->iterate(&blk);
+  heap_region_iterate(&blk);
 }

 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {

@@ -2989,15 +2983,6 @@ void G1CollectedHeap::print_tracing_info() const {
   SpecializationStats::print();
 }

-int G1CollectedHeap::addr_to_arena_id(void* addr) const {
-  HeapRegion* hr = heap_region_containing(addr);
-  if (hr == NULL) {
-    return 0;
-  } else {
-    return 1;
-  }
-}
-
 G1CollectedHeap* G1CollectedHeap::heap() {
   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
          "not a garbage-first heap");

@@ -3477,6 +3462,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
     }
   }

+  _hrs.verify_optional();
   verify_region_sets_optional();

   TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());

@@ -3609,8 +3595,8 @@ class FindGCAllocRegion: public HeapRegionClosure {
 public:
   bool doHeapRegion(HeapRegion* r) {
     if (r->is_gc_alloc_region()) {
-      gclog_or_tty->print_cr("Region %d ["PTR_FORMAT"...] is still a gc_alloc_region.",
-                             r->hrs_index(), r->bottom());
+      gclog_or_tty->print_cr("Region "HR_FORMAT" is still a GC alloc region",
+                             HR_FORMAT_PARAMS(r));
     }
     return false;
   }

@@ -3695,9 +3681,8 @@ void G1CollectedHeap::get_gc_alloc_regions() {
       // the region was retained from the last collection
       ++_gc_alloc_region_counts[ap];

       if (G1PrintHeapRegions) {
-        gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
-                               "top "PTR_FORMAT,
-                               alloc_region->hrs_index(), alloc_region->bottom(),
-                               alloc_region->end(), alloc_region->top());
+        gclog_or_tty->print_cr("new alloc region "HR_FORMAT,
+                               HR_FORMAT_PARAMS(alloc_region));
       }
     }
@@ -4908,10 +4893,10 @@ void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
   hr->set_notHumongous();
   free_region(hr, &hr_pre_used, free_list, par);

-  int i = hr->hrs_index() + 1;
+  size_t i = hr->hrs_index() + 1;
   size_t num = 1;
-  while ((size_t) i < n_regions()) {
-    HeapRegion* curr_hr = _hrs->at(i);
+  while (i < n_regions()) {
+    HeapRegion* curr_hr = region_at(i);
     if (!curr_hr->continuesHumongous()) {
       break;
     }

@@ -5271,16 +5256,6 @@ void G1CollectedHeap::wait_while_free_regions_coming() {
   }
 }

-size_t G1CollectedHeap::n_regions() {
-  return _hrs->length();
-}
-
-size_t G1CollectedHeap::max_regions() {
-  return
-    (size_t)align_size_up(max_capacity(), HeapRegion::GrainBytes) /
-    HeapRegion::GrainBytes;
-}
-
 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
   assert(heap_lock_held_for_gc(),
          "the heap lock should already be held by or for this thread");

@@ -5477,6 +5452,15 @@ public:
   }
 };

+HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index,
+                                             HeapWord* bottom) {
+  HeapWord* end = bottom + HeapRegion::GrainWords;
+  MemRegion mr(bottom, end);
+  assert(_g1_reserved.contains(mr), "invariant");
+  // This might return NULL if the allocation fails
+  return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */);
+}
+
 void G1CollectedHeap::verify_region_sets() {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"

@@ -42,7 +43,6 @@
 // heap subsets that will yield large amounts of garbage.

 class HeapRegion;
-class HeapRegionSeq;
 class HRRSCleanupTask;
 class PermanentGenerationSpec;
 class GenerationSpec;

@@ -196,9 +196,6 @@ private:
   // The part of _g1_storage that is currently committed.
   MemRegion _g1_committed;

-  // The maximum part of _g1_storage that has ever been committed.
-  MemRegion _g1_max_committed;
-
   // The master free list. It will satisfy all new region allocations.
   MasterFreeRegionList _free_list;

@@ -222,7 +219,7 @@ private:
   void rebuild_region_lists();

   // The sequence of all heap regions in the heap.
-  HeapRegionSeq* _hrs;
+  HeapRegionSeq _hrs;

   // Alloc region used to satisfy mutator allocation requests.
   MutatorAllocRegion _mutator_alloc_region;

@@ -421,13 +418,15 @@ protected:
   // Attempt to satisfy a humongous allocation request of the given
   // size by finding a contiguous set of free regions of num_regions
   // length and remove them from the master free list. Return the
-  // index of the first region or -1 if the search was unsuccessful.
-  int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);
+  // index of the first region or G1_NULL_HRS_INDEX if the search
+  // was unsuccessful.
+  size_t humongous_obj_allocate_find_first(size_t num_regions,
+                                           size_t word_size);

   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
   // humongous region.
-  HeapWord* humongous_obj_allocate_initialize_regions(int first,
+  HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
                                                       size_t num_regions,
                                                       size_t word_size);

@@ -587,8 +586,8 @@ public:
   void register_region_with_in_cset_fast_test(HeapRegion* r) {
     assert(_in_cset_fast_test_base != NULL, "sanity");
     assert(r->in_collection_set(), "invariant");
-    int index = r->hrs_index();
-    assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
+    size_t index = r->hrs_index();
+    assert(index < _in_cset_fast_test_length, "invariant");
     assert(!_in_cset_fast_test_base[index], "invariant");
     _in_cset_fast_test_base[index] = true;
   }

@@ -754,6 +753,11 @@ protected:
                                  HumongousRegionSet* humongous_proxy_set,
                                  bool par);

+  // Notifies all the necessary spaces that the committed space has
+  // been updated (either expanded or shrunk). It should be called
+  // after _g1_storage is updated.
+  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
+
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmThread;

@@ -816,7 +820,6 @@ protected:
   oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
   void handle_evacuation_failure_common(oop obj, markOop m);

-  // Ensure that the relevant gc_alloc regions are set.
   void get_gc_alloc_regions();

   // We're done with GC alloc regions. We are going to tear down the

@@ -967,15 +970,13 @@ public:
   }

   // The total number of regions in the heap.
-  size_t n_regions();
+  size_t n_regions() { return _hrs.length(); }

-  // The number of regions that are completely free.
-  size_t max_regions();
+  // The max number of regions in the heap.
+  size_t max_regions() { return _hrs.max_length(); }

   // The number of regions that are completely free.
   size_t free_regions() { return _free_list.length(); }

   // The number of regions that are not completely free.
   size_t used_regions() { return n_regions() - free_regions(); }

@@ -983,6 +984,10 @@ public:
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }

+  // Factory method for HeapRegion instances. It will return NULL if
+  // the allocation fails.
+  HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
+
   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;

@@ -1144,17 +1149,15 @@ public:
   // Iterate over heap regions, in address order, terminating the
   // iteration early if the "doHeapRegion" method returns "true".
-  void heap_region_iterate(HeapRegionClosure* blk);
+  void heap_region_iterate(HeapRegionClosure* blk) const;

   // Iterate over heap regions starting with r (or the first region if "r"
   // is NULL), in address order, terminating early if the "doHeapRegion"
   // method returns "true".
-  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk);
-
-  // As above but starting from the region at index idx.
-  void heap_region_iterate_from(int idx, HeapRegionClosure* blk);
-
-  HeapRegion* region_at(size_t idx);
+  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
+
+  // Return the region with the given index. It assumes the index is valid.
+  HeapRegion* region_at(size_t index) const { return _hrs.at(index); }

   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some

@@ -1195,12 +1198,14 @@ public:
   // A G1CollectedHeap will contain some number of heap regions. This
   // finds the region containing a given address, or else returns NULL.
-  HeapRegion* heap_region_containing(const void* addr) const;
+  template <class T>
+  inline HeapRegion* heap_region_containing(const T addr) const;

   // Like the above, but requires "addr" to be in the heap (to avoid a
   // null-check), and unlike the above, may return a continuing humongous
   // region.
-  HeapRegion* heap_region_containing_raw(const void* addr) const;
+  template <class T>
+  inline HeapRegion* heap_region_containing_raw(const T addr) const;

   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
   // each address in the (reserved) heap is a member of exactly

@@ -1262,7 +1267,7 @@ public:
     return true;
   }

-  bool is_in_young(oop obj) {
+  bool is_in_young(const oop obj) {
     HeapRegion* hr = heap_region_containing(obj);
     return hr != NULL && hr->is_young();
   }

@@ -1368,11 +1373,6 @@ public:
   // Override
   void print_tracing_info() const;

-  // If "addr" is a pointer into the (reserved?) heap, returns a positive
-  // number indicating the "arena" within the heap in which "addr" falls.
-  // Or else returns 0.
-  virtual int addr_to_arena_id(void* addr) const;
-
   // Convenience function to be used in situations where the heap type can be
   // asserted to be this type.
   static G1CollectedHeap* heap();
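heap_region_containing() and its raw variant become templates above. One plausible reading, sketched here with stand-in types, is that G1 call sites hold addresses under several types (oop, HeapWord*, void*), and a single template member centralizes the cast instead of repeating it at each caller; the header does not state the motivation, so treat this as an assumption:

    // Compile-time sketch with simplified stand-in types only.
    struct HeapWord;     // opaque word type, as in HotSpot
    struct HeapRegion {};

    struct Heap {
      HeapRegion* addr_to_region(HeapWord* /*addr*/) const { return 0; }

      template <class T>
      HeapRegion* heap_region_containing(const T addr) const {
        return addr_to_region((HeapWord*) addr);  // the one cast point
      }
    };

    int main() {
      Heap h;
      void* p = 0;
      h.heap_region_containing(p);          // T = void*
      h.heap_region_containing((char*) p);  // T = char*
      return 0;
    }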
src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

@@ -34,9 +34,10 @@
 // Inline functions for G1CollectedHeap

+template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const void* addr) const {
-  HeapRegion* hr = _hrs->addr_to_region(addr);
+G1CollectedHeap::heap_region_containing(const T addr) const {
+  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
   // hr can be null if addr in perm_gen
   if (hr != NULL && hr->continuesHumongous()) {
     hr = hr->humongous_start_region();

@@ -44,19 +45,16 @@ G1CollectedHeap::heap_region_containing(const void* addr) const {
   return hr;
 }

+template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
-  assert(_g1_reserved.contains(addr), "invariant");
-  size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
-                                        >> HeapRegion::LogOfHRGrainBytes;
-  HeapRegion* res = _hrs->at(index);
-  assert(res == _hrs->addr_to_region(addr), "sanity");
+G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+  assert(_g1_reserved.contains((const void*) addr), "invariant");
+  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
   return res;
 }

 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
-  HeapRegion* r = _hrs->addr_to_region(obj);
+  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
   return r != NULL && r->in_collection_set();
 }
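addr_to_region_unsafe(), used above, is backed by the _regions_biased array that heapRegionSeq.cpp sets up further down: the region array's base pointer is biased once by the heap bottom, so an address maps to its region with a single shift and index. A runnable sketch of the trick, with assumed names and a 1 MB region size; it leans on the same out-of-range pointer arithmetic the VM itself uses:

    #include <cstdint>
    #include <cstdio>

    // Assume 1 MB regions (2^20 bytes), standing in for HeapRegion::GrainBytes.
    static const unsigned kLogRegionBytes = 20;

    struct Region { size_t index; };

    struct RegionTable {
      Region** regions;         // regions[0] covers the bottom of the heap
      Region** regions_biased;  // biased so any heap address indexes directly

      void initialize(uintptr_t heap_bottom, size_t max_regions) {
        regions = new Region*[max_regions]();
        // Bias the base pointer once, so lookups need no per-call
        // subtraction of heap_bottom.
        regions_biased = regions - (heap_bottom >> kLogRegionBytes);
      }

      Region* addr_to_region_unsafe(uintptr_t addr) const {
        return regions_biased[addr >> kLogRegionBytes];
      }
    };

    int main() {
      const uintptr_t heap_bottom = (uintptr_t) 1 << 32;  // pretend heap base
      RegionTable t;
      t.initialize(heap_bottom, 16);
      Region r2 = { 2 };
      t.regions[2] = &r2;  // region covering [bottom + 2 MB, bottom + 3 MB)
      uintptr_t addr = heap_bottom + (2u << 20) + 12345;
      printf("region index: %zu\n", t.addr_to_region_unsafe(addr)->index);
      return 0;
    }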
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

@@ -2639,11 +2639,8 @@ add_to_collection_set(HeapRegion* hr) {
   assert(!hr->is_young(), "non-incremental add of young region");

   if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr("added region to cset "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
+    gclog_or_tty->print_cr("added region to cset "HR_FORMAT,
+                           HR_FORMAT_PARAMS(hr));
   }

   if (_g1->mark_in_progress())

@@ -2813,11 +2810,8 @@ void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
   _inc_cset_tail = hr;

   if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", young %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), (hr->is_young()) ? "YES" : "NO");
+    gclog_or_tty->print_cr(" added region to incremental cset (RHS) "HR_FORMAT,
+                           HR_FORMAT_PARAMS(hr));
   }
 }

@@ -2838,11 +2832,8 @@ void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
   _inc_cset_head = hr;

   if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", young %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), (hr->is_young()) ? "YES" : "NO");
+    gclog_or_tty->print_cr(" added region to incremental cset (LHS) "HR_FORMAT,
+                           HR_FORMAT_PARAMS(hr));
   }
 }
src/share/vm/gc_implementation/g1/heapRegion.cpp

@@ -159,20 +159,16 @@ public:
           gclog_or_tty->print_cr("----------");
         }
         gclog_or_tty->print_cr("Missing rem set entry:");
-        gclog_or_tty->print_cr("Field "PTR_FORMAT" of obj "PTR_FORMAT", "
-                               "in region %d ["PTR_FORMAT", "PTR_FORMAT"),",
-                               p, (void*) _containing_obj,
-                               from->hrs_index(), from->bottom(), from->end());
+        gclog_or_tty->print_cr("Field "PTR_FORMAT" "
+                               "of obj "PTR_FORMAT", "
+                               "in region "HR_FORMAT,
+                               p, (void*) _containing_obj,
+                               HR_FORMAT_PARAMS(from));
         _containing_obj->print_on(gclog_or_tty);
-        gclog_or_tty->print_cr("points to obj "PTR_FORMAT" in region %d ["PTR_FORMAT", "PTR_FORMAT").",
-                               (void*) obj, to->hrs_index(), to->bottom(), to->end());
+        gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
+                               "in region "HR_FORMAT,
+                               (void*) obj, HR_FORMAT_PARAMS(to));
         obj->print_on(gclog_or_tty);
         gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                cv_obj, cv_field);

@@ -484,11 +480,10 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {

-HeapRegion::HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
+HeapRegion::HeapRegion(size_t hrs_index,
+                       G1BlockOffsetSharedArray* sharedOffsetArray,
                        MemRegion mr, bool is_zeroed)
   : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
-    _next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(-1),
+    _next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(hrs_index),
     _humongous_type(NotHumongous), _humongous_start_region(NULL),
     _in_collection_set(false), _is_gc_alloc_region(false),
     _next_in_special_set(NULL), _orig_end(NULL),
src/share/vm/gc_implementation/g1/heapRegion.hpp

@@ -52,9 +52,11 @@ class HeapRegionRemSetIterator;
 class HeapRegion;
 class HeapRegionSetBase;

-#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
-#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \
-                               (_hr_)->top(), (_hr_)->end()
+#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
+#define HR_FORMAT_PARAMS(_hr_) \
+                (_hr_)->hrs_index(), \
+                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
+                (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()

 // A dirty card to oop closure for heap regions. It
 // knows how to get the G1 heap and how to use the bitmap

@@ -237,9 +239,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

 protected:
-  // If this region is a member of a HeapRegionSeq, the index in that
-  // sequence, otherwise -1.
-  int  _hrs_index;
+  // The index of this region in the heap region sequence.
+  size_t _hrs_index;

   HumongousType _humongous_type;
   // For a humongous region, region in which it starts.

@@ -296,8 +297,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   enum YoungType {
     NotYoung,                   // a region is not young
     Young,                      // a region is young
-    Survivor                    // a region is young and it contains
-                                // survivor
+    Survivor                    // a region is young and it contains survivors
   };

   volatile YoungType _young_type;

@@ -351,7 +351,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
 public:
   // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
-  HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
+  HeapRegion(size_t hrs_index,
+             G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr, bool is_zeroed);

   static int LogOfHRGrainBytes;

@@ -393,8 +394,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // If this region is a member of a HeapRegionSeq, the index in that
   // sequence, otherwise -1.
-  int hrs_index() const { return _hrs_index; }
-  void set_hrs_index(int index) { _hrs_index = index; }
+  size_t hrs_index() const { return _hrs_index; }

   // The number of bytes marked live in the region in the last marking phase.
   size_t marked_bytes()    { return _prev_marked_bytes; }

@@ -579,6 +579,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

+  HeapWord* orig_end() { return _orig_end; }
+
   // Allows logical separation between objects allocated before and after.
   void save_marks();
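For reference, the new HR_FORMAT above prints the region index, a young/survivor tag, and the bottom/top/end addresses. A hedged illustration, with plain printf specifiers standing in for the SIZE_FORMAT and PTR_FORMAT macros:

    #include <cstdio>

    int main() {
      size_t hrs_index = 123;
      const char* type = "E";  // "S" survivor, "E" young (eden), "-" otherwise
      void* bottom = (void*) 0x10000000;
      void* top    = (void*) 0x10080000;
      void* end    = (void*) 0x10100000;
      // Prints something like "123:(E)[0x10000000,0x10080000,0x10100000]"
      printf("%zu:(%s)[%p,%p,%p]\n", hrs_index, type, bottom, top, end);
      return 0;
    }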
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp

@@ -834,7 +834,7 @@ PosParPRT* OtherRegionsTable::delete_region_table() {
 #endif

   // Set the corresponding coarse bit.
-  int max_hrs_index = max->hr()->hrs_index();
+  size_t max_hrs_index = max->hr()->hrs_index();
   if (!_coarse_map.at(max_hrs_index)) {
     _coarse_map.at_put(max_hrs_index, true);
     _n_coarse_entries++;

@@ -860,7 +860,8 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                               BitMap* region_bm, BitMap* card_bm) {
   // First eliminated garbage regions from the coarse map.
   if (G1RSScrubVerbose)
-    gclog_or_tty->print_cr("Scrubbing region %d:", hr()->hrs_index());
+    gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":",
+                           hr()->hrs_index());

   assert(_coarse_map.size() == region_bm->size(), "Precondition");
   if (G1RSScrubVerbose)

@@ -878,7 +879,8 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
       PosParPRT* nxt = cur->next();
       // If the entire region is dead, eliminate.
       if (G1RSScrubVerbose)
-        gclog_or_tty->print_cr("  For other region %d:", cur->hr()->hrs_index());
+        gclog_or_tty->print_cr("  For other region "SIZE_FORMAT":",
+                               cur->hr()->hrs_index());
       if (!region_bm->at(cur->hr()->hrs_index())) {
         *prev = nxt;
         cur->set_next(NULL);

@@ -994,7 +996,7 @@ void OtherRegionsTable::clear() {
 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
-  size_t hrs_ind = (size_t)from_hr->hrs_index();
+  size_t hrs_ind = from_hr->hrs_index();
   size_t ind = hrs_ind & _mod_max_fine_entries_mask;
   if (del_single_region_table(ind, from_hr)) {
     assert(!_coarse_map.at(hrs_ind), "Inv");

@@ -1002,7 +1004,7 @@ void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
     _coarse_map.par_at_put(hrs_ind, 0);
   }
   // Check to see if any of the fcc entries come from here.
-  int hr_ind = hr()->hrs_index();
+  size_t hr_ind = hr()->hrs_index();
   for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
     int fcc_ent = _from_card_cache[tid][hr_ind];
     if (fcc_ent != -1) {
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp

@@ -23,259 +23,182 @@
  */

 #include "precompiled.hpp"
-#include "gc_implementation/g1/heapRegion.hpp"
-#include "gc_implementation/g1/heapRegionSeq.hpp"
+#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSets.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "memory/allocation.hpp"

-// Local to this file.
-
-static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
-  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
-  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
-  else if (*hr1p == *hr2p) return 0;
-  else {
-    assert(false, "We should never compare distinct overlapping regions.");
-  }
-  return 0;
-}
-
-HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
-  _alloc_search_start(0),
-  // The line below is the worst bit of C++ hackery I've ever written
-  // (Detlefs, 11/23). You should think of it as equivalent to
-  // "_regions(100, true)": initialize the growable array and inform it
-  // that it should allocate its elem array(s) on the C heap.
-  //
-  // The first argument, however, is actually a comma expression
-  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
-  // set_allocation_type() call is to replace the default allocation
-  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
-  // allow to pass the assert in GenericGrowableArray() which checks
-  // that a growable array object must be on C heap if elements are.
-  //
-  // Note: containing object is allocated on C heap since it is CHeapObj.
-  //
-  _regions((ResourceObj::set_allocation_type((address) &_regions,
-                                             ResourceObj::C_HEAP),
-            (int) max_size),
-           true),
-  _next_rr_candidate(0),
-  _seq_bottom(NULL)
-{}
-
-// Private methods.
-
-void HeapRegionSeq::print_empty_runs() {
-  int empty_run = 0;
-  int n_empty = 0;
-  int empty_run_start;
-  for (int i = 0; i < _regions.length(); i++) {
-    HeapRegion* r = _regions.at(i);
-    if (r->continuesHumongous()) continue;
-    if (r->is_empty()) {
-      assert(!r->isHumongous(), "H regions should not be empty.");
-      if (empty_run == 0) empty_run_start = i;
-      empty_run++;
-      n_empty++;
-    } else {
-      if (empty_run > 0) {
-        gclog_or_tty->print("  %d:%d", empty_run_start, empty_run);
-        empty_run = 0;
-      }
-    }
-  }
-  if (empty_run > 0) {
-    gclog_or_tty->print("  %d:%d", empty_run_start, empty_run);
-  }
-  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
-}
-
-int HeapRegionSeq::find(HeapRegion* hr) {
-  // FIXME: optimized for adjacent regions of fixed size.
-  int ind = hr->hrs_index();
-  if (ind != -1) {
-    assert(_regions.at(ind) == hr, "Mismatch");
-  }
-  return ind;
-}
-
-// Public methods.
-
-void HeapRegionSeq::insert(HeapRegion* hr) {
-  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
-  if (_regions.length() == 0
-      || _regions.top()->end() <= hr->bottom()) {
-    hr->set_hrs_index(_regions.length());
-    _regions.append(hr);
-  } else {
-    _regions.append(hr);
-    _regions.sort(orderRegions);
-    for (int i = 0; i < _regions.length(); i++) {
-      _regions.at(i)->set_hrs_index(i);
-    }
-  }
-  char* bot = (char*) _regions.at(0)->bottom();
-  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
-}
-
-size_t HeapRegionSeq::length() {
-  return _regions.length();
-}
-
-size_t HeapRegionSeq::free_suffix() {
-  size_t res = 0;
-  int first = _regions.length() - 1;
-  int cur = first;
-  while (cur >= 0 &&
-         (_regions.at(cur)->is_empty()
-          && (first == cur
-              || (_regions.at(cur + 1)->bottom() ==
-                  _regions.at(cur)->end())))) {
-    res++;
-    cur--;
-  }
-  return res;
-}
-
-int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
-  assert(num > 1, "pre-condition");
-  assert(0 <= from && from <= _regions.length(),
-         err_msg("from: %d should be valid and <= than %d",
-                 from, _regions.length()));
-  int curr = from;
-  int first = -1;
+// Private
+
+size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
+  size_t len = length();
+  assert(num > 1, "use this only for sequences of length 2 or greater");
+  assert(from <= len,
+         err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT,
+                 from, len));
+
+  size_t curr = from;
+  size_t first = G1_NULL_HRS_INDEX;
   size_t num_so_far = 0;
-  while (curr < _regions.length() && num_so_far < num) {
-    HeapRegion* curr_hr = _regions.at(curr);
-    if (curr_hr->is_empty()) {
-      if (first == -1) {
+  while (curr < len && num_so_far < num) {
+    if (at(curr)->is_empty()) {
+      if (first == G1_NULL_HRS_INDEX) {
         first = curr;
         num_so_far = 1;
       } else {
         num_so_far += 1;
       }
     } else {
-      first = -1;
+      first = G1_NULL_HRS_INDEX;
       num_so_far = 0;
     }
     curr += 1;
   }
   assert(num_so_far <= num, "post-condition");
   if (num_so_far == num) {
     // we found enough space for the humongous object
-    assert(from <= first && first < _regions.length(), "post-condition");
-    assert(first < curr && (curr - first) == (int) num, "post-condition");
-    for (int i = first; i < first + (int) num; ++i) {
-      assert(_regions.at(i)->is_empty(), "post-condition");
+    assert(from <= first && first < len, "post-condition");
+    assert(first < curr && (curr - first) == num, "post-condition");
+    for (size_t i = first; i < first + num; ++i) {
+      assert(at(i)->is_empty(), "post-condition");
     }
     return first;
   } else {
     // we failed to find enough space for the humongous object
-    return -1;
+    return G1_NULL_HRS_INDEX;
   }
 }

-int HeapRegionSeq::find_contiguous(size_t num) {
-  assert(num > 1, "otherwise we should not be calling this");
-  assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
-         err_msg("_alloc_search_start: %d should be valid and <= than %d",
-                 _alloc_search_start, _regions.length()));
-
-  int start = _alloc_search_start;
-  int res = find_contiguous_from(start, num);
-  if (res == -1 && start != 0) {
-    // Try starting from the beginning. If _alloc_search_start was 0,
-    // no point in doing this again.
-    res = find_contiguous_from(0, num);
-  }
-  if (res != -1) {
-    assert(0 <= res && res < _regions.length(),
-           err_msg("res: %d should be valid", res));
-    _alloc_search_start = res + (int) num;
-    assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
-           err_msg("_alloc_search_start: %d should be valid",
-                   _alloc_search_start));
-  }
-  return res;
-}
-
-void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
-  iterate_from((HeapRegion*) NULL, blk);
-}
-
-// The first argument r is the heap region at which iteration begins.
-// This operation runs fastest when r is NULL, or the heap region for
-// which a HeapRegionClosure most recently returned true, or the
-// heap region immediately to its right in the sequence. In all
-// other cases a linear search is required to find the index of r.
-
-void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {
-  // :::: FIXME ::::
-  // Static cache value is bad, especially when we start doing parallel
-  // remembered set update. For now just don't cache anything (the
-  // code in the def'd out blocks).
-#if 0
-  static int cached_j = 0;
-#endif
-  int len = _regions.length();
-  int j = 0;
-  // Find the index of r.
-  if (r != NULL) {
-#if 0
-    assert(cached_j >= 0, "Invariant.");
-    if ((cached_j < len) && (r == _regions.at(cached_j))) {
-      j = cached_j;
-    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
-      j = cached_j + 1;
-    } else {
-      j = find(r);
-#endif
-      if (j < 0) {
-        j = 0;
-      }
-#if 0
-    }
-#endif
-  }
-  int i;
-  for (i = j; i < len; i += 1) {
-    int res = blk->doHeapRegion(_regions.at(i));
-    if (res) {
-#if 0
-      cached_j = i;
-#endif
-      blk->incomplete();
-      return;
-    }
-  }
-  for (i = 0; i < j; i += 1) {
-    int res = blk->doHeapRegion(_regions.at(i));
-    if (res) {
-#if 0
-      cached_j = i;
-#endif
-      blk->incomplete();
-      return;
-    }
-  }
-}
-
-void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
-  int len = _regions.length();
-  int i;
-  for (i = idx; i < len; i++) {
-    if (blk->doHeapRegion(_regions.at(i))) {
-      blk->incomplete();
-      return;
-    }
-  }
-  for (i = 0; i < idx; i++) {
-    if (blk->doHeapRegion(_regions.at(i))) {
-      blk->incomplete();
-      return;
-    }
-  }
-}
+// Public
+
+void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
+                               size_t max_length) {
+  assert((size_t) bottom % HeapRegion::GrainBytes == 0,
+         "bottom should be heap region aligned");
+  assert((size_t) end % HeapRegion::GrainBytes == 0,
+         "end should be heap region aligned");
+
+  _length = 0;
+  _heap_bottom = bottom;
+  _heap_end = end;
+  _region_shift = HeapRegion::LogOfHRGrainBytes;
+  _next_search_index = 0;
+  _allocated_length = 0;
+  _max_length = max_length;
+
+  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
+  memset(_regions, 0, max_length * sizeof(HeapRegion*));
+  _regions_biased = _regions - ((size_t) bottom >> _region_shift);
+
+  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
+         "bottom should be included in the region with index 0");
+}
+
+MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
+                                   HeapWord* new_end,
+                                   FreeRegionList* list) {
+  assert(old_end < new_end, "don't call it otherwise");
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  HeapWord* next_bottom = old_end;
+  assert(_heap_bottom <= next_bottom, "invariant");
+  while (next_bottom < new_end) {
+    assert(next_bottom < _heap_end, "invariant");
+    size_t index = length();
+
+    assert(index < _max_length, "otherwise we cannot expand further");
+    if (index == 0) {
+      // We have not allocated any regions so far
+      assert(next_bottom == _heap_bottom, "invariant");
+    } else {
+      // next_bottom should match the end of the last/previous region
+      assert(next_bottom == at(index - 1)->end(), "invariant");
+    }
+
+    if (index == _allocated_length) {
+      // We have to allocate a new HeapRegion.
+      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
+      if (new_hr == NULL) {
+        // allocation failed, we bail out and return what we have done so far
+        return MemRegion(old_end, next_bottom);
+      }
+      assert(_regions[index] == NULL, "invariant");
+      _regions[index] = new_hr;
+      increment_length(&_allocated_length);
+    }
+    // Have to increment the length first, otherwise we will get an
+    // assert failure at(index) below.
+    increment_length(&_length);
+    HeapRegion* hr = at(index);
+    list->add_as_tail(hr);
+
+    next_bottom = hr->end();
+  }
+  assert(next_bottom == new_end, "post-condition");
+  return MemRegion(old_end, next_bottom);
+}
+
+size_t HeapRegionSeq::free_suffix() {
+  size_t res = 0;
+  size_t index = length();
+  while (index > 0) {
+    index -= 1;
+    if (!at(index)->is_empty()) {
+      break;
+    }
+    res += 1;
+  }
+  return res;
+}
+
+size_t HeapRegionSeq::find_contiguous(size_t num) {
+  assert(num > 1, "use this only for sequences of length 2 or greater");
+  assert(_next_search_index <= length(),
+         err_msg("_next_search_index: "SIZE_FORMAT" "
+                 "should be valid and <= than "SIZE_FORMAT,
+                 _next_search_index, length()));
+
+  size_t start = _next_search_index;
+  size_t res = find_contiguous_from(start, num);
+  if (res == G1_NULL_HRS_INDEX && start > 0) {
+    // Try starting from the beginning. If _next_search_index was 0,
+    // no point in doing this again.
+    res = find_contiguous_from(0, num);
+  }
+  if (res != G1_NULL_HRS_INDEX) {
+    assert(res < length(),
+           err_msg("res: "SIZE_FORMAT" should be valid", res));
+    _next_search_index = res + num;
+    assert(_next_search_index <= length(),
+           err_msg("_next_search_index: "SIZE_FORMAT" "
+                   "should be valid and <= than "SIZE_FORMAT,
+                   _next_search_index, length()));
+  }
+  return res;
+}
+
+void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
+  iterate_from((HeapRegion*) NULL, blk);
+}
+
+void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
+  size_t hr_index = 0;
+  if (hr != NULL) {
+    hr_index = (size_t) hr->hrs_index();
+  }
+
+  size_t len = length();
+  for (size_t i = hr_index; i < len; i += 1) {
+    bool res = blk->doHeapRegion(at(i));
+    if (res) {
+      blk->incomplete();
+      return;
+    }
+  }
+  for (size_t i = 0; i < hr_index; i += 1) {
+    bool res = blk->doHeapRegion(at(i));
+    if (res) {
+      blk->incomplete();
+      return;
+    }
+  }
+}
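find_contiguous() above keeps _next_search_index as a hint: the scan resumes where the previous search ended, and falls back to one scan from index 0 if that fails. A condensed, runnable rendering of that search with stand-alone helpers, not the HotSpot functions themselves:

    #include <cstddef>
    #include <cstdio>

    #define G1_NULL_HRS_INDEX ((size_t) -1)

    // Stand-in for find_contiguous_from(): find num consecutive empties.
    size_t find_from(const bool* empty, size_t len, size_t from, size_t num) {
      size_t first = G1_NULL_HRS_INDEX, run = 0;
      for (size_t i = from; i < len && run < num; ++i) {
        if (empty[i]) { if (run++ == 0) first = i; }
        else { first = G1_NULL_HRS_INDEX; run = 0; }
      }
      return (run == num) ? first : G1_NULL_HRS_INDEX;
    }

    size_t find_contiguous(const bool* empty, size_t len,
                           size_t* next_search_index, size_t num) {
      size_t start = *next_search_index;
      size_t res = find_from(empty, len, start, num);
      if (res == G1_NULL_HRS_INDEX && start > 0) {
        res = find_from(empty, len, 0, num);  // wrap around once
      }
      if (res != G1_NULL_HRS_INDEX) {
        *next_search_index = res + num;       // remember where to resume
      }
      return res;
    }

    int main() {
      bool empty[8] = { true, true, false, true, true, true, false, false };
      size_t hint = 3;
      size_t res = find_contiguous(empty, 8, &hint, 2);
      printf("found at %zu, next hint %zu\n", res, hint);  // 3 and 5
      return 0;
    }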
@@ -283,54 +206,92 @@ void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
 }

 MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
-                                   size_t& num_regions_deleted) {
+                                   size_t* num_regions_deleted) {
   // Reset this in case it's currently pointing into the regions that
   // we just removed.
-  _alloc_search_start = 0;
+  _next_search_index = 0;

   assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
   assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
+  assert(length() > 0, "the region sequence should not be empty");
+  assert(length() <= _allocated_length, "invariant");
+  assert(_allocated_length > 0, "we should have at least one region committed");

-  if (_regions.length() == 0) {
-    num_regions_deleted = 0;
-    return MemRegion();
-  }
-  int j = _regions.length() - 1;
-  HeapWord* end = _regions.at(j)->end();
+  // around the loop, i will be the next region to be removed
+  size_t i = length() - 1;
+  assert(i > 0, "we should never remove all regions");
+  // [last_start, end) is the MemRegion that covers the regions we will remove.
+  HeapWord* end = at(i)->end();
   HeapWord* last_start = end;
-  while (j >= 0 && shrink_bytes > 0) {
-    HeapRegion* cur = _regions.at(j);
-    // We have to leave humongous regions where they are,
-    // and work around them.
-    if (cur->isHumongous()) {
-      return MemRegion(last_start, end);
-    }
-    assert(cur == _regions.top(), "Should be top");
+  *num_regions_deleted = 0;
+  while (shrink_bytes > 0) {
+    HeapRegion* cur = at(i);
+    // We should leave the humongous regions where they are.
+    if (cur->isHumongous()) break;
+    // We should stop shrinking if we come across a non-empty region.
     if (!cur->is_empty()) break;
+
+    i -= 1;
+    *num_regions_deleted += 1;
     shrink_bytes -= cur->capacity();
-    num_regions_deleted++;
-    _regions.pop();
     last_start = cur->bottom();
-    // We need to delete these somehow, but can't currently do so here: if
-    // we do, the ZF thread may still access the deleted region. We'll
-    // leave this here as a reminder that we have to do something about
-    // this.
-    // delete cur;
-    j--;
+    decrement_length(&_length);
+    // We will reclaim the HeapRegion. _allocated_length should be
+    // covering this index. So, even though we removed the region from
+    // the active set by decreasing _length, we still have it
+    // available in the future if we need to re-use it.
+    assert(i > 0, "we should never remove all regions");
+    assert(length() > 0, "we should never remove all regions");
   }
   return MemRegion(last_start, end);
 }

-class PrintHeapRegionClosure : public HeapRegionClosure {
-public:
-  bool doHeapRegion(HeapRegion* r) {
-    gclog_or_tty->print(PTR_FORMAT":", r);
-    r->print();
-    return false;
-  }
-};
-
-void HeapRegionSeq::print() {
-  PrintHeapRegionClosure cl;
-  iterate(&cl);
-}
+#ifndef PRODUCT
+void HeapRegionSeq::verify_optional() {
+  guarantee(_length <= _allocated_length,
+            err_msg("invariant: _length: "SIZE_FORMAT" "
+                    "_allocated_length: "SIZE_FORMAT,
+                    _length, _allocated_length));
+  guarantee(_allocated_length <= _max_length,
+            err_msg("invariant: _allocated_length: "SIZE_FORMAT" "
+                    "_max_length: "SIZE_FORMAT,
+                    _allocated_length, _max_length));
+  guarantee(_next_search_index <= _length,
+            err_msg("invariant: _next_search_index: "SIZE_FORMAT" "
+                    "_length: "SIZE_FORMAT,
+                    _next_search_index, _length));
+
+  HeapWord* prev_end = _heap_bottom;
+  for (size_t i = 0; i < _allocated_length; i += 1) {
+    HeapRegion* hr = _regions[i];
+    guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i));
+    guarantee(hr->bottom() == prev_end,
+              err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" "
+                      "prev_end: "PTR_FORMAT,
+                      i, HR_FORMAT_PARAMS(hr), prev_end));
+    guarantee(hr->hrs_index() == i,
+              err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT,
+                      i, hr->hrs_index()));
+    if (i < _length) {
+      // Asserts will fire if i is >= _length
+      HeapWord* addr = hr->bottom();
+      guarantee(addr_to_region(addr) == hr, "sanity");
+      guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
+    } else {
+      guarantee(hr->is_empty(), "sanity");
+      guarantee(!hr->isHumongous(), "sanity");
+      // using assert instead of guarantee here since containing_set()
+      // is only available in non-product builds.
+      assert(hr->containing_set() == NULL, "sanity");
+    }
+    if (hr->startsHumongous()) {
+      prev_end = hr->orig_end();
+    } else {
+      prev_end = hr->end();
+    }
+  }
+  for (size_t i = _allocated_length; i < _max_length; i += 1) {
+    guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i));
+  }
+}
+#endif // PRODUCT
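verify_optional() above checks the three lengths the new HeapRegionSeq maintains, and the class comment in heapRegionSeq.hpp below documents the invariant _length <= _allocated_length <= _max_length. A small sketch of the lifecycle that preserves it, with simplified names:

    #include <cassert>
    #include <cstddef>

    struct RegionSeq {
      size_t _length;            // currently committed regions
      size_t _allocated_length;  // regions with a HeapRegion instance
      size_t _max_length;        // capacity of the heap in regions

      void commit_one() {
        if (_length == _allocated_length) {
          _allocated_length += 1;  // first commit of this slot: allocate metadata
        }
        _length += 1;
      }

      void uncommit_one() {
        _length -= 1;              // metadata is retained for future re-commit
      }

      void verify() const {
        assert(_length <= _allocated_length && "invariant");
        assert(_allocated_length <= _max_length && "invariant");
      }
    };

    int main() {
      RegionSeq seq = { 0, 0, 8 };
      seq.commit_one();    // _length 1, _allocated_length 1
      seq.commit_one();    // _length 2, _allocated_length 2
      seq.uncommit_one();  // _length 1, metadata for slot 1 kept
      seq.commit_one();    // re-commit: _length 2, _allocated_length stays 2
      seq.verify();
      return 0;
    }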
src/share/vm/gc_implementation/g1/heapRegionSeq.hpp
浏览文件 @
d2f0b980
...
...
@@ -25,92 +25,143 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#include "gc_implementation/g1/heapRegion.hpp"
#include "utilities/growableArray.hpp"
class
HeapRegion
;
class
HeapRegionClosure
;
class
FreeRegionList
;
#define G1_NULL_HRS_INDEX ((size_t) -1)
// This class keeps track of the region metadata (i.e., HeapRegion
// instances). They are kept in the _regions array in address
// order. A region's index in the array corresponds to its index in
// the heap (i.e., 0 is the region at the bottom of the heap, 1 is
// the one after it, etc.). Two regions that are consecutive in the
// array should also be adjacent in the address space (i.e.,
// region(i).end() == region(i+1).bottom()).
//
// We create a HeapRegion when we commit the region's address space
// for the first time. When we uncommit the address space of a
// region we retain the HeapRegion to be able to re-use it in the
// future (in case we recommit it).
//
// We keep track of three lengths:
//
// * _length (returned by length()) is the number of currently
// committed regions.
// * _allocated_length (not exposed outside this class) is the
// number of regions for which we have HeapRegions.
// * _max_length (returned by max_length()) is the maximum number of
// regions the heap can have.
//
// and maintain that: _length <= _allocated_length <= _max_length
class HeapRegionSeq : public CHeapObj {
  // _regions is kept sorted by start address order, and no two regions are
  // overlapping.
  GrowableArray<HeapRegion*> _regions;

  // The array that holds the HeapRegions.
  HeapRegion** _regions;

  // The index in "_regions" at which to start the next allocation search.
  // (For efficiency only; private to obj_allocate after initialization.)
  int _alloc_search_start;

  // Version of _regions biased to address 0
  HeapRegion** _regions_biased;

  // Finds a contiguous set of empty regions of length num, starting
  // from a given index.
  int find_contiguous_from(int from, size_t num);

  // The number of regions committed in the heap.
  size_t _length;

  // Currently, we're choosing collection sets in a round-robin fashion,
  // starting here.
  int _next_rr_candidate;

  // The address of the first reserved word in the heap.
  HeapWord* _heap_bottom;

  // The bottom address of the bottom-most region, or else NULL if there
  // are no regions in the sequence.
  char* _seq_bottom;

  // The address of the last reserved word in the heap - 1.
  HeapWord* _heap_end;

public:
  // Initializes "this" to the empty sequence of regions.
  HeapRegionSeq(const size_t max_size);

  // The log of the region byte size.
  size_t _region_shift;

  // Adds "hr" to "this" sequence. Requires "hr" not to overlap with
  // any region already in "this". (Will perform better if regions are
  // inserted in ascending address order.)
  void insert(HeapRegion* hr);

  // A hint for which index to start searching from for humongous
  // allocations.
  size_t _next_search_index;

  // Given a HeapRegion*, returns its index within _regions,
  // or returns -1 if not found.
  int find(HeapRegion* hr);

  // The number of regions for which we have allocated HeapRegions.
  size_t _allocated_length;

  // Requires the index to be valid, and returns the region at the index.
  HeapRegion* at(size_t i) { return _regions.at((int) i); }

  // The maximum number of regions in the heap.
  size_t _max_length;

  // Return the number of regions in the sequence.
  size_t length();

  // Find a contiguous set of empty regions of length num, starting
  // from the given index.
  size_t find_contiguous_from(size_t from, size_t num);

  // Returns the number of contiguous regions at the end of the sequence
  // that are available for allocation.
  size_t free_suffix();

  // Map a heap address to a biased region index. Assume that the
  // address is valid.
  inline size_t addr_to_index_biased(HeapWord* addr) const;

  // Find a contiguous set of empty regions of length num and return
  // the index of the first region or -1 if the search was unsuccessful.
  int find_contiguous(size_t num);

  void increment_length(size_t* length) {
    assert(*length < _max_length, "pre-condition");
    *length += 1;
  }

  void decrement_length(size_t* length) {
    assert(*length > 0, "pre-condition");
    *length -= 1;
  }

  // Apply the "doHeapRegion" method of "blk" to all regions in "this",
  // in address order, terminating the iteration early
  // if the "doHeapRegion" method returns "true".
  void iterate(HeapRegionClosure* blk);

public:
  // Empty constructor, we'll initialize it with the initialize() method.
  HeapRegionSeq() { }

  void initialize(HeapWord* bottom, HeapWord* end, size_t max_length);

  // Return the HeapRegion at the given index. Assume that the index
  // is valid.
  inline HeapRegion* at(size_t index) const;

  // Apply the "doHeapRegion" method of "blk" to all regions in "this",
  // starting at "r" (or first region, if "r" is NULL), in a circular
  // manner, terminating the iteration early if the "doHeapRegion" method
  // returns "true".
  void iterate_from(HeapRegion* r, HeapRegionClosure* blk);

  // If addr is within the committed space return its corresponding
  // HeapRegion, otherwise return NULL.
  inline HeapRegion* addr_to_region(HeapWord* addr) const;

  // As above, but start from a given index in the sequence
  // instead of a given heap region.
  void iterate_from(int idx, HeapRegionClosure* blk);

  // Return the HeapRegion that corresponds to the given
  // address. Assume the address is valid.
  inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;

  // Requires "shrink_bytes" to be a multiple of the page size and heap
  // region granularity. Deletes as many "rightmost" completely free heap
  // regions from the sequence as comprise shrink_bytes bytes. Returns the
  // MemRegion indicating the region those regions comprised, and sets
  // "num_regions_deleted" to the number of regions deleted.
  MemRegion shrink_by(size_t shrink_bytes, size_t& num_regions_deleted);

  // Return the number of regions that have been committed in the heap.
  size_t length() const { return _length; }

  // If "addr" falls within a region in the sequence, return that region,
  // or else NULL.
  inline HeapRegion* addr_to_region(const void* addr);

  // Return the maximum number of regions in the heap.
  size_t max_length() const { return _max_length; }

  void print();

  // Expand the sequence to reflect that the heap has grown from
  // old_end to new_end. Either create new HeapRegions, or re-use
  // existing ones, and return them in the given list. Returns the
  // memory region that covers the newly-created regions. If a
  // HeapRegion allocation fails, the result memory region might be
  // smaller than the desired one.
  MemRegion expand_by(HeapWord* old_end, HeapWord* new_end,
                      FreeRegionList* list);

  // Prints out runs of empty regions.
  void print_empty_runs();

  // Return the number of contiguous regions at the end of the sequence
  // that are available for allocation.
  size_t free_suffix();

  // Find a contiguous set of empty regions of length num and return
  // the index of the first region or G1_NULL_HRS_INDEX if the
  // search was unsuccessful.
  size_t find_contiguous(size_t num);

  // Apply blk->doHeapRegion() on all committed regions in address order,
  // terminating the iteration early if doHeapRegion() returns true.
  void iterate(HeapRegionClosure* blk) const;

  // As above, but start the iteration from hr and loop around. If hr
  // is NULL, we start from the first region in the heap.
  void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;

  // Tag as uncommitted as many regions that are completely free as
  // possible, up to shrink_bytes, from the suffix of the committed
  // sequence. Return a MemRegion that corresponds to the address
  // range of the uncommitted regions. Assume shrink_bytes is page and
  // heap region aligned.
  MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted);

  // Do some sanity checking.
  void verify_optional() PRODUCT_RETURN;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
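The header only declares find_contiguous() and find_contiguous_from(); their bodies are not part of this hunk. As a sketch of what a linear scan for num consecutive empty regions could look like under this interface, under the assumption that the search simply skips past the first non-empty blocker it meets (only the signature shape and the G1_NULL_HRS_INDEX sentinel come from the header above; the body is illustrative):

#include <cstddef>
#include <vector>

const size_t G1_NULL_HRS_INDEX = (size_t) -1;

// Scan [from, length) for `num` consecutive empty regions and return
// the index of the first one, or G1_NULL_HRS_INDEX on failure.
// `empty[i]` stands in for at(i)->is_empty().
size_t find_contiguous_from(const std::vector<bool>& empty,
                            size_t from, size_t num) {
  size_t length = empty.size();
  size_t curr = from;
  while (curr + num <= length) {
    size_t run = 0;
    while (run < num && empty[curr + run]) {
      run += 1;
    }
    if (run == num) {
      return curr;          // found a long-enough run of empty regions
    }
    curr += run + 1;        // restart just past the non-empty blocker
  }
  return G1_NULL_HRS_INDEX;
}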
src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp
View file @ d2f0b980
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
...
...
@@ -25,23 +25,42 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
inline HeapRegion* HeapRegionSeq::addr_to_region(const void* addr) {
  assert(_seq_bottom != NULL, "bad _seq_bottom in addr_to_region");
  if ((char*) addr >= _seq_bottom) {
    size_t diff = (size_t) pointer_delta((HeapWord*) addr,
                                         (HeapWord*) _seq_bottom);
    int index = (int) (diff >> HeapRegion::LogOfHRGrainWords);
    assert(index >= 0, "invariant / paranoia");
    if (index < _regions.length()) {
      HeapRegion* hr = _regions.at(index);
      assert(hr->is_in_reserved(addr), "addr_to_region is wrong...");
      return hr;
    }

inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
  assert(_heap_bottom <= addr && addr < _heap_end,
         err_msg("addr: " PTR_FORMAT " bottom: " PTR_FORMAT " end: " PTR_FORMAT,
                 addr, _heap_bottom, _heap_end));
  size_t index = (size_t) addr >> _region_shift;
  return index;
}

inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
  assert(_heap_bottom <= addr && addr < _heap_end,
         err_msg("addr: " PTR_FORMAT " bottom: " PTR_FORMAT " end: " PTR_FORMAT,
                 addr, _heap_bottom, _heap_end));
  size_t index_biased = addr_to_index_biased(addr);
  HeapRegion* hr = _regions_biased[index_biased];
  assert(hr != NULL, "invariant");
  return hr;
}

inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
  if (addr != NULL && addr < _heap_end) {
    assert(addr >= _heap_bottom,
           err_msg("addr: " PTR_FORMAT " bottom: " PTR_FORMAT,
                   addr, _heap_bottom));
    return addr_to_region_unsafe(addr);
  }
  return NULL;
}

inline HeapRegion* HeapRegionSeq::at(size_t index) const {
  assert(index < length(), "pre-condition");
  HeapRegion* hr = _regions[index];
  assert(hr != NULL, "sanity");
  assert(hr->hrs_index() == index, "sanity");
  return hr;
}
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
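The lookup path above is the point of the _regions_biased field: every region is 2^_region_shift bytes and region i starts at _heap_bottom + (i << _region_shift), so a base pointer pre-offset by -(_heap_bottom >> _region_shift) lets (addr >> _region_shift) index the array directly, with no subtraction per lookup. A self-contained sketch of the same trick on plain integers (the bias computation mirrors what the commit's initialize() presumably sets up; all names and constants here are illustrative):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

int main() {
  const unsigned shift = 20;                 // pretend 1 MB regions
  const uintptr_t heap_bottom = 0x40000000;  // pretend reserved base
  const size_t num_regions = 16;

  std::vector<int> regions(num_regions, 0);  // stand-in for HeapRegion*[]
  // Bias the base so that (addr >> shift) indexes it directly.
  // (Out-of-range pointer arithmetic, as in the VM's own trick.)
  int* regions_biased = regions.data() - (heap_bottom >> shift);

  uintptr_t addr = heap_bottom + 5 * (uintptr_t(1) << shift) + 123;
  size_t index_biased = addr >> shift;       // addr_to_index_biased()
  int* hr = &regions_biased[index_biased];   // addr_to_region_unsafe()

  assert(hr == &regions[5]);                 // no per-lookup subtraction
  return 0;
}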
src/share/vm/gc_implementation/g1/sparsePRT.cpp
View file @ d2f0b980
...
...
@@ -481,8 +481,9 @@ size_t SparsePRT::mem_size() const {
bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
#if SPARSE_PRT_VERBOSE
  gclog_or_tty->print_cr(" Adding card %d from region %d to region %d sparse.",
                         card_index, region_id, _hr->hrs_index());
  gclog_or_tty->print_cr(" Adding card %d from region %d to region "
                         SIZE_FORMAT " sparse.",
                         card_index, region_id, _hr->hrs_index());
#endif
  if (_next->occupied_entries() * 2 > _next->capacity()) {
    expand();
...
...
@@ -533,8 +534,8 @@ void SparsePRT::expand() {
  _next = new RSHashTable(last->capacity() * 2);
#if SPARSE_PRT_VERBOSE
  gclog_or_tty->print_cr(" Expanded sparse table for %d to %d.",
                         _hr->hrs_index(), _next->capacity());
  gclog_or_tty->print_cr(" Expanded sparse table for " SIZE_FORMAT " to %d.",
                         _hr->hrs_index(), _next->capacity());
#endif
  for (size_t i = 0; i < last->capacity(); i++) {
    SparsePRTEntry* e = last->entry((int) i);
...
...
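Both sparsePRT.cpp hunks follow from hrs_index() now returning size_t instead of int: passing a size_t to a %d conversion is wrong on LP64 targets, so the format strings switch to HotSpot's SIZE_FORMAT macro. The same portability concern in standard C++ is handled with the z length modifier, roughly:

#include <cstdio>
#include <cstddef>

int main() {
  size_t hrs_index = 42;
  // printf("%d", hrs_index) has undefined behaviour where size_t is
  // 64-bit but int is 32-bit; the z modifier matches size_t exactly.
  std::printf("Expanded sparse table for %zu.\n", hrs_index);
  return 0;
}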