openanolis / dragonwell8_hotspot

Commit d2f0b980
Authored on June 10, 2011 by minqi
Merge
Parents: e5224dcd, f122f55f

Showing 11 changed files with 561 additions and 557 deletions (+561 -557)
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp            +129  -145
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp             +31   -31
src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp       +8   -10
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp            +6   -15
src/share/vm/gc_implementation/g1/heapRegion.cpp                  +12   -17
src/share/vm/gc_implementation/g1/heapRegion.hpp                  +13   -11
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp             +7    -5
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp              +204  -243
src/share/vm/gc_implementation/g1/heapRegionSeq.hpp              +113   -62
src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp        +33   -14
src/share/vm/gc_implementation/g1/sparsePRT.cpp                    +5    -4
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

(This diff is collapsed and not shown here.)
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"

@@ -42,7 +43,6 @@
 // heap subsets that will yield large amounts of garbage.

 class HeapRegion;
-class HeapRegionSeq;
 class HRRSCleanupTask;
 class PermanentGenerationSpec;
 class GenerationSpec;

@@ -196,9 +196,6 @@ private:
   // The part of _g1_storage that is currently committed.
   MemRegion _g1_committed;

-  // The maximum part of _g1_storage that has ever been committed.
-  MemRegion _g1_max_committed;
-
   // The master free list. It will satisfy all new region allocations.
   MasterFreeRegionList _free_list;

@@ -222,7 +219,7 @@ private:
   void rebuild_region_lists();

   // The sequence of all heap regions in the heap.
-  HeapRegionSeq* _hrs;
+  HeapRegionSeq _hrs;

   // Alloc region used to satisfy mutator allocation requests.
   MutatorAllocRegion _mutator_alloc_region;

@@ -421,13 +418,15 @@ protected:
   // Attempt to satisfy a humongous allocation request of the given
   // size by finding a contiguous set of free regions of num_regions
   // length and remove them from the master free list. Return the
-  // index of the first region or -1 if the search was unsuccessful.
-  int humongous_obj_allocate_find_first(size_t num_regions,
-                                        size_t word_size);
+  // index of the first region or G1_NULL_HRS_INDEX if the search
+  // was unsuccessful.
+  size_t humongous_obj_allocate_find_first(size_t num_regions,
+                                           size_t word_size);

   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
   // humongous region.
-  HeapWord* humongous_obj_allocate_initialize_regions(int first,
+  HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
                                                       size_t num_regions,
                                                       size_t word_size);

@@ -587,8 +586,8 @@ public:
   void register_region_with_in_cset_fast_test(HeapRegion* r) {
     assert(_in_cset_fast_test_base != NULL, "sanity");
     assert(r->in_collection_set(), "invariant");
-    int index = r->hrs_index();
-    assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant");
+    size_t index = r->hrs_index();
+    assert(index < _in_cset_fast_test_length, "invariant");
     assert(!_in_cset_fast_test_base[index], "invariant");
     _in_cset_fast_test_base[index] = true;
   }
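The register_region_with_in_cset_fast_test() hunk above switches the region index from int to size_t and drops the now-redundant lower-bound check. A minimal standalone sketch of the underlying idea, a per-region boolean table indexed by the region's sequence index for O(1) "is this region in the collection set" queries; the names here are illustrative, not the HotSpot types:

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for G1's in-collection-set fast test:
// one flag per heap region, indexed by region number.
class InCSetFastTest {
public:
  explicit InCSetFastTest(size_t num_regions) : _flags(num_regions, false) {}

  void register_region(size_t index) {
    assert(index < _flags.size());        // size_t needs no lower-bound check
    assert(!_flags[index]);               // a region is registered at most once
    _flags[index] = true;
  }

  bool in_cset(size_t index) const { return _flags[index]; }

private:
  std::vector<bool> _flags;
};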
@@ -754,6 +753,11 @@ protected:
                                    HumongousRegionSet* humongous_proxy_set,
                                    bool par);

+  // Notifies all the necessary spaces that the committed space has
+  // been updated (either expanded or shrunk). It should be called
+  // after _g1_storage is updated.
+  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
+
   // The concurrent marker (and the thread it runs in.)
   ConcurrentMark* _cm;
   ConcurrentMarkThread* _cmThread;

@@ -816,7 +820,6 @@ protected:
   oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
   void handle_evacuation_failure_common(oop obj, markOop m);

   // Ensure that the relevant gc_alloc regions are set.
   void get_gc_alloc_regions();

   // We're done with GC alloc regions. We are going to tear down the

@@ -967,15 +970,13 @@ public:
   }

   // The total number of regions in the heap.
-  size_t n_regions();
+  size_t n_regions() { return _hrs.length(); }

-  // The number of regions that are completely free.
-  size_t max_regions();
+  // The max number of regions in the heap.
+  size_t max_regions() { return _hrs.max_length(); }

   // The number of regions that are completely free.
   size_t free_regions() { return _free_list.length(); }

   // The number of regions that are not completely free.
   size_t used_regions() { return n_regions() - free_regions(); }

@@ -983,6 +984,10 @@ public:
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }

+  // Factory method for HeapRegion instances. It will return NULL if
+  // the allocation fails.
+  HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
+
   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;

@@ -1144,17 +1149,15 @@ public:
   // Iterate over heap regions, in address order, terminating the
   // iteration early if the "doHeapRegion" method returns "true".
-  void heap_region_iterate(HeapRegionClosure* blk);
+  void heap_region_iterate(HeapRegionClosure* blk) const;

   // Iterate over heap regions starting with r (or the first region if "r"
   // is NULL), in address order, terminating early if the "doHeapRegion"
   // method returns "true".
-  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk);
-
-  // As above but starting from the region at index idx.
-  void heap_region_iterate_from(int idx, HeapRegionClosure* blk);
-
-  HeapRegion* region_at(size_t idx);
+  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
+
+  // Return the region with the given index. It assumes the index is valid.
+  HeapRegion* region_at(size_t index) const { return _hrs.at(index); }

   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some

@@ -1195,12 +1198,14 @@ public:
   // A G1CollectedHeap will contain some number of heap regions. This
   // finds the region containing a given address, or else returns NULL.
-  HeapRegion* heap_region_containing(const void* addr) const;
+  template <class T>
+  inline HeapRegion* heap_region_containing(const T addr) const;

   // Like the above, but requires "addr" to be in the heap (to avoid a
   // null-check), and unlike the above, may return an continuing humongous
   // region.
-  HeapRegion* heap_region_containing_raw(const void* addr) const;
+  template <class T>
+  inline HeapRegion* heap_region_containing_raw(const T addr) const;

   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
   // each address in the (reserved) heap is a member of exactly

@@ -1262,7 +1267,7 @@ public:
     return true;
   }

-  bool is_in_young(oop obj) {
+  bool is_in_young(const oop obj) {
     HeapRegion* hr = heap_region_containing(obj);
     return hr != NULL && hr->is_young();
   }

@@ -1368,11 +1373,6 @@ public:
   // Override
   void print_tracing_info() const;

-  // If "addr" is a pointer into the (reserved?) heap, returns a positive
-  // number indicating the "arena" within the heap in which "addr" falls.
-  // Or else returns 0.
-  virtual int addr_to_arena_id(void* addr) const;
-
   // Convenience function to be used in situations where the heap type can be
   // asserted to be this type.
   static G1CollectedHeap* heap();
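Several search routines in this commit stop returning -1 from a signed int and instead return size_t, with G1_NULL_HRS_INDEX ((size_t) -1, see heapRegionSeq.hpp below) as the "not found" value. A small standalone sketch of that calling convention, with illustrative names:

#include <cstddef>
#include <vector>

static const size_t NULL_INDEX = (size_t) -1;   // mirrors G1_NULL_HRS_INDEX

// Return the index of the first free slot, or NULL_INDEX if none exists.
static size_t find_first_free(const std::vector<bool>& is_free) {
  for (size_t i = 0; i < is_free.size(); i += 1) {
    if (is_free[i]) return i;
  }
  return NULL_INDEX;
}

// Callers compare against the sentinel rather than a signed -1:
//   size_t first = find_first_free(map);
//   if (first != NULL_INDEX) { /* use first */ }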
src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

@@ -34,9 +34,10 @@
 // Inline functions for G1CollectedHeap

+template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing(const void* addr) const {
-  HeapRegion* hr = _hrs->addr_to_region(addr);
+G1CollectedHeap::heap_region_containing(const T addr) const {
+  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
   // hr can be null if addr in perm_gen
   if (hr != NULL && hr->continuesHumongous()) {
     hr = hr->humongous_start_region();

@@ -44,19 +45,16 @@ G1CollectedHeap::heap_region_containing(const void* addr) const {
   return hr;
 }

+template <class T>
 inline HeapRegion*
-G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
-  assert(_g1_reserved.contains(addr), "invariant");
-  size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
-                                        >> HeapRegion::LogOfHRGrainBytes;
-  HeapRegion* res = _hrs->at(index);
-  assert(res == _hrs->addr_to_region(addr), "sanity");
+G1CollectedHeap::heap_region_containing_raw(const T addr) const {
+  assert(_g1_reserved.contains((const void*) addr), "invariant");
+  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
   return res;
 }

 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
-  HeapRegion* r = _hrs->addr_to_region(obj);
+  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
   return r != NULL && r->in_collection_set();
 }
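heap_region_containing() becomes a template above so that callers can hand in any pointer-like value (an oop, a HeapWord*, a const void*) and the single cast to HeapWord* lives in one place. A rough standalone illustration of the pattern, not the HotSpot implementation; all names here are invented for the sketch:

#include <cstddef>
#include <cstdint>

struct Region { std::uintptr_t bottom; std::uintptr_t end; };

class RegionTable {
public:
  RegionTable(Region* regions, std::size_t n,
              std::uintptr_t heap_bottom, unsigned shift)
    : _regions(regions), _n(n), _heap_bottom(heap_bottom), _shift(shift) {}

  // One template handles every pointer-like argument type; the conversion
  // to an integer address is centralized here instead of at each call site.
  template <class T>
  Region* containing(T addr) const {
    std::uintptr_t a = (std::uintptr_t) addr;
    std::size_t index = (a - _heap_bottom) >> _shift;   // wraps if below bottom
    return index < _n ? &_regions[index] : nullptr;     // out of range -> NULL
  }

private:
  Region*        _regions;
  std::size_t    _n;
  std::uintptr_t _heap_bottom;
  unsigned       _shift;
};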
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

@@ -2639,11 +2639,8 @@ add_to_collection_set(HeapRegion* hr) {
   assert(!hr->is_young(), "non-incremental add of young region");

   if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr("added region to cset "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
+    gclog_or_tty->print_cr("added region to cset "HR_FORMAT,
+                           HR_FORMAT_PARAMS(hr));
   }

   if (_g1->mark_in_progress())

@@ -2813,11 +2810,8 @@ void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
   _inc_cset_tail = hr;

   if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", young %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), (hr->is_young()) ? "YES" : "NO");
+    gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
+                           HR_FORMAT, HR_FORMAT_PARAMS(hr));
   }
 }

@@ -2838,11 +2832,8 @@ void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
   _inc_cset_head = hr;

   if (G1PrintHeapRegions) {
-    gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
-                           "%d:["PTR_FORMAT", "PTR_FORMAT"], "
-                           "top "PTR_FORMAT", young %s",
-                           hr->hrs_index(), hr->bottom(), hr->end(),
-                           hr->top(), (hr->is_young()) ? "YES" : "NO");
+    gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
+                           HR_FORMAT, HR_FORMAT_PARAMS(hr));
   }
 }
src/share/vm/gc_implementation/g1/heapRegion.cpp

@@ -159,20 +159,16 @@ public:
         gclog_or_tty->print_cr("----------");
       }
       gclog_or_tty->print_cr("Missing rem set entry:");
-      gclog_or_tty->print_cr("Field "PTR_FORMAT
-                             " of obj "PTR_FORMAT
-                             ", in region %d ["PTR_FORMAT
-                             ", "PTR_FORMAT"),",
-                             p, (void*) _containing_obj,
-                             from->hrs_index(),
-                             from->bottom(),
-                             from->end());
+      gclog_or_tty->print_cr("Field "PTR_FORMAT" "
+                             "of obj "PTR_FORMAT", "
+                             "in region "HR_FORMAT,
+                             p, (void*) _containing_obj,
+                             HR_FORMAT_PARAMS(from));
       _containing_obj->print_on(gclog_or_tty);
-      gclog_or_tty->print_cr("points to obj "PTR_FORMAT
-                             " in region %d ["PTR_FORMAT
-                             ", "PTR_FORMAT").",
-                             (void*) obj, to->hrs_index(),
-                             to->bottom(), to->end());
+      gclog_or_tty->print_cr("points to obj "PTR_FORMAT" "
+                             "in region "HR_FORMAT,
+                             (void*) obj, HR_FORMAT_PARAMS(to));
       obj->print_on(gclog_or_tty);
       gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                              cv_obj, cv_field);

@@ -484,11 +480,10 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
-HeapRegion::HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
+HeapRegion::HeapRegion(size_t hrs_index,
+                       G1BlockOffsetSharedArray* sharedOffsetArray,
                        MemRegion mr, bool is_zeroed)
   : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
-    _next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(-1),
+    _next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(hrs_index),
     _humongous_type(NotHumongous), _humongous_start_region(NULL),
     _in_collection_set(false), _is_gc_alloc_region(false),
     _next_in_special_set(NULL), _orig_end(NULL),
src/share/vm/gc_implementation/g1/heapRegion.hpp

@@ -52,9 +52,11 @@ class HeapRegionRemSetIterator;
 class HeapRegion;
 class HeapRegionSetBase;

-#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
-#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \
-                               (_hr_)->top(), (_hr_)->end()
+#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
+#define HR_FORMAT_PARAMS(_hr_)                                                \
+                (_hr_)->hrs_index(),                                          \
+                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
+                (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()

 // A dirty card to oop closure for heap regions.  It
 // knows how to get the G1 heap and how to use the bitmap

@@ -237,9 +239,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

  protected:
-  // If this region is a member of a HeapRegionSeq, the index in that
-  // sequence, otherwise -1.
-  int  _hrs_index;
+  // The index of this region in the heap region sequence.
+  size_t _hrs_index;

   HumongousType _humongous_type;
   // For a humongous region, region in which it starts.

@@ -296,8 +297,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   enum YoungType {
     NotYoung,                   // a region is not young
     Young,                      // a region is young
-    Survivor                    // a region is young and it contains
-                                // survivor
+    Survivor                    // a region is young and it contains survivors
   };

   volatile YoungType _young_type;

@@ -351,7 +351,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
  public:
   // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
-  HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
+  HeapRegion(size_t hrs_index,
+             G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr, bool is_zeroed);

   static int LogOfHRGrainBytes;

@@ -393,8 +394,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // If this region is a member of a HeapRegionSeq, the index in that
   // sequence, otherwise -1.
-  int hrs_index() const { return _hrs_index; }
-  void set_hrs_index(int index) { _hrs_index = index; }
+  size_t hrs_index() const { return _hrs_index; }

   // The number of bytes marked live in the region in the last marking phase.
   size_t marked_bytes()    { return _prev_marked_bytes; }

@@ -579,6 +579,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

+  HeapWord* orig_end() { return _orig_end; }
+
   // Allows logical separation between objects allocated before and after.
   void save_marks();
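The new HR_FORMAT / HR_FORMAT_PARAMS pair keeps the format string and the matching argument list for a heap region in one place, so every log site (see the g1CollectorPolicy.cpp and heapRegion.cpp hunks above) prints regions the same way. A simplified standalone analogue of the technique; the Region struct and macro names below are invented for the sketch:

#include <cstddef>
#include <cstdio>

struct Region {
  std::size_t index;
  bool        survivor;
  bool        young;
  void*       bottom;
  void*       top;
  void*       end;
};

// Format string and its argument list are defined side by side, in the
// spirit of HR_FORMAT / HR_FORMAT_PARAMS, so callers cannot let them drift.
#define REGION_FORMAT "%zu:(%s)[%p,%p,%p]"
#define REGION_FORMAT_PARAMS(r) \
  (r).index, (r).survivor ? "S" : (r).young ? "E" : "-", \
  (r).bottom, (r).top, (r).end

int main() {
  Region r = {5, false, true, (void*) 0x1000, (void*) 0x1800, (void*) 0x2000};
  std::printf("added region to cset " REGION_FORMAT "\n", REGION_FORMAT_PARAMS(r));
  return 0;
}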
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp

@@ -834,7 +834,7 @@ PosParPRT* OtherRegionsTable::delete_region_table() {
 #endif

   // Set the corresponding coarse bit.
-  int max_hrs_index = max->hr()->hrs_index();
+  size_t max_hrs_index = max->hr()->hrs_index();
   if (!_coarse_map.at(max_hrs_index)) {
     _coarse_map.at_put(max_hrs_index, true);
     _n_coarse_entries++;

@@ -860,7 +860,8 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                               BitMap* region_bm, BitMap* card_bm) {
   // First eliminated garbage regions from the coarse map.
   if (G1RSScrubVerbose)
-    gclog_or_tty->print_cr("Scrubbing region %d:", hr()->hrs_index());
+    gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":",
+                           hr()->hrs_index());

   assert(_coarse_map.size() == region_bm->size(), "Precondition");
   if (G1RSScrubVerbose)

@@ -878,7 +879,8 @@ void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
     PosParPRT* nxt = cur->next();
     // If the entire region is dead, eliminate.
     if (G1RSScrubVerbose)
-      gclog_or_tty->print_cr("  For other region %d:", cur->hr()->hrs_index());
+      gclog_or_tty->print_cr("  For other region "SIZE_FORMAT":",
+                             cur->hr()->hrs_index());
     if (!region_bm->at(cur->hr()->hrs_index())) {
       *prev = nxt;
       cur->set_next(NULL);

@@ -994,7 +996,7 @@ void OtherRegionsTable::clear() {
 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
-  size_t hrs_ind = (size_t) from_hr->hrs_index();
+  size_t hrs_ind = from_hr->hrs_index();
   size_t ind = hrs_ind & _mod_max_fine_entries_mask;
   if (del_single_region_table(ind, from_hr)) {
     assert(!_coarse_map.at(hrs_ind), "Inv");

@@ -1002,7 +1004,7 @@ void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
     _coarse_map.par_at_put(hrs_ind, 0);
   }
   // Check to see if any of the fcc entries come from here.
-  int hr_ind = hr()->hrs_index();
+  size_t hr_ind = hr()->hrs_index();
   for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
     int fcc_ent = _from_card_cache[tid][hr_ind];
     if (fcc_ent != -1) {
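The remembered-set code above follows the hrs_index() type change: local copies become size_t and %d in the log messages becomes SIZE_FORMAT (HotSpot's portable format macro for size_t, whose width differs between 32-bit and LP64 builds). A tiny standalone illustration of the same portability concern using the standard %zu length modifier instead of the HotSpot macro:

#include <cstddef>
#include <cstdio>

int main() {
  std::size_t hrs_index = 123456;
  // %d would be wrong for a 64-bit size_t; %zu (or a format macro) is portable.
  std::printf("Scrubbing region %zu:\n", hrs_index);
  return 0;
}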
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp

@@ -23,259 +23,182 @@
  */

 #include "precompiled.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/g1/heapRegionSets.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "memory/allocation.hpp"

-// Local to this file.
-static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
-  if ((*hr1p)->end() <= (*hr2p)->bottom()) return -1;
-  else if ((*hr2p)->end() <= (*hr1p)->bottom()) return 1;
-  else if (*hr1p == *hr2p) return 0;
-  else {
-    assert(false, "We should never compare distinct overlapping regions.");
-  }
-  return 0;
-}
-
-HeapRegionSeq::HeapRegionSeq(const size_t max_size) :
-  _alloc_search_start(0),
-  // The line below is the worst bit of C++ hackery I've ever written
-  // (Detlefs, 11/23).  You should think of it as equivalent to
-  // "_regions(100, true)": initialize the growable array and inform it
-  // that it should allocate its elem array(s) on the C heap.
-  //
-  // The first argument, however, is actually a comma expression
-  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
-  // set_allocation_type() call is to replace the default allocation
-  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
-  // allow to pass the assert in GenericGrowableArray() which checks
-  // that a growable array object must be on C heap if elements are.
-  //
-  // Note: containing object is allocated on C heap since it is CHeapObj.
-  //
-  _regions((ResourceObj::set_allocation_type((address) &_regions,
-                                             ResourceObj::C_HEAP),
-            (int) max_size),
-           true),
-  _next_rr_candidate(0),
-  _seq_bottom(NULL)
-{}
-
-// Private methods.
-
-void HeapRegionSeq::print_empty_runs() {
-  int empty_run = 0;
-  int n_empty = 0;
-  int empty_run_start;
-  for (int i = 0; i < _regions.length(); i++) {
-    HeapRegion* r = _regions.at(i);
-    if (r->continuesHumongous()) continue;
-    if (r->is_empty()) {
-      assert(!r->isHumongous(), "H regions should not be empty.");
-      if (empty_run == 0) empty_run_start = i;
-      empty_run++;
-      n_empty++;
-    } else {
-      if (empty_run > 0) {
-        gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
-        empty_run = 0;
-      }
-    }
-  }
-  if (empty_run > 0) {
-    gclog_or_tty->print(" %d:%d", empty_run_start, empty_run);
-  }
-  gclog_or_tty->print_cr(" [tot = %d]", n_empty);
-}
-
-int HeapRegionSeq::find(HeapRegion* hr) {
-  // FIXME: optimized for adjacent regions of fixed size.
-  int ind = hr->hrs_index();
-  if (ind != -1) {
-    assert(_regions.at(ind) == hr, "Mismatch");
-  }
-  return ind;
-}
-
-// Public methods.
-
-void HeapRegionSeq::insert(HeapRegion* hr) {
-  assert(!_regions.is_full(), "Too many elements in HeapRegionSeq");
-  if (_regions.length() == 0
-      || _regions.top()->end() <= hr->bottom()) {
-    hr->set_hrs_index(_regions.length());
-    _regions.append(hr);
-  } else {
-    _regions.append(hr);
-    _regions.sort(orderRegions);
-    for (int i = 0; i < _regions.length(); i++) {
-      _regions.at(i)->set_hrs_index(i);
-    }
-  }
-  char* bot = (char*) _regions.at(0)->bottom();
-  if (_seq_bottom == NULL || bot < _seq_bottom) _seq_bottom = bot;
-}
-
-size_t HeapRegionSeq::length() {
-  return _regions.length();
-}
-
-size_t HeapRegionSeq::free_suffix() {
-  size_t res = 0;
-  int first = _regions.length() - 1;
-  int cur = first;
-  while (cur >= 0 &&
-         (_regions.at(cur)->is_empty()
-          && (first == cur
-              || (_regions.at(cur+1)->bottom() ==
-                  _regions.at(cur)->end())))) {
-    res++;
-    cur--;
-  }
-  return res;
-}
-
-int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
-  assert(num > 1, "pre-condition");
-  assert(0 <= from && from <= _regions.length(),
-         err_msg("from: %d should be valid and <= than %d",
-                 from, _regions.length()));
-  int curr = from;
-  int first = -1;
+// Private
+
+size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
+  size_t len = length();
+  assert(num > 1, "use this only for sequences of length 2 or greater");
+  assert(from <= len,
+         err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT,
+                 from, len));
+
+  size_t curr = from;
+  size_t first = G1_NULL_HRS_INDEX;
   size_t num_so_far = 0;
-  while (curr < _regions.length() && num_so_far < num) {
-    HeapRegion* curr_hr = _regions.at(curr);
-    if (curr_hr->is_empty()) {
-      if (first == -1) {
+  while (curr < len && num_so_far < num) {
+    if (at(curr)->is_empty()) {
+      if (first == G1_NULL_HRS_INDEX) {
         first = curr;
         num_so_far = 1;
       } else {
         num_so_far += 1;
       }
     } else {
-      first = -1;
+      first = G1_NULL_HRS_INDEX;
       num_so_far = 0;
     }
     curr += 1;
   }
   assert(num_so_far <= num, "post-condition");
   if (num_so_far == num) {
     // we found enough space for the humongous object
-    assert(from <= first && first < _regions.length(), "post-condition");
-    assert(first < curr && (curr - first) == (int) num, "post-condition");
-    for (int i = first; i < first + (int) num; ++i) {
-      assert(_regions.at(i)->is_empty(), "post-condition");
+    assert(from <= first && first < len, "post-condition");
+    assert(first < curr && (curr - first) == num, "post-condition");
+    for (size_t i = first; i < first + num; ++i) {
+      assert(at(i)->is_empty(), "post-condition");
     }
     return first;
   } else {
     // we failed to find enough space for the humongous object
-    return -1;
+    return G1_NULL_HRS_INDEX;
   }
 }

-int HeapRegionSeq::find_contiguous(size_t num) {
-  assert(num > 1, "otherwise we should not be calling this");
-  assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
-         err_msg("_alloc_search_start: %d should be valid and <= than %d",
-                 _alloc_search_start, _regions.length()));
-  int start = _alloc_search_start;
-  int res = find_contiguous_from(start, num);
-  if (res == -1 && start != 0) {
-    // Try starting from the beginning. If _alloc_search_start was 0,
-    // no point in doing this again.
-    res = find_contiguous_from(0, num);
-  }
-  if (res != -1) {
-    assert(0 <= res && res < _regions.length(),
-           err_msg("res: %d should be valid", res));
-    _alloc_search_start = res + (int) num;
-    assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
-           err_msg("_alloc_search_start: %d should be valid",
-                   _alloc_search_start));
-  }
-  return res;
-}
-
-void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
-  iterate_from((HeapRegion*) NULL, blk);
-}
-
-// The first argument r is the heap region at which iteration begins.
-// This operation runs fastest when r is NULL, or the heap region for
-// which a HeapRegionClosure most recently returned true, or the
-// heap region immediately to its right in the sequence.  In all
-// other cases a linear search is required to find the index of r.
-
-void HeapRegionSeq::iterate_from(HeapRegion* r, HeapRegionClosure* blk) {
-  // :::: FIXME ::::
-  // Static cache value is bad, especially when we start doing parallel
-  // remembered set update. For now just don't cache anything (the
-  // code in the def'd out blocks).
-#if 0
-  static int cached_j = 0;
-#endif
-  int len = _regions.length();
-  int j = 0;
-  // Find the index of r.
-  if (r != NULL) {
-#if 0
-    assert(cached_j >= 0, "Invariant.");
-    if ((cached_j < len) && (r == _regions.at(cached_j))) {
-      j = cached_j;
-    } else if ((cached_j + 1 < len) && (r == _regions.at(cached_j + 1))) {
-      j = cached_j + 1;
-    } else {
-#endif
-      j = find(r);
-#if 0
-    }
-#endif
-    if (j < 0) {
-      j = 0;
-    }
-  }
-  int i;
-  for (i = j; i < len; i += 1) {
-    int res = blk->doHeapRegion(_regions.at(i));
-    if (res) {
-#if 0
-      cached_j = i;
-#endif
-      blk->incomplete();
-      return;
-    }
-  }
-  for (i = 0; i < j; i += 1) {
-    int res = blk->doHeapRegion(_regions.at(i));
-    if (res) {
-#if 0
-      cached_j = i;
-#endif
-      blk->incomplete();
-      return;
-    }
-  }
-}
-
-void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {
-  int len = _regions.length();
-  int i;
-  for (i = idx; i < len; i++) {
-    if (blk->doHeapRegion(_regions.at(i))) {
-      blk->incomplete();
-      return;
-    }
-  }
-  for (i = 0; i < idx; i++) {
-    if (blk->doHeapRegion(_regions.at(i))) {
-      blk->incomplete();
-      return;
-    }
-  }
-}
+// Public
+
+void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
+                               size_t max_length) {
+  assert((size_t) bottom % HeapRegion::GrainBytes == 0,
+         "bottom should be heap region aligned");
+  assert((size_t) end % HeapRegion::GrainBytes == 0,
+         "end should be heap region aligned");
+
+  _length = 0;
+  _heap_bottom = bottom;
+  _heap_end = end;
+  _region_shift = HeapRegion::LogOfHRGrainBytes;
+  _next_search_index = 0;
+  _allocated_length = 0;
+  _max_length = max_length;
+
+  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
+  memset(_regions, 0, max_length * sizeof(HeapRegion*));
+  _regions_biased = _regions - ((size_t) bottom >> _region_shift);
+
+  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
+         "bottom should be included in the region with index 0");
+}
+
+MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
+                                   HeapWord* new_end,
+                                   FreeRegionList* list) {
+  assert(old_end < new_end, "don't call it otherwise");
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  HeapWord* next_bottom = old_end;
+  assert(_heap_bottom <= next_bottom, "invariant");
+  while (next_bottom < new_end) {
+    assert(next_bottom < _heap_end, "invariant");
+    size_t index = length();
+
+    assert(index < _max_length, "otherwise we cannot expand further");
+    if (index == 0) {
+      // We have not allocated any regions so far
+      assert(next_bottom == _heap_bottom, "invariant");
+    } else {
+      // next_bottom should match the end of the last/previous region
+      assert(next_bottom == at(index - 1)->end(), "invariant");
+    }
+
+    if (index == _allocated_length) {
+      // We have to allocate a new HeapRegion.
+      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
+      if (new_hr == NULL) {
+        // allocation failed, we bail out and return what we have done so far
+        return MemRegion(old_end, next_bottom);
+      }
+      assert(_regions[index] == NULL, "invariant");
+      _regions[index] = new_hr;
+      increment_length(&_allocated_length);
+    }
+    // Have to increment the length first, otherwise we will get an
+    // assert failure at(index) below.
+    increment_length(&_length);
+    HeapRegion* hr = at(index);
+    list->add_as_tail(hr);
+
+    next_bottom = hr->end();
+  }
+  assert(next_bottom == new_end, "post-condition");
+  return MemRegion(old_end, next_bottom);
+}
+
+size_t HeapRegionSeq::free_suffix() {
+  size_t res = 0;
+  size_t index = length();
+  while (index > 0) {
+    index -= 1;
+    if (!at(index)->is_empty()) {
+      break;
+    }
+    res += 1;
+  }
+  return res;
+}
+
+size_t HeapRegionSeq::find_contiguous(size_t num) {
+  assert(num > 1, "use this only for sequences of length 2 or greater");
+  assert(_next_search_index <= length(),
+         err_msg("_next_search_indeex: "SIZE_FORMAT" "
+                 "should be valid and <= than "SIZE_FORMAT,
+                 _next_search_index, length()));
+
+  size_t start = _next_search_index;
+  size_t res = find_contiguous_from(start, num);
+  if (res == G1_NULL_HRS_INDEX && start > 0) {
+    // Try starting from the beginning. If _next_search_index was 0,
+    // no point in doing this again.
+    res = find_contiguous_from(0, num);
+  }
+  if (res != G1_NULL_HRS_INDEX) {
+    assert(res < length(),
+           err_msg("res: "SIZE_FORMAT" should be valid", res));
+    _next_search_index = res + num;
+    assert(_next_search_index <= length(),
+           err_msg("_next_search_indeex: "SIZE_FORMAT" "
+                   "should be valid and <= than "SIZE_FORMAT,
+                   _next_search_index, length()));
+  }
+  return res;
+}
+
+void HeapRegionSeq::iterate(HeapRegionClosure* blk) const {
+  iterate_from((HeapRegion*) NULL, blk);
+}
+
+void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
+  size_t hr_index = 0;
+  if (hr != NULL) {
+    hr_index = (size_t) hr->hrs_index();
+  }
+
+  size_t len = length();
+  for (size_t i = hr_index; i < len; i += 1) {
+    bool res = blk->doHeapRegion(at(i));
+    if (res) {
+      blk->incomplete();
+      return;
+    }
+  }
+  for (size_t i = 0; i < hr_index; i += 1) {
+    bool res = blk->doHeapRegion(at(i));
+    if (res) {
+      blk->incomplete();
+      return;
+    }
+  }
+}
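The new find_contiguous_from() above scans the committed regions for num consecutive empty ones, resetting its running count whenever it hits a non-empty region, and reports failure with a sentinel index. A compact standalone version of the same scan (names are illustrative, not the HotSpot API):

#include <cstddef>
#include <vector>

static const size_t NOT_FOUND = (size_t) -1;   // plays the role of G1_NULL_HRS_INDEX

// Return the start index of the first run of `num` consecutive true ("empty")
// entries at or after `from`, or NOT_FOUND if no such run exists.
static size_t find_contiguous_from(const std::vector<bool>& empty,
                                   size_t from, size_t num) {
  size_t first  = NOT_FOUND;
  size_t so_far = 0;
  for (size_t curr = from; curr < empty.size() && so_far < num; curr += 1) {
    if (empty[curr]) {
      if (first == NOT_FOUND) { first = curr; so_far = 1; }
      else                    { so_far += 1; }
    } else {
      // run broken: start counting again at the next empty region
      first  = NOT_FOUND;
      so_far = 0;
    }
  }
  return so_far == num ? first : NOT_FOUND;
}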
@@ -283,54 +206,92 @@ void HeapRegionSeq::iterate_from(int idx, HeapRegionClosure* blk) {

 MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
-                                   size_t& num_regions_deleted) {
+                                   size_t* num_regions_deleted) {
   // Reset this in case it's currently pointing into the regions that
   // we just removed.
-  _alloc_search_start = 0;
+  _next_search_index = 0;

   assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
   assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
+  assert(length() > 0, "the region sequence should not be empty");
+  assert(length() <= _allocated_length, "invariant");
+  assert(_allocated_length > 0, "we should have at least one region committed");

-  if (_regions.length() == 0) {
-    num_regions_deleted = 0;
-    return MemRegion();
-  }
-  int j = _regions.length() - 1;
-  HeapWord* end = _regions.at(j)->end();
+  // around the loop, i will be the next region to be removed
+  size_t i = length() - 1;
+  assert(i > 0, "we should never remove all regions");
+
+  // [last_start, end) is the MemRegion that covers the regions we will remove.
+  HeapWord* end = at(i)->end();
   HeapWord* last_start = end;
-  while (j >= 0 && shrink_bytes > 0) {
-    HeapRegion* cur = _regions.at(j);
-    // We have to leave humongous regions where they are,
-    // and work around them.
-    if (cur->isHumongous()) {
-      return MemRegion(last_start, end);
-    }
-    assert(cur == _regions.top(), "Should be top");
+  *num_regions_deleted = 0;
+  while (shrink_bytes > 0) {
+    HeapRegion* cur = at(i);
+    // We should leave the humongous regions where they are.
+    if (cur->isHumongous()) break;
+    // We should stop shrinking if we come across a non-empty region.
     if (!cur->is_empty()) break;
+
+    i -= 1;
+    *num_regions_deleted += 1;
     shrink_bytes -= cur->capacity();
-    num_regions_deleted++;
-    _regions.pop();
     last_start = cur->bottom();
-    // We need to delete these somehow, but can't currently do so here: if
-    // we do, the ZF thread may still access the deleted region. We'll
-    // leave this here as a reminder that we have to do something about
-    // this.
-    // delete cur;
-    j--;
+    decrement_length(&_length);
+    // We will reclaim the HeapRegion. _allocated_length should be
+    // covering this index. So, even though we removed the region from
+    // the active set by decreasing _length, we still have it
+    // available in the future if we need to re-use it.
+    assert(i > 0, "we should never remove all regions");
+    assert(length() > 0, "we should never remove all regions");
   }
   return MemRegion(last_start, end);
 }

-class PrintHeapRegionClosure : public HeapRegionClosure {
-public:
-  bool doHeapRegion(HeapRegion* r) {
-    gclog_or_tty->print(PTR_FORMAT ":", r);
-    r->print();
-    return false;
-  }
-};
-
-void HeapRegionSeq::print() {
-  PrintHeapRegionClosure cl;
-  iterate(&cl);
-}
+#ifndef PRODUCT
+void HeapRegionSeq::verify_optional() {
+  guarantee(_length <= _allocated_length,
+            err_msg("invariant: _length: "SIZE_FORMAT" "
+                    "_allocated_length: "SIZE_FORMAT,
+                    _length, _allocated_length));
+  guarantee(_allocated_length <= _max_length,
+            err_msg("invariant: _allocated_length: "SIZE_FORMAT" "
+                    "_max_length: "SIZE_FORMAT,
+                    _allocated_length, _max_length));
+  guarantee(_next_search_index <= _length,
+            err_msg("invariant: _next_search_index: "SIZE_FORMAT" "
+                    "_length: "SIZE_FORMAT,
+                    _next_search_index, _length));
+
+  HeapWord* prev_end = _heap_bottom;
+  for (size_t i = 0; i < _allocated_length; i += 1) {
+    HeapRegion* hr = _regions[i];
+    guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i));
+    guarantee(hr->bottom() == prev_end,
+              err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" "
+                      "prev_end: "PTR_FORMAT,
+                      i, HR_FORMAT_PARAMS(hr), prev_end));
+    guarantee(hr->hrs_index() == i,
+              err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT,
+                      i, hr->hrs_index()));
+    if (i < _length) {
+      // Asserts will fire if i is >= _length
+      HeapWord* addr = hr->bottom();
+      guarantee(addr_to_region(addr) == hr, "sanity");
+      guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
+    } else {
+      guarantee(hr->is_empty(), "sanity");
+      guarantee(!hr->isHumongous(), "sanity");
+      // using assert instead of guarantee here since containing_set()
+      // is only available in non-product builds.
+      assert(hr->containing_set() == NULL, "sanity");
+    }
+    if (hr->startsHumongous()) {
+      prev_end = hr->orig_end();
+    } else {
+      prev_end = hr->end();
+    }
+  }
+  for (size_t i = _allocated_length; i < _max_length; i += 1) {
+    guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i));
+  }
+}
+#endif // PRODUCT
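The new shrink_by() walks backwards from the committed suffix, uncommitting whole empty regions until it has covered shrink_bytes or runs into a humongous or non-empty region, and only decrements the committed length; the HeapRegion objects themselves are kept for later re-use. A rough standalone model of that backwards walk, with illustrative names (not the HotSpot types):

#include <cstddef>
#include <vector>

struct Region {
  std::size_t capacity;
  bool        empty;
  bool        humongous;
};

// Count how many regions at the tail of `committed` could be uncommitted
// to give back roughly `shrink_bytes` bytes.
static std::size_t regions_to_uncommit(const std::vector<Region>& committed,
                                       std::size_t shrink_bytes) {
  std::size_t removed = 0;
  std::size_t i = committed.size();
  while (shrink_bytes > 0 && i > 1) {          // never remove all regions
    const Region& cur = committed[i - 1];
    if (cur.humongous || !cur.empty) break;    // stop at pinned or live regions
    // clamp to avoid size_t underflow when the last region overshoots the goal
    shrink_bytes -= (cur.capacity < shrink_bytes ? cur.capacity : shrink_bytes);
    removed += 1;
    i -= 1;
  }
  return removed;
}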
src/share/vm/gc_implementation/g1/heapRegionSeq.hpp

@@ -25,92 +25,143 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP

-#include "gc_implementation/g1/heapRegion.hpp"
-#include "utilities/growableArray.hpp"
-
 class HeapRegion;
 class HeapRegionClosure;
+class FreeRegionList;
+
+#define G1_NULL_HRS_INDEX ((size_t) -1)
+
+// This class keeps track of the region metadata (i.e., HeapRegion
+// instances). They are kept in the _regions array in address
+// order. A region's index in the array corresponds to its index in
+// the heap (i.e., 0 is the region at the bottom of the heap, 1 is
+// the one after it, etc.). Two regions that are consecutive in the
+// array should also be adjacent in the address space (i.e.,
+// region(i).end() == region(i+1).bottom().
+//
+// We create a HeapRegion when we commit the region's address space
+// for the first time. When we uncommit the address space of a
+// region we retain the HeapRegion to be able to re-use it in the
+// future (in case we recommit it).
+//
+// We keep track of three lengths:
+//
+// * _length (returned by length()) is the number of currently
+//   committed regions.
+// * _allocated_length (not exposed outside this class) is the
+//   number of regions for which we have HeapRegions.
+// * _max_length (returned by max_length()) is the maximum number of
+//   regions the heap can have.
+//
+// and maintain that: _length <= _allocated_length <= _max_length

 class HeapRegionSeq: public CHeapObj {

-  // _regions is kept sorted by start address order, and no two regions are
-  // overlapping.
-  GrowableArray<HeapRegion*> _regions;
-
-  // The index in "_regions" at which to start the next allocation search.
-  // (For efficiency only; private to obj_allocate after initialization.)
-  int _alloc_search_start;
-
-  // Currently, we're choosing collection sets in a round-robin fashion,
-  // starting here.
-  int _next_rr_candidate;
-
-  // The bottom address of the bottom-most region, or else NULL if there
-  // are no regions in the sequence.
-  char* _seq_bottom;
-
- public:
-  // Initializes "this" to the empty sequence of regions.
-  HeapRegionSeq(const size_t max_size);
-
-  // Adds "hr" to "this" sequence. Requires "hr" not to overlap with
-  // any region already in "this". (Will perform better if regions are
-  // inserted in ascending address order.)
-  void insert(HeapRegion* hr);
-
-  // Given a HeapRegion*, returns its index within _regions,
-  // or returns -1 if not found.
-  int find(HeapRegion* hr);
-
-  // Requires the index to be valid, and return the region at the index.
-  HeapRegion* at(size_t i) { return _regions.at((int) i); }
-
-  // Return the number of regions in the sequence.
-  size_t length();
-
-  // Returns the number of contiguous regions at the end of the sequence
-  // that are available for allocation.
-  size_t free_suffix();
-
-  // Finds a contiguous set of empty regions of length num, starting
-  // from a given index.
-  int find_contiguous_from(int from, size_t num);
-
-  // Find a contiguous set of empty regions of length num and return
-  // the index of the first region or -1 if the search was unsuccessful.
-  int find_contiguous(size_t num);
-
-  // Apply the "doHeapRegion" method of "blk" to all regions in "this",
-  // in address order, terminating the iteration early
-  // if the "doHeapRegion" method returns "true".
-  void iterate(HeapRegionClosure* blk);
-
-  // Apply the "doHeapRegion" method of "blk" to all regions in "this",
-  // starting at "r" (or first region, if "r" is NULL), in a circular
-  // manner, terminating the iteration early if the "doHeapRegion" method
-  // returns "true".
-  void iterate_from(HeapRegion* r, HeapRegionClosure* blk);
-
-  // As above, but start from a given index in the sequence
-  // instead of a given heap region.
-  void iterate_from(int idx, HeapRegionClosure* blk);
-
-  // Requires "shrink_bytes" to be a multiple of the page size and heap
-  // region granularity. Deletes as many "rightmost" completely free heap
-  // regions from the sequence as comprise shrink_bytes bytes. Returns the
-  // MemRegion indicating the region those regions comprised, and sets
-  // "num_regions_deleted" to the number of regions deleted.
-  MemRegion shrink_by(size_t shrink_bytes, size_t& num_regions_deleted);
-
-  // If "addr" falls within a region in the sequence, return that region,
-  // or else NULL.
-  inline HeapRegion* addr_to_region(const void* addr);
-
-  void print();
-
-  // Prints out runs of empty regions.
-  void print_empty_runs();
+  // The array that holds the HeapRegions.
+  HeapRegion** _regions;
+
+  // Version of _regions biased to address 0
+  HeapRegion** _regions_biased;
+
+  // The number of regions committed in the heap.
+  size_t _length;
+
+  // The address of the first reserved word in the heap.
+  HeapWord* _heap_bottom;
+
+  // The address of the last reserved word in the heap - 1.
+  HeapWord* _heap_end;
+
+  // The log of the region byte size.
+  size_t _region_shift;
+
+  // A hint for which index to start searching from for humongous
+  // allocations.
+  size_t _next_search_index;
+
+  // The number of regions for which we have allocated HeapRegions for.
+  size_t _allocated_length;
+
+  // The maximum number of regions in the heap.
+  size_t _max_length;
+
+  // Find a contiguous set of empty regions of length num, starting
+  // from the given index.
+  size_t find_contiguous_from(size_t from, size_t num);
+
+  // Map a heap address to a biased region index. Assume that the
+  // address is valid.
+  inline size_t addr_to_index_biased(HeapWord* addr) const;
+
+  void increment_length(size_t* length) {
+    assert(*length < _max_length, "pre-condition");
+    *length += 1;
+  }
+
+  void decrement_length(size_t* length) {
+    assert(*length > 0, "pre-condition");
+    *length -= 1;
+  }
+
+ public:
+  // Empty contructor, we'll initialize it with the initialize() method.
+  HeapRegionSeq() { }
+
+  void initialize(HeapWord* bottom, HeapWord* end, size_t max_length);
+
+  // Return the HeapRegion at the given index. Assume that the index
+  // is valid.
+  inline HeapRegion* at(size_t index) const;
+
+  // If addr is within the committed space return its corresponding
+  // HeapRegion, otherwise return NULL.
+  inline HeapRegion* addr_to_region(HeapWord* addr) const;
+
+  // Return the HeapRegion that corresponds to the given
+  // address. Assume the address is valid.
+  inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
+
+  // Return the number of regions that have been committed in the heap.
+  size_t length() const { return _length; }
+
+  // Return the maximum number of regions in the heap.
+  size_t max_length() const { return _max_length; }
+
+  // Expand the sequence to reflect that the heap has grown from
+  // old_end to new_end. Either create new HeapRegions, or re-use
+  // existing ones, and return them in the given list. Returns the
+  // memory region that covers the newly-created regions. If a
+  // HeapRegion allocation fails, the result memory region might be
+  // smaller than the desired one.
+  MemRegion expand_by(HeapWord* old_end, HeapWord* new_end,
+                      FreeRegionList* list);
+
+  // Return the number of contiguous regions at the end of the sequence
+  // that are available for allocation.
+  size_t free_suffix();
+
+  // Find a contiguous set of empty regions of length num and return
+  // the index of the first region or G1_NULL_HRS_INDEX if the
+  // search was unsuccessful.
+  size_t find_contiguous(size_t num);
+
+  // Apply blk->doHeapRegion() on all committed regions in address order,
+  // terminating the iteration early if doHeapRegion() returns true.
+  void iterate(HeapRegionClosure* blk) const;
+
+  // As above, but start the iteration from hr and loop around. If hr
+  // is NULL, we start from the first region in the heap.
+  void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;
+
+  // Tag as uncommitted as many regions that are completely free as
+  // possible, up to shrink_bytes, from the suffix of the committed
+  // sequence. Return a MemRegion that corresponds to the address
+  // range of the uncommitted regions. Assume shrink_bytes is page and
+  // heap region aligned.
+  MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted);
+
+  // Do some sanity checking.
+  void verify_optional() PRODUCT_RETURN;
 };

 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
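The class comment above describes three monotonically related lengths: _length (committed regions), _allocated_length (HeapRegion objects ever created), and _max_length (reserved capacity), with _length <= _allocated_length <= _max_length. A stripped-down standalone sketch of how an expand step keeps that invariant while re-using Region objects kept around from an earlier shrink (illustrative types and names, not the HotSpot classes):

#include <cassert>
#include <cstddef>

struct Region { /* region metadata would live here */ };

class RegionSeq {
public:
  explicit RegionSeq(std::size_t max_length)
    : _regions(new Region*[max_length]()),   // value-initialized to nullptr
      _length(0), _allocated_length(0), _max_length(max_length) {}

  ~RegionSeq() {
    for (std::size_t i = 0; i < _allocated_length; ++i) delete _regions[i];
    delete[] _regions;
  }

  // Commit one more region: allocate a Region the first time this index is
  // used, otherwise re-use the instance retained from an earlier shrink.
  Region* expand_by_one() {
    assert(_length < _max_length);
    std::size_t index = _length;
    if (index == _allocated_length) {
      _regions[index] = new Region();
      _allocated_length += 1;
    }
    _length += 1;
    assert(_length <= _allocated_length && _allocated_length <= _max_length);
    return _regions[index];
  }

  // Uncommit the last region; the Region object itself is retained.
  void shrink_by_one() {
    assert(_length > 0);
    _length -= 1;
  }

private:
  Region**    _regions;
  std::size_t _length;            // committed regions
  std::size_t _allocated_length;  // Region objects created
  std::size_t _max_length;        // reserved capacity
};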
src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp

 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -25,23 +25,42 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP

 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"

-inline HeapRegion* HeapRegionSeq::addr_to_region(const void* addr) {
-  assert(_seq_bottom != NULL, "bad _seq_bottom in addr_to_region");
-  if ((char*) addr >= _seq_bottom) {
-    size_t diff = (size_t) pointer_delta((HeapWord*) addr,
-                                         (HeapWord*) _seq_bottom);
-    int index = (int) (diff >> HeapRegion::LogOfHRGrainWords);
-    assert(index >= 0, "invariant / paranoia");
-    if (index < _regions.length()) {
-      HeapRegion* hr = _regions.at(index);
-      assert(hr->is_in_reserved(addr), "addr_to_region is wrong...");
-      return hr;
-    }
-  }
+inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
+  assert(_heap_bottom <= addr && addr < _heap_end,
+         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
+                 addr, _heap_bottom, _heap_end));
+  size_t index = (size_t) addr >> _region_shift;
+  return index;
+}
+
+inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
+  assert(_heap_bottom <= addr && addr < _heap_end,
+         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
+                 addr, _heap_bottom, _heap_end));
+  size_t index_biased = addr_to_index_biased(addr);
+  HeapRegion* hr = _regions_biased[index_biased];
+  assert(hr != NULL, "invariant");
+  return hr;
+}
+
+inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
+  if (addr != NULL && addr < _heap_end) {
+    assert(addr >= _heap_bottom,
+           err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT,
+                   addr, _heap_bottom));
+    return addr_to_region_unsafe(addr);
+  }
   return NULL;
 }

+inline HeapRegion* HeapRegionSeq::at(size_t index) const {
+  assert(index < length(), "pre-condition");
+  HeapRegion* hr = _regions[index];
+  assert(hr != NULL, "sanity");
+  assert(hr->hrs_index() == index, "sanity");
+  return hr;
+}
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
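addr_to_index_biased() above relies on _regions_biased, the _regions base pointer shifted down by (heap bottom >> region shift), so that a heap address maps to its HeapRegion with one shift and one array load, with no subtraction of the heap base at lookup time. A standalone sketch of the same trick; the heap base, region size, and names are assumptions made up for the example:

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const unsigned       kShift  = 20;            // pretend regions are 1 MB
  const std::uintptr_t kBottom = 0x40000000;    // assumed heap bottom
  const std::size_t    kCount  = 8;

  int regions[kCount] = {0, 1, 2, 3, 4, 5, 6, 7};   // stand-ins for HeapRegion*

  // Bias the base pointer once at setup time (mirrors _regions_biased).
  // Strictly speaking this out-of-range pointer arithmetic is outside what
  // standard C++ guarantees; it is shown only to mirror the HotSpot trick.
  int* regions_biased = regions - (kBottom >> kShift);

  // Lookup is then a single shift plus one array load.
  std::uintptr_t addr = kBottom + 3 * (std::uintptr_t(1) << kShift) + 0x1234;
  assert(&regions_biased[addr >> kShift] == &regions[3]);
  return 0;
}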
src/share/vm/gc_implementation/g1/sparsePRT.cpp

@@ -481,8 +481,9 @@ size_t SparsePRT::mem_size() const {
 bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
 #if SPARSE_PRT_VERBOSE
-  gclog_or_tty->print_cr("  Adding card %d from region %d to region %d sparse.",
-                         card_index, region_id, _hr->hrs_index());
+  gclog_or_tty->print_cr("  Adding card %d from region %d to region "
+                         SIZE_FORMAT" sparse.",
+                         card_index, region_id, _hr->hrs_index());
 #endif
   if (_next->occupied_entries() * 2 > _next->capacity()) {
     expand();

@@ -533,8 +534,8 @@ void SparsePRT::expand() {
   _next = new RSHashTable(last->capacity() * 2);

 #if SPARSE_PRT_VERBOSE
-  gclog_or_tty->print_cr("  Expanded sparse table for %d to %d.",
-                         _hr->hrs_index(), _next->capacity());
+  gclog_or_tty->print_cr("  Expanded sparse table for "SIZE_FORMAT" to %d.",
+                         _hr->hrs_index(), _next->capacity());
 #endif
   for (size_t i = 0; i < last->capacity(); i++) {
     SparsePRTEntry* e = last->entry((int) i);