openanolis / dragonwell8_hotspot
Commit 76f2e04b, authored on Mar 04, 2011 by tonyp
7014923: G1: code cleanup
Summary: Some G1 code cleanup. Reviewed-by: johnc, jcoomes, jwilhelm
Parent: 6afa7345
Showing 15 changed files with 311 additions and 319 deletions (+311 / -319)
src/share/vm/gc_implementation/g1/concurrentMark.cpp           +9   -9
src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp     +2   -2
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp          +147 -133
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp          +44  -44
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp              +5   -8
src/share/vm/gc_implementation/g1/g1RemSet.cpp                 +0   -22
src/share/vm/gc_implementation/g1/heapRegion.hpp               +13  -12
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp            +1   -1
src/share/vm/gc_implementation/g1/heapRegionSeq.hpp            +2   -1
src/share/vm/gc_implementation/g1/heapRegionSet.cpp            +44  -44
src/share/vm/gc_implementation/g1/heapRegionSet.hpp            +19  -19
src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp     +17  -17
src/share/vm/gc_implementation/g1/heapRegionSets.cpp           +4   -4
src/share/vm/utilities/debug.hpp                               +3   -2
src/share/vm/utilities/globalDefinitions.hpp                   +1   -1
src/share/vm/gc_implementation/g1/concurrentMark.cpp
@@ -1828,7 +1828,7 @@ void ConcurrentMark::completeCleanup() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();

   _cleanup_list.verify_optional();
-  FreeRegionList local_free_list("Local Cleanup List");
+  FreeRegionList tmp_free_list("Tmp Free List");

   if (G1ConcRegionFreeingVerbose) {
     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
@@ -1842,7 +1842,7 @@ void ConcurrentMark::completeCleanup() {
     HeapRegion* hr = _cleanup_list.remove_head();
     assert(hr != NULL, "the list was not empty");
     hr->rem_set()->clear();
-    local_free_list.add_as_tail(hr);
+    tmp_free_list.add_as_tail(hr);

     // Instead of adding one region at a time to the secondary_free_list,
     // we accumulate them in the local list and move them a few at a
@@ -1850,20 +1850,20 @@ void ConcurrentMark::completeCleanup() {
     // we do during this process. We'll also append the local list when
     // _cleanup_list is empty (which means we just removed the last
     // region from the _cleanup_list).
-    if ((local_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
+    if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
         _cleanup_list.is_empty()) {
       if (G1ConcRegionFreeingVerbose) {
         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
                                "appending "SIZE_FORMAT" entries to the "
                                "secondary_free_list, clean list still has "
                                SIZE_FORMAT" entries",
-                               local_free_list.length(),
+                               tmp_free_list.length(),
                                _cleanup_list.length());
       }

       {
         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-        g1h->secondary_free_list_add_as_tail(&local_free_list);
+        g1h->secondary_free_list_add_as_tail(&tmp_free_list);
         SecondaryFreeList_lock->notify_all();
       }
@@ -1874,7 +1874,7 @@ void ConcurrentMark::completeCleanup() {
       }
     }
   }
-  assert(local_free_list.is_empty(), "post-condition");
+  assert(tmp_free_list.is_empty(), "post-condition");
 }

 // Support closures for reference procssing in G1
@@ -3182,7 +3182,7 @@ public:
   template <class T> void do_oop_work(T* p) {
     assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
-    assert(!_g1h->is_on_free_list(
+    assert(!_g1h->is_on_master_free_list(
                     _g1h->heap_region_containing((HeapWord*) p)), "invariant");

     oop obj = oopDesc::load_decode_heap_oop(p);
@@ -3403,7 +3403,7 @@ void CMTask::deal_with_reference(oop obj) {
 void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
-  assert(!_g1h->is_on_free_list(
+  assert(!_g1h->is_on_master_free_list(
               _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
   assert(!_g1h->is_obj_ill(obj), "invariant");
   assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
@@ -3649,7 +3649,7 @@ void CMTask::drain_local_queue(bool partially) {
                            (void*) obj);
       assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
-      assert(!_g1h->is_on_free_list(
+      assert(!_g1h->is_on_master_free_list(
                 _g1h->heap_region_containing((HeapWord*) obj)), "invariant");

       scan_object(obj);
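Note: the completeCleanup() hunks above keep the batching scheme described in the code comment: freed regions are accumulated in a thread-local list and appended to the shared secondary_free_list in chunks of G1SecondaryFreeListAppendLength while holding SecondaryFreeList_lock, so the lock is taken once per batch rather than once per region. The following is a minimal, standalone sketch of that pattern, not HotSpot code; Region, g_secondary_free, g_lock and kAppendBatch are illustrative names only.

#include <cstddef>
#include <list>
#include <mutex>

struct Region { /* stand-in for a heap region */ };

std::list<Region*> g_secondary_free;          // shared list, guarded by g_lock
std::mutex         g_lock;
constexpr std::size_t kAppendBatch = 16;      // analogous to G1SecondaryFreeListAppendLength

// Drain 'cleanup' into the shared list a batch at a time, so the shared lock
// is acquired once per batch instead of once per region.
void complete_cleanup(std::list<Region*>& cleanup) {
  std::list<Region*> tmp_free_list;           // thread-local accumulator
  while (!cleanup.empty()) {
    tmp_free_list.push_back(cleanup.front());
    cleanup.pop_front();
    if (tmp_free_list.size() % kAppendBatch == 0 || cleanup.empty()) {
      std::lock_guard<std::mutex> x(g_lock);
      // splice() moves the whole batch in O(1) and empties tmp_free_list.
      g_secondary_free.splice(g_secondary_free.end(), tmp_free_list);
    }
  }
}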
src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp
@@ -237,9 +237,9 @@ void ConcurrentMarkThread::run() {
       // The following will finish freeing up any regions that we
       // found to be empty during cleanup. We'll do this part
       // without joining the suspendible set. If an evacuation pause
-      // takes places, then we would carry on freeing regions in
+      // takes place, then we would carry on freeing regions in
       // case they are needed by the pause. If a Full GC takes
-      // places, it would wait for us to process the regions
+      // place, it would wait for us to process the regions
       // reclaimed by cleanup.

       double cleanup_start_sec = os::elapsedTime();
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -479,7 +479,7 @@ G1CollectedHeap* G1CollectedHeap::_g1h;

 // Private methods.

-HeapRegion* G1CollectedHeap::new_region_try_secondary_free_list(size_t word_size) {
+HeapRegion* G1CollectedHeap::new_region_try_secondary_free_list() {
   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
     if (!_secondary_free_list.is_empty()) {
@@ -531,7 +531,7 @@ HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                              "forced to look at the secondary_free_list");
     }
-    res = new_region_try_secondary_free_list(word_size);
+    res = new_region_try_secondary_free_list();
     if (res != NULL) {
       return res;
     }
@@ -543,7 +543,7 @@ HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
      gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
                             "res == NULL, trying the secondary_free_list");
     }
-    res = new_region_try_secondary_free_list(word_size);
+    res = new_region_try_secondary_free_list();
   }
   if (res == NULL && do_expand) {
     if (expand(word_size * HeapWordSize)) {
@@ -579,6 +579,9 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,

 int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
                                                        size_t word_size) {
+  assert(isHumongous(word_size), "word_size should be humongous");
+  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
+
   int first = -1;
   if (num_regions == 1) {
     // Only one region to allocate, no need to go through the slower
@@ -600,7 +603,7 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
     // request. If we are only allocating one region we use the common
     // region allocation code (see above).
     wait_while_free_regions_coming();
-    append_secondary_free_list_if_not_empty();
+    append_secondary_free_list_if_not_empty_with_lock();

     if (free_regions() >= num_regions) {
       first = _hrs->find_contiguous(num_regions);
@@ -608,7 +611,7 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
       for (int i = first; i < first + (int) num_regions; ++i) {
         HeapRegion* hr = _hrs->at(i);
         assert(hr->is_empty(), "sanity");
-        assert(is_on_free_list(hr), "sanity");
+        assert(is_on_master_free_list(hr), "sanity");
         hr->set_pending_removal(true);
       }
       _free_list.remove_all_pending(num_regions);
@@ -618,42 +621,14 @@ int G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
   return first;
 }

-// If could fit into free regions w/o expansion, try.
-// Otherwise, if can expand, do so.
-// Otherwise, if using ex regions might help, try with ex given back.
-HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
-  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
-
-  verify_region_sets_optional();
-
-  size_t num_regions =
-         round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
-  size_t x_size = expansion_regions();
-  size_t fs = _hrs->free_suffix();
-  int first = humongous_obj_allocate_find_first(num_regions, word_size);
-  if (first == -1) {
-    // The only thing we can do now is attempt expansion.
-    if (fs + x_size >= num_regions) {
-      // If the number of regions we're trying to allocate for this
-      // object is at most the number of regions in the free suffix,
-      // then the call to humongous_obj_allocate_find_first() above
-      // should have succeeded and we wouldn't be here.
-      //
-      // We should only be trying to expand when the free suffix is
-      // not sufficient for the object _and_ we have some expansion
-      // room available.
-      assert(num_regions > fs, "earlier allocation should have succeeded");
-
-      if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
-        first = humongous_obj_allocate_find_first(num_regions, word_size);
-        // If the expansion was successful then the allocation
-        // should have been successful.
-        assert(first != -1, "this should have worked");
-      }
-    }
-  }
-
-  if (first != -1) {
+HeapWord*
+G1CollectedHeap::humongous_obj_allocate_initialize_regions(int first,
+                                                           size_t num_regions,
+                                                           size_t word_size) {
+  assert(first != -1, "pre-condition");
+  assert(isHumongous(word_size), "word_size should be humongous");
+  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
+
   // Index of last region in the series + 1.
   int last = first + (int) num_regions;
@@ -764,10 +739,53 @@ HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
   _humongous_set.add(first_hr);

   return new_obj;
 }

+// If could fit into free regions w/o expansion, try.
+// Otherwise, if can expand, do so.
+// Otherwise, if using ex regions might help, try with ex given back.
+HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
+  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
+
+  verify_region_sets_optional();
+
+  size_t num_regions =
+         round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
+  size_t x_size = expansion_regions();
+  size_t fs = _hrs->free_suffix();
+  int first = humongous_obj_allocate_find_first(num_regions, word_size);
+  if (first == -1) {
+    // The only thing we can do now is attempt expansion.
+    if (fs + x_size >= num_regions) {
+      // If the number of regions we're trying to allocate for this
+      // object is at most the number of regions in the free suffix,
+      // then the call to humongous_obj_allocate_find_first() above
+      // should have succeeded and we wouldn't be here.
+      //
+      // We should only be trying to expand when the free suffix is
+      // not sufficient for the object _and_ we have some expansion
+      // room available.
+      assert(num_regions > fs, "earlier allocation should have succeeded");
+
+      if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
+        first = humongous_obj_allocate_find_first(num_regions, word_size);
+        // If the expansion was successful then the allocation
+        // should have been successful.
+        assert(first != -1, "this should have worked");
+      }
+    }
+  }
+
+  HeapWord* result = NULL;
+  if (first != -1) {
+    result = humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
+    assert(result != NULL, "it should always return a valid result");
+  }
+
   verify_region_sets_optional();

-  return NULL;
+  return result;
 }

 void
@@ -1389,7 +1407,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
     g1_policy()->record_full_collection_start();

     wait_while_free_regions_coming();
-    append_secondary_free_list_if_not_empty();
+    append_secondary_free_list_if_not_empty_with_lock();

     gc_prologue(true);
     increment_total_collections(true /* full gc */);
@@ -3377,15 +3395,14 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
     TraceMemoryManagerStats tms(false /* fullGC */);

-    // If there are any free regions available on the secondary_free_list
-    // make sure we append them to the free_list. However, we don't
-    // have to wait for the rest of the cleanup operation to
-    // finish. If it's still going on that's OK. If we run out of
-    // regions, the region allocation code will check the
-    // secondary_free_list and potentially wait if more free regions
-    // are coming (see new_region_try_secondary_free_list()).
+    // If the secondary_free_list is not empty, append it to the
+    // free_list. No need to wait for the cleanup operation to finish;
+    // the region allocation code will check the secondary_free_list
+    // and wait if necessary. If the G1StressConcRegionFreeing flag is
+    // set, skip this step so that the region allocation code has to
+    // get entries from the secondary_free_list.
     if (!G1StressConcRegionFreeing) {
-      append_secondary_free_list_if_not_empty();
+      append_secondary_free_list_if_not_empty_with_lock();
     }

     increment_gc_time_stamp();
@@ -5199,7 +5216,7 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
   size_t rs_lengths = 0;

   while (cur != NULL) {
-    assert(!is_on_free_list(cur), "sanity");
+    assert(!is_on_master_free_list(cur), "sanity");

     if (non_young) {
       if (cur->is_young()) {
@@ -5543,13 +5560,10 @@ void G1CollectedHeap::verify_region_sets() {
     return;
   }

-  {
-    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-    // Make sure we append the secondary_free_list on the free_list so
-    // that all free regions we will come across can be safely
-    // attributed to the free_list.
-    append_secondary_free_list();
-  }
+  // Make sure we append the secondary_free_list on the free_list so
+  // that all free regions we will come across can be safely
+  // attributed to the free_list.
+  append_secondary_free_list_if_not_empty_with_lock();

   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
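Note: the main structural change in this file splits the old humongous_obj_allocate() into a search step (humongous_obj_allocate_find_first()), an optional expand-and-retry, and a new initialization step (humongous_obj_allocate_initialize_regions()). The toy model below is not VM code; ToyHeap, free_map, find_first, claim and allocate are invented names that only mirror the shape of that control flow under the simplifying assumption that regions can be modelled as a free/used bitmap.

#include <cstddef>
#include <vector>

// Toy model of the allocation flow shown above: find a run of 'num'
// contiguous free regions, optionally growing the region table once,
// then claim the run.
struct ToyHeap {
  std::vector<bool> free_map;  // true == region is free

  // Analogue of humongous_obj_allocate_find_first(): index of the first
  // region of a contiguous free run of length 'num', or -1 if none exists.
  int find_first(std::size_t num) const {
    std::size_t run = 0;
    for (std::size_t i = 0; i < free_map.size(); ++i) {
      run = free_map[i] ? run + 1 : 0;
      if (run == num) return (int)(i - num + 1);
    }
    return -1;
  }

  // Analogue of humongous_obj_allocate_initialize_regions(): claim the run.
  void claim(int first, std::size_t num) {
    for (std::size_t i = 0; i < num; ++i) free_map[first + i] = false;
  }

  // Analogue of the refactored humongous_obj_allocate(): find, expand and
  // retry if needed, then initialize; -1 means the request failed.
  int allocate(std::size_t num, std::size_t max_expansion) {
    int first = find_first(num);
    if (first == -1 && max_expansion > 0) {
      free_map.insert(free_map.end(), max_expansion, true);  // "expand the heap"
      first = find_first(num);
    }
    if (first != -1) claim(first, num);
    return first;
  }
};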
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -56,7 +56,6 @@ class HeapRegionRemSetIterator;
 class ConcurrentMark;
 class ConcurrentMarkThread;
 class ConcurrentG1Refine;
-class ConcurrentZFThread;

 typedef OverflowTaskQueue<StarTask>         RefToScanQueue;
 typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
@@ -64,12 +63,6 @@ typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

-enum G1GCThreadGroups {
-  G1CRGroup = 0,
-  G1ZFGroup = 1,
-  G1CMGroup = 2
-};
-
 enum GCAllocPurpose {
   GCAllocForTenured,
   GCAllocForSurvived,
@@ -294,9 +287,9 @@ private:
   // These are macros so that, if the assert fires, we get the correct
   // line number, file, etc.

-#define heap_locking_asserts_err_msg(__extra_message)                        \
+#define heap_locking_asserts_err_msg(_extra_message_)                        \
   err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",   \
-          (__extra_message),                                                 \
+          (_extra_message_),                                                 \
           BOOL_TO_STR(Heap_lock->owned_by_self()),                           \
           BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),              \
           BOOL_TO_STR(Thread::current()->is_VM_thread()))
@@ -307,11 +300,11 @@ private:
     heap_locking_asserts_err_msg("should be holding the Heap_lock"));        \
   } while (0)

-#define assert_heap_locked_or_at_safepoint(__should_be_vm_thread)            \
+#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)            \
   do {                                                                       \
     assert(Heap_lock->owned_by_self() ||                                     \
            (SafepointSynchronize::is_at_safepoint() &&                       \
-             ((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \
+             ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
            heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
                                         "should be at a safepoint"));        \
   } while (0)
@@ -338,10 +331,10 @@ private:
            "should not be at a safepoint"));                                 \
   } while (0)

-#define assert_at_safepoint(__should_be_vm_thread)                           \
+#define assert_at_safepoint(_should_be_vm_thread_)                           \
   do {                                                                       \
     assert(SafepointSynchronize::is_at_safepoint() &&                        \
-           ((__should_be_vm_thread) == Thread::current()->is_VM_thread()),   \
+           ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()),   \
            heap_locking_asserts_err_msg("should be at a safepoint"));        \
   } while (0)
@@ -371,35 +364,40 @@ protected:
   // will check whether there's anything available in the
   // secondary_free_list and/or wait for more regions to appear in that
   // list, if _free_regions_coming is set.
-  HeapRegion* new_region_try_secondary_free_list(size_t word_size);
+  HeapRegion* new_region_try_secondary_free_list();

-  // It will try to allocate a single non-humongous HeapRegion
-  // sufficient for an allocation of the given word_size. If
-  // do_expand is true, it will attempt to expand the heap if
-  // necessary to satisfy the allocation request. Note that word_size
-  // is only used to make sure that we expand sufficiently but, given
-  // that the allocation request is assumed not to be humongous,
-  // having word_size is not strictly necessary (expanding by a single
-  // region will always be sufficient). But let's keep that parameter
-  // in case we need it in the future.
+  // Try to allocate a single non-humongous HeapRegion sufficient for
+  // an allocation of the given word_size. If do_expand is true,
+  // attempt to expand the heap if necessary to satisfy the allocation
+  // request.
   HeapRegion* new_region_work(size_t word_size, bool do_expand);

-  // It will try to allocate a new region to be used for allocation by
-  // mutator threads. It will not try to expand the heap if not region
-  // is available.
+  // Try to allocate a new region to be used for allocation by a
+  // mutator thread. Attempt to expand the heap if no region is
+  // available.
   HeapRegion* new_alloc_region(size_t word_size) {
     return new_region_work(word_size, false /* do_expand */);
   }

-  // It will try to allocate a new region to be used for allocation by
-  // a GC thread. It will try to expand the heap if no region is
-  // available.
+  // Try to allocate a new region to be used for allocation by a GC
+  // thread. Attempt to expand the heap if no region is available.
   HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);

+  // Attempt to satisfy a humongous allocation request of the given
+  // size by finding a contiguous set of free regions of num_regions
+  // length and remove them from the master free list. Return the
+  // index of the first region or -1 if the search was unsuccessful.
   int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);

-  // Attempt to allocate an object of the given (very large) "word_size".
-  // Returns "NULL" on failure.
+  // Initialize a contiguous set of free regions of length num_regions
+  // and starting at index first so that they appear as a single
+  // humongous region.
+  HeapWord* humongous_obj_allocate_initialize_regions(int first,
+                                                      size_t num_regions,
+                                                      size_t word_size);
+
+  // Attempt to allocate a humongous object of the given size. Return
+  // NULL if unsuccessful.
   HeapWord* humongous_obj_allocate(size_t word_size);

   // The following two methods, allocate_new_tlab() and
@@ -776,7 +774,7 @@ protected:
   // Invoke "save_marks" on all heap regions.
   void save_marks();

-  // It frees a non-humongous region by initializing its contents and
+  // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
   // usually a local list which will be appended to the master free
   // list later). The used bytes of freed regions are accumulated in
@@ -787,13 +785,13 @@ protected:
                    FreeRegionList* free_list,
                    bool par);

-  // It frees a humongous region by collapsing it into individual
-  // regions and calling free_region() for each of them. The freed
-  // regions will be added to the free list that's passed as a parameter
-  // (this is usually a local list which will be appended to the
-  // master free list later). The used bytes of freed regions are
-  // accumulated in pre_used. If par is true, the region's RSet will
-  // not be freed up. The assumption is that this will be done later.
+  // Frees a humongous region by collapsing it into individual regions
+  // and calling free_region() for each of them. The freed regions
+  // will be added to the free list that's passed as a parameter (this
+  // is usually a local list which will be appended to the master free
+  // list later). The used bytes of freed regions are accumulated in
+  // pre_used. If par is true, the region's RSet will not be freed
+  // up. The assumption is that this will be done later.
   void free_humongous_region(HeapRegion* hr,
                              size_t* pre_used,
                              FreeRegionList* free_list,
@@ -1046,13 +1044,13 @@ public:
 #endif // HEAP_REGION_SET_FORCE_VERIFY

 #ifdef ASSERT
-  bool is_on_free_list(HeapRegion* hr) {
+  bool is_on_master_free_list(HeapRegion* hr) {
     return hr->containing_set() == &_free_list;
   }

-  bool is_on_humongous_set(HeapRegion* hr) {
+  bool is_in_humongous_set(HeapRegion* hr) {
     return hr->containing_set() == &_humongous_set;
   }
 #endif // ASSERT

   // Wrapper for the region list operations that can be called from
@@ -1066,7 +1064,9 @@ public:
     _free_list.add_as_tail(&_secondary_free_list);
   }

-  void append_secondary_free_list_if_not_empty() {
+  void append_secondary_free_list_if_not_empty_with_lock() {
+    // If the secondary free list looks empty there's no reason to
+    // take the lock and then try to append it.
     if (!_secondary_free_list.is_empty()) {
       MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
       append_secondary_free_list();
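Note: several hunks in this commit rename macro parameters from the __name form to the _name_ form (heap_locking_asserts_err_msg, assert_heap_locked_or_at_safepoint, assert_at_safepoint above, and HR_FORMAT_PARAMS and BOOL_TO_STR in later files). The commit message does not state a motivation, but as a general language fact identifiers containing a double underscore are reserved for the C++ implementation, so the old parameter names risked colliding with compiler- or library-defined symbols; the _name_ pattern stays out of that reserved space. A tiny compilable illustration, with HEAP_ASSERT_MSG being a hypothetical macro rather than anything from HotSpot:

#include <cstdio>

// Illustration only, not HotSpot code. A parameter spelled __extra_message
// would use a reserved identifier; _extra_message_ carries the same meaning
// without the collision risk.
#define HEAP_ASSERT_MSG(_extra_message_) \
  std::printf("heap assert: %s\n", (_extra_message_))

int main() {
  HEAP_ASSERT_MSG("should be holding the Heap_lock");
  return 0;
}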
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
@@ -185,22 +185,22 @@ class G1PrepareCompactClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   ModRefBarrierSet* _mrbs;
   CompactPoint _cp;
-  size_t _pre_used;
-  FreeRegionList _free_list;
   HumongousRegionSet _humongous_proxy_set;

   void free_humongous_region(HeapRegion* hr) {
     HeapWord* end = hr->end();
+    size_t dummy_pre_used;
+    FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
+
     assert(hr->startsHumongous(),
            "Only the start of a humongous region should be freed.");
-    _g1h->free_humongous_region(hr, &_pre_used, &_free_list,
+    _g1h->free_humongous_region(hr, &dummy_pre_used, &dummy_free_list,
                                 &_humongous_proxy_set, false /* par */);
     // Do we also need to do this for the continues humongous regions
     // we just collapsed?
     hr->prepare_for_compaction(&_cp);
     // Also clear the part of the card table that will be unused after
     // compaction.
     _mrbs->clear(MemRegion(hr->compaction_top(), end));
+    dummy_free_list.remove_all();
   }

 public:
@@ -208,8 +208,6 @@ public:
   : _g1h(G1CollectedHeap::heap()),
     _mrbs(G1CollectedHeap::heap()->mr_bs()),
     _cp(NULL, cs, cs->initialize_threshold()),
-    _pre_used(0),
-    _free_list("Local Free List for G1MarkSweep"),
     _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }

   void update_sets() {
@@ -219,7 +217,6 @@ public:
                                             NULL, /* free_list */
                                             &_humongous_proxy_set,
                                             false /* par */);
-    _free_list.remove_all();
   }

   bool doHeapRegion(HeapRegion* hr) {
src/share/vm/gc_implementation/g1/g1RemSet.cpp
@@ -86,28 +86,6 @@ public:
   bool idempotent() { return true; }
 };

-class IntoCSRegionClosure: public HeapRegionClosure {
-  IntoCSOopClosure _blk;
-  G1CollectedHeap* _g1;
-public:
-  IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
-    _g1(g1), _blk(g1, blk) {}
-  bool doHeapRegion(HeapRegion* r) {
-    if (!r->in_collection_set()) {
-      _blk.set_region(r);
-      if (r->isHumongous()) {
-        if (r->startsHumongous()) {
-          oop obj = oop(r->bottom());
-          obj->oop_iterate(&_blk);
-        }
-      } else {
-        r->oop_before_save_marks_iterate(&_blk);
-      }
-    }
-    return false;
-  }
-};
-
 class VerifyRSCleanCardOopClosure: public OopClosure {
   G1CollectedHeap* _g1;
 public:
src/share/vm/gc_implementation/g1/heapRegion.hpp
@@ -53,8 +53,8 @@ class HeapRegion;
 class HeapRegionSetBase;

 #define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
-#define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \
-                               (__hr)->top(), (__hr)->end()
+#define HR_FORMAT_PARAMS(_hr_) (_hr_)->hrs_index(), (_hr_)->bottom(), \
+                               (_hr_)->top(), (_hr_)->end()

 // A dirty card to oop closure for heap regions. It
 // knows how to get the G1 heap and how to use the bitmap
@@ -518,13 +518,13 @@ class HeapRegion: public G1OffsetTableContigSpace {
                  containing_set, _containing_set));

     _containing_set = containing_set;
   }

   HeapRegionSetBase* containing_set() { return _containing_set; }
 #else // ASSERT
   void set_containing_set(HeapRegionSetBase* containing_set) { }

-  // containing_set() is only used in asserts so there's not reason
+  // containing_set() is only used in asserts so there's no reason
   // to provide a dummy version of it.
 #endif // ASSERT
@@ -535,14 +535,15 @@ class HeapRegion: public G1OffsetTableContigSpace {
   bool pending_removal() { return _pending_removal; }

   void set_pending_removal(bool pending_removal) {
-    // We can only set pending_removal to true, if it's false and the
-    // region belongs to a set.
-    assert(!pending_removal ||
-           (!_pending_removal && containing_set() != NULL), "pre-condition");
-    // We can only set pending_removal to false, if it's true and the
-    // region does not belong to a set.
-    assert(pending_removal ||
-           (_pending_removal && containing_set() == NULL), "pre-condition");
+    if (pending_removal) {
+      assert(!_pending_removal && containing_set() != NULL,
+             "can only set pending removal to true if it's false and "
+             "the region belongs to a region set");
+    } else {
+      assert(_pending_removal && containing_set() == NULL,
+             "can only set pending removal to false if it's true and "
+             "the region does not belong to a region set");
+    }

     _pending_removal = pending_removal;
   }
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp
@@ -165,7 +165,7 @@ int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
   assert(num_so_far <= num, "post-condition");
   if (num_so_far == num) {
-    // we find enough space for the humongous object
+    // we found enough space for the humongous object
     assert(from <= first && first < _regions.length(), "post-condition");
     assert(first < curr && (curr - first) == (int) num, "post-condition");
     for (int i = first; i < first + (int) num; ++i) {
src/share/vm/gc_implementation/g1/heapRegionSeq.hpp
@@ -76,7 +76,8 @@ class HeapRegionSeq: public CHeapObj {
   // that are available for allocation.
   size_t free_suffix();

-  // Finds a contiguous set of empty regions of length num.
+  // Find a contiguous set of empty regions of length num and return
+  // the index of the first region or -1 if the search was unsuccessful.
   int find_contiguous(size_t num);

   // Apply the "doHeapRegion" method of "blk" to all regions in "this",
src/share/vm/gc_implementation/g1/heapRegionSet.cpp
@@ -42,7 +42,7 @@ size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
   return region_num;
 }

-void HeapRegionSetBase::fill_in_ext_msg(hrl_ext_msg* msg, const char* message) {
+void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
   msg->append("[%s] %s "
               "ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" "
               "cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
@@ -109,30 +109,30 @@ void HeapRegionSetBase::verify() {
   // for the verification calls. If we do verification without the
   // appropriate locks and the set changes underneath our feet
   // verification might fail and send us on a wild goose chase.
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);

   guarantee(( is_empty() && length() == 0 && region_num() == 0 &&
               total_used_bytes() == 0 && total_capacity_bytes() == 0) ||
             (!is_empty() && length() >= 0 && region_num() >= 0 &&
               total_used_bytes() >= 0 && total_capacity_bytes() >= 0),
-            hrl_ext_msg(this, "invariant"));
+            hrs_ext_msg(this, "invariant"));

   guarantee((!regions_humongous() && region_num() == length()) ||
             ( regions_humongous() && region_num() >= length()),
-            hrl_ext_msg(this, "invariant"));
+            hrs_ext_msg(this, "invariant"));

   guarantee(!regions_empty() || total_used_bytes() == 0,
-            hrl_ext_msg(this, "invariant"));
+            hrs_ext_msg(this, "invariant"));

   guarantee(total_used_bytes() <= total_capacity_bytes(),
-            hrl_ext_msg(this, "invariant"));
+            hrs_ext_msg(this, "invariant"));
 }

 void HeapRegionSetBase::verify_start() {
   // See comment in verify() about MT safety and verification.
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   assert(!_verify_in_progress,
-         hrl_ext_msg(this, "verification should not be in progress"));
+         hrs_ext_msg(this, "verification should not be in progress"));

   // Do the basic verification first before we do the checks over the regions.
   HeapRegionSetBase::verify();
@@ -146,11 +146,11 @@ void HeapRegionSetBase::verify_start() {

 void HeapRegionSetBase::verify_next_region(HeapRegion* hr) {
   // See comment in verify() about MT safety and verification.
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   assert(_verify_in_progress,
-         hrl_ext_msg(this, "verification should be in progress"));
+         hrs_ext_msg(this, "verification should be in progress"));

-  guarantee(verify_region(hr, this), hrl_ext_msg(this, "region verification"));
+  guarantee(verify_region(hr, this), hrs_ext_msg(this, "region verification"));

   _calc_length += 1;
   if (!hr->isHumongous()) {
@@ -164,28 +164,28 @@ void HeapRegionSetBase::verify_next_region(HeapRegion* hr) {

 void HeapRegionSetBase::verify_end() {
   // See comment in verify() about MT safety and verification.
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   assert(_verify_in_progress,
-         hrl_ext_msg(this, "verification should be in progress"));
+         hrs_ext_msg(this, "verification should be in progress"));

   guarantee(length() == _calc_length,
-            hrl_err_msg("[%s] length: "SIZE_FORMAT" should be == "
+            hrs_err_msg("[%s] length: "SIZE_FORMAT" should be == "
                         "calc length: "SIZE_FORMAT,
                         name(), length(), _calc_length));

   guarantee(region_num() == _calc_region_num,
-            hrl_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
+            hrs_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
                         "calc region num: "SIZE_FORMAT,
                         name(), region_num(), _calc_region_num));

   guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
-            hrl_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == "
+            hrs_err_msg("[%s] capacity bytes: "SIZE_FORMAT" should be == "
                         "calc capacity bytes: "SIZE_FORMAT,
                         name(), total_capacity_bytes(), _calc_total_capacity_bytes));

   guarantee(total_used_bytes() == _calc_total_used_bytes,
-            hrl_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == "
+            hrs_err_msg("[%s] used bytes: "SIZE_FORMAT" should be == "
                         "calc used bytes: "SIZE_FORMAT,
                         name(), total_used_bytes(), _calc_total_used_bytes));
@@ -221,9 +221,9 @@ HeapRegionSetBase::HeapRegionSetBase(const char* name)
 //////////////////// HeapRegionSet ////////////////////

 void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
-  hrl_assert_mt_safety_ok(this);
-  hrl_assert_mt_safety_ok(proxy_set);
-  hrl_assert_sets_match(this, proxy_set);
+  hrs_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(proxy_set);
+  hrs_assert_sets_match(this, proxy_set);

   verify_optional();
   proxy_set->verify_optional();
@@ -231,19 +231,19 @@ void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
   if (proxy_set->is_empty()) return;

   assert(proxy_set->length() <= _length,
-         hrl_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
+         hrs_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
                      "should be <= length: "SIZE_FORMAT,
                      name(), proxy_set->length(), _length));
   _length -= proxy_set->length();

   assert(proxy_set->region_num() <= _region_num,
-         hrl_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
+         hrs_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
                      "should be <= region num: "SIZE_FORMAT,
                      name(), proxy_set->region_num(), _region_num));
   _region_num -= proxy_set->region_num();

   assert(proxy_set->total_used_bytes() <= _total_used_bytes,
-         hrl_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" "
+         hrs_err_msg("[%s] proxy set used bytes: "SIZE_FORMAT" "
                      "should be <= used bytes: "SIZE_FORMAT,
                      name(), proxy_set->total_used_bytes(), _total_used_bytes));
@@ -257,13 +257,13 @@ void HeapRegionSet::update_from_proxy(HeapRegionSet* proxy_set) {
 //////////////////// HeapRegionLinkedList ////////////////////

-void HeapRegionLinkedList::fill_in_ext_msg_extra(hrl_ext_msg* msg) {
+void HeapRegionLinkedList::fill_in_ext_msg_extra(hrs_ext_msg* msg) {
   msg->append(" hd: "PTR_FORMAT" tl: "PTR_FORMAT, head(), tail());
 }

 void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
-  hrl_assert_mt_safety_ok(this);
-  hrl_assert_mt_safety_ok(from_list);
+  hrs_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(from_list);

   verify_optional();
   from_list->verify_optional();
@@ -283,10 +283,10 @@ void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
 #endif // ASSERT

   if (_tail != NULL) {
-    assert(length() >  0 && _head != NULL, hrl_ext_msg(this, "invariant"));
+    assert(length() >  0 && _head != NULL, hrs_ext_msg(this, "invariant"));
     _tail->set_next(from_list->_head);
   } else {
-    assert(length() == 0 && _head == NULL, hrl_ext_msg(this, "invariant"));
+    assert(length() == 0 && _head == NULL, hrs_ext_msg(this, "invariant"));
     _head = from_list->_head;
   }
   _tail = from_list->_tail;
@@ -301,12 +301,12 @@ void HeapRegionLinkedList::add_as_tail(HeapRegionLinkedList* from_list) {
 }

 void HeapRegionLinkedList::remove_all() {
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   verify_optional();

   HeapRegion* curr = _head;
   while (curr != NULL) {
-    hrl_assert_region_ok(this, curr, this);
+    hrs_assert_region_ok(this, curr, this);

     HeapRegion* next = curr->next();
     curr->set_next(NULL);
@@ -319,9 +319,9 @@ void HeapRegionLinkedList::remove_all() {
 }

 void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
-  hrl_assert_mt_safety_ok(this);
-  assert(target_count > 1, hrl_ext_msg(this, "pre-condition"));
-  assert(!is_empty(), hrl_ext_msg(this, "pre-condition"));
+  hrs_assert_mt_safety_ok(this);
+  assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
+  assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));

   verify_optional();
   DEBUG_ONLY(size_t old_length = length();)
@@ -330,27 +330,27 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
   HeapRegion* prev = NULL;
   size_t count = 0;
   while (curr != NULL) {
-    hrl_assert_region_ok(this, curr, this);
+    hrs_assert_region_ok(this, curr, this);
     HeapRegion* next = curr->next();

     if (curr->pending_removal()) {
       assert(count < target_count,
-             hrl_err_msg("[%s] should not come across more regions "
+             hrs_err_msg("[%s] should not come across more regions "
                          "pending for removal than target_count: "SIZE_FORMAT,
                          name(), target_count));

       if (prev == NULL) {
-        assert(_head == curr, hrl_ext_msg(this, "invariant"));
+        assert(_head == curr, hrs_ext_msg(this, "invariant"));
         _head = next;
       } else {
-        assert(_head != curr, hrl_ext_msg(this, "invariant"));
+        assert(_head != curr, hrs_ext_msg(this, "invariant"));
         prev->set_next(next);
       }
       if (next == NULL) {
-        assert(_tail == curr, hrl_ext_msg(this, "invariant"));
+        assert(_tail == curr, hrs_ext_msg(this, "invariant"));
         _tail = prev;
       } else {
-        assert(_tail != curr, hrl_ext_msg(this, "invariant"));
+        assert(_tail != curr, hrs_ext_msg(this, "invariant"));
       }

       curr->set_next(NULL);
@@ -371,10 +371,10 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
   }

   assert(count == target_count,
-         hrl_err_msg("[%s] count: "SIZE_FORMAT" should be == "
+         hrs_err_msg("[%s] count: "SIZE_FORMAT" should be == "
                      "target_count: "SIZE_FORMAT,
                      name(), count, target_count));
   assert(length() + target_count == old_length,
-         hrl_err_msg("[%s] new length should be consistent "
+         hrs_err_msg("[%s] new length should be consistent "
                      "new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" "
                      "target_count: "SIZE_FORMAT,
                      name(), length(), old_length, target_count));
@@ -385,7 +385,7 @@ void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
 void HeapRegionLinkedList::verify() {
   // See comment in HeapRegionSetBase::verify() about MT safety and
   // verification.
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);

   // This will also do the basic verification too.
   verify_start();
@@ -399,7 +399,7 @@ void HeapRegionLinkedList::verify() {
     count += 1;
     guarantee(count < _unrealistically_long_length,
-              hrl_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
+              hrs_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
                           "seems very long, is there maybe a cycle? "
                           "curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
                           "prev1: "PTR_FORMAT" length: "SIZE_FORMAT,
@@ -410,7 +410,7 @@ void HeapRegionLinkedList::verify() {
     curr = curr->next();
   }

-  guarantee(_tail == prev0, hrl_ext_msg(this, "post-condition"));
+  guarantee(_tail == prev0, hrs_ext_msg(this, "post-condition"));

   verify_end();
 }
src/share/vm/gc_implementation/g1/heapRegionSet.hpp
@@ -28,8 +28,8 @@
 #include "gc_implementation/g1/heapRegion.hpp"

 // Large buffer for some cases where the output might be larger than normal.
-#define HRL_ERR_MSG_BUFSZ 512
-typedef FormatBuffer<HRL_ERR_MSG_BUFSZ> hrl_err_msg;
+#define HRS_ERR_MSG_BUFSZ 512
+typedef FormatBuffer<HRS_ERR_MSG_BUFSZ> hrs_err_msg;

 // Set verification will be forced either if someone defines
 // HEAP_REGION_SET_FORCE_VERIFY to be 1, or in builds in which
@@ -45,10 +45,10 @@ typedef FormatBuffer<HRL_ERR_MSG_BUFSZ> hrl_err_msg;
 // (e.g., length, region num, used bytes sum) plus any shared
 // functionality (e.g., verification).

-class hrl_ext_msg;
+class hrs_ext_msg;

 class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
-  friend class hrl_ext_msg;
+  friend class hrs_ext_msg;

 protected:
   static size_t calculate_region_num(HeapRegion* hr);
@@ -104,10 +104,10 @@ protected:
   virtual bool check_mt_safety() { return true; }

   // fill_in_ext_msg() writes the the values of the set's attributes
-  // in the custom err_msg (hrl_ext_msg). fill_in_ext_msg_extra()
+  // in the custom err_msg (hrs_ext_msg). fill_in_ext_msg_extra()
   // allows subclasses to append further information.
-  virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg) { }
-  void fill_in_ext_msg(hrl_ext_msg* msg, const char* message);
+  virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg) { }
+  void fill_in_ext_msg(hrs_ext_msg* msg, const char* message);

   // It updates the fields of the set to reflect hr being added to
   // the set.
@@ -170,9 +170,9 @@ public:
 // the fields of the associated set. This can be very helpful in
 // diagnosing failures.

-class hrl_ext_msg : public hrl_err_msg {
+class hrs_ext_msg : public hrs_err_msg {
 public:
-  hrl_ext_msg(HeapRegionSetBase* set, const char* message) : hrl_err_msg("") {
+  hrs_ext_msg(HeapRegionSetBase* set, const char* message) : hrs_err_msg("") {
     set->fill_in_ext_msg(this, message);
   }
 };
@@ -180,25 +180,25 @@ public:
 // These two macros are provided for convenience, to keep the uses of
 // these two asserts a bit more concise.

-#define hrl_assert_mt_safety_ok(_set_)                                        \
+#define hrs_assert_mt_safety_ok(_set_)                                        \
   do {                                                                        \
-    assert((_set_)->check_mt_safety(), hrl_ext_msg((_set_), "MT safety"));    \
+    assert((_set_)->check_mt_safety(), hrs_ext_msg((_set_), "MT safety"));    \
   } while (0)

-#define hrl_assert_region_ok(_set_, _hr_, _expected_)                         \
+#define hrs_assert_region_ok(_set_, _hr_, _expected_)                         \
   do {                                                                        \
     assert((_set_)->verify_region((_hr_), (_expected_)),                      \
-           hrl_ext_msg((_set_), "region verification"));                      \
+           hrs_ext_msg((_set_), "region verification"));                      \
   } while (0)

 //////////////////// HeapRegionSet ////////////////////

-#define hrl_assert_sets_match(_set1_, _set2_)                                 \
+#define hrs_assert_sets_match(_set1_, _set2_)                                 \
   do {                                                                        \
     assert(((_set1_)->regions_humongous() ==                                  \
                                             (_set2_)->regions_humongous()) && \
            ((_set1_)->regions_empty() == (_set2_)->regions_empty()),          \
-           hrl_err_msg("the contents of set %s and set %s should match",      \
+           hrs_err_msg("the contents of set %s and set %s should match",      \
                        (_set1_)->name(), (_set2_)->name()));                  \
   } while (0)
@@ -267,7 +267,7 @@ private:
   HeapRegion* tail() { return _tail; }

 protected:
-  virtual void fill_in_ext_msg_extra(hrl_ext_msg* msg);
+  virtual void fill_in_ext_msg_extra(hrs_ext_msg* msg);

   // See the comment for HeapRegionSetBase::clear()
   virtual void clear();
@@ -309,10 +309,10 @@ public:
   virtual void print_on(outputStream* out, bool print_contents = false);
 };

-//////////////////// HeapRegionLinkedList ////////////////////
+//////////////////// HeapRegionLinkedListIterator ////////////////////

-// Iterator class that provides a convenient way to iterator over the
-// regions in a HeapRegionLinkedList instance.
+// Iterator class that provides a convenient way to iterate over the
+// regions of a HeapRegionLinkedList instance.

 class HeapRegionLinkedListIterator : public StackObj {
 private:
src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp
@@ -42,8 +42,8 @@ inline void HeapRegionSetBase::update_for_addition(HeapRegion* hr) {
 }

 inline void HeapRegionSetBase::add_internal(HeapRegion* hr) {
-  hrl_assert_region_ok(this, hr, NULL);
-  assert(hr->next() == NULL, hrl_ext_msg(this, "should not already be linked"));
+  hrs_assert_region_ok(this, hr, NULL);
+  assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked"));

   update_for_addition(hr);
   hr->set_containing_set(this);
@@ -51,7 +51,7 @@ inline void HeapRegionSetBase::add_internal(HeapRegion* hr) {
 inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
   // Assumes the caller has already verified the region.

-  assert(_length > 0, hrl_ext_msg(this, "pre-condition"));
+  assert(_length > 0, hrs_ext_msg(this, "pre-condition"));
   _length -= 1;

   size_t region_num_diff;
@@ -61,22 +61,22 @@ inline void HeapRegionSetBase::update_for_removal(HeapRegion* hr) {
     region_num_diff = calculate_region_num(hr);
   }
   assert(region_num_diff <= _region_num,
-         hrl_err_msg("[%s] region's region num: "SIZE_FORMAT" "
+         hrs_err_msg("[%s] region's region num: "SIZE_FORMAT" "
                      "should be <= region num: "SIZE_FORMAT,
                      name(), region_num_diff, _region_num));
   _region_num -= region_num_diff;

   size_t used_bytes = hr->used();
   assert(used_bytes <= _total_used_bytes,
-         hrl_err_msg("[%s] region's used bytes: "SIZE_FORMAT" "
+         hrs_err_msg("[%s] region's used bytes: "SIZE_FORMAT" "
                      "should be <= used bytes: "SIZE_FORMAT,
                      name(), used_bytes, _total_used_bytes));
   _total_used_bytes -= used_bytes;
 }

 inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) {
-  hrl_assert_region_ok(this, hr, this);
-  assert(hr->next() == NULL, hrl_ext_msg(this, "should already be unlinked"));
+  hrs_assert_region_ok(this, hr, this);
+  assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked"));

   hr->set_containing_set(NULL);
   update_for_removal(hr);
@@ -85,13 +85,13 @@ inline void HeapRegionSetBase::remove_internal(HeapRegion* hr) {
 //////////////////// HeapRegionSet ////////////////////

 inline void HeapRegionSet::add(HeapRegion* hr) {
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   // add_internal() will verify the region.
   add_internal(hr);
 }

 inline void HeapRegionSet::remove(HeapRegion* hr) {
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   // remove_internal() will verify the region.
   remove_internal(hr);
 }
@@ -101,8 +101,8 @@ inline void HeapRegionSet::remove_with_proxy(HeapRegion* hr,
   // No need to fo the MT safety check here given that this method
   // does not update the contents of the set but instead accumulates
   // the changes in proxy_set which is assumed to be thread-local.
-  hrl_assert_sets_match(this, proxy_set);
-  hrl_assert_region_ok(this, hr, this);
+  hrs_assert_sets_match(this, proxy_set);
+  hrs_assert_region_ok(this, hr, this);

   hr->set_containing_set(NULL);
   proxy_set->update_for_addition(hr);
@@ -111,10 +111,10 @@ inline void HeapRegionSet::remove_with_proxy(HeapRegion* hr,
 //////////////////// HeapRegionLinkedList ////////////////////

 inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) {
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);
   assert((length() == 0 && _head == NULL && _tail == NULL) ||
          (length() >  0 && _head != NULL && _tail != NULL),
-         hrl_ext_msg(this, "invariant"));
+         hrs_ext_msg(this, "invariant"));
   // add_internal() will verify the region.
   add_internal(hr);
@@ -128,10 +128,10 @@ inline void HeapRegionLinkedList::add_as_tail(HeapRegion* hr) {
 }

 inline HeapRegion* HeapRegionLinkedList::remove_head() {
-  hrl_assert_mt_safety_ok(this);
-  assert(!is_empty(), hrl_ext_msg(this, "the list should not be empty"));
+  hrs_assert_mt_safety_ok(this);
+  assert(!is_empty(), hrs_ext_msg(this, "the list should not be empty"));
   assert(length() > 0 && _head != NULL && _tail != NULL,
-         hrl_ext_msg(this, "invariant"));
+         hrs_ext_msg(this, "invariant"));

   // We need to unlink it first.
   HeapRegion* hr = _head;
@@ -147,7 +147,7 @@ inline HeapRegion* HeapRegionLinkedList::remove_head() {
 }

 inline HeapRegion* HeapRegionLinkedList::remove_head_or_null() {
-  hrl_assert_mt_safety_ok(this);
+  hrs_assert_mt_safety_ok(this);

   if (!is_empty()) {
     return remove_head();
src/share/vm/gc_implementation/g1/heapRegionSets.cpp
@@ -52,7 +52,7 @@ bool MasterFreeRegionList::check_mt_safety() {
               FreeList_lock->owned_by_self())) ||
             (!SafepointSynchronize::is_at_safepoint() &&
               Heap_lock->owned_by_self()),
-            hrl_ext_msg(this, "master free list MT safety protocol"));
+            hrs_ext_msg(this, "master free list MT safety protocol"));

   return FreeRegionList::check_mt_safety();
 }
@@ -65,7 +65,7 @@ bool SecondaryFreeRegionList::check_mt_safety() {
   // while holding the SecondaryFreeList_lock.
   guarantee(SecondaryFreeList_lock->owned_by_self(),
-            hrl_ext_msg(this, "secondary free list MT safety protocol"));
+            hrs_ext_msg(this, "secondary free list MT safety protocol"));

   return FreeRegionList::check_mt_safety();
 }
@@ -81,7 +81,7 @@ const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) {
   return HeapRegionSet::verify_region_extra(hr);
 }

-//////////////////// HumongousRegionSet ////////////////////
+//////////////////// MasterHumongousRegionSet ////////////////////

 bool MasterHumongousRegionSet::check_mt_safety() {
   // Master Humongous Set MT safety protocol:
@@ -97,6 +97,6 @@ bool MasterHumongousRegionSet::check_mt_safety() {
               OldSets_lock->owned_by_self())) ||
             (!SafepointSynchronize::is_at_safepoint() &&
               Heap_lock->owned_by_self()),
-            hrl_ext_msg(this, "master humongous set MT safety protocol"));
+            hrs_ext_msg(this, "master humongous set MT safety protocol"));

   return HumongousRegionSet::check_mt_safety();
 }
src/share/vm/utilities/debug.hpp
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_UTILITIES_DEBUG_HPP
 #define SHARE_VM_UTILITIES_DEBUG_HPP

+#include "prims/jvm.h"
 #include "utilities/globalDefinitions.hpp"

 #include <stdarg.h>
@@ -48,7 +49,7 @@ template <size_t bufsz>
 FormatBuffer<bufsz>::FormatBuffer(const char * format, ...) {
   va_list argp;
   va_start(argp, format);
-  vsnprintf(_buf, bufsz, format, argp);
+  jio_vsnprintf(_buf, bufsz, format, argp);
   va_end(argp);
 }
@@ -61,7 +62,7 @@ void FormatBuffer<bufsz>::append(const char* format, ...) {
   va_list argp;
   va_start(argp, format);
-  vsnprintf(buf_end, bufsz - len, format, argp);
+  jio_vsnprintf(buf_end, bufsz - len, format, argp);
   va_end(argp);
 }
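Note: the hunks above switch FormatBuffer from plain vsnprintf to jio_vsnprintf and add the prims/jvm.h include where that function is declared. For readers unfamiliar with the class, here is a standalone analogue of the FormatBuffer pattern (a fixed-size buffer filled by a variadic constructor, with append() for follow-up text). ToyFormatBuffer is a hypothetical name, and it deliberately uses std::vsnprintf so the sketch compiles outside the VM; it is not a drop-in replacement for the real class.

#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <cstring>

// Standalone sketch of the FormatBuffer pattern used by hrs_err_msg /
// hrs_ext_msg in the G1 files above.
template <std::size_t bufsz>
class ToyFormatBuffer {
 public:
  // Format the initial contents, like FormatBuffer's constructor above.
  ToyFormatBuffer(const char* format, ...) {
    va_list argp;
    va_start(argp, format);
    std::vsnprintf(_buf, bufsz, format, argp);
    va_end(argp);
  }

  // Append more formatted text after the current contents, like
  // FormatBuffer::append() in the hunk above.
  void append(const char* format, ...) {
    std::size_t len = std::strlen(_buf);
    va_list argp;
    va_start(argp, format);
    std::vsnprintf(_buf + len, bufsz - len, format, argp);
    va_end(argp);
  }

  const char* buffer() const { return _buf; }

 private:
  char _buf[bufsz];
};

// Usage, loosely mirroring how hrs_ext_msg builds an assert message:
// ToyFormatBuffer<512> msg("[%s] %s", "free list", "invariant");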
src/share/vm/utilities/globalDefinitions.hpp
@@ -1185,7 +1185,7 @@ inline int build_int_from_shorts( jushort low, jushort high ) {
 // '%d' formats to indicate a 64-bit quantity; commonly "l" (in LP64) or "ll"
 // (in ILP32).

-#define BOOL_TO_STR(__b) (__b) ? "true" : "false"
+#define BOOL_TO_STR(_b_) ((_b_) ? "true" : "false")

 // Format 32-bit quantities.
 #define INT32_FORMAT "%d"
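Note: besides the reserved-identifier rename (__b to _b_), the new BOOL_TO_STR wraps the whole conditional in parentheses. Without them the expansion is not a self-contained expression, so any operator with higher precedence than ?: that follows the macro binds only to the last branch. The snippet below copies the before and after definitions under illustrative names; the "+ 1" usage is deliberately artificial, chosen only to make the different parses visible.

#include <cstdio>

#define BOOL_TO_STR_OLD(_b_) (_b_) ? "true" : "false"
#define BOOL_TO_STR_NEW(_b_) ((_b_) ? "true" : "false")

int main() {
  bool b = true;
  // Old form: parses as (b) ? "true" : ("false" + 1), so the '+ 1' applies
  // only to the false branch and the true branch comes back unmodified.
  std::printf("%s\n", BOOL_TO_STR_OLD(b) + 1);   // prints "true"
  // New form: the whole conditional is parenthesized, so '+ 1' skips the
  // first character of whichever string was selected.
  std::printf("%s\n", BOOL_TO_STR_NEW(b) + 1);   // prints "rue"
  return 0;
}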