openanolis / dragonwell8_hotspot
Commit 0c811a79 (Merge)
Authored December 12, 2008 by jmasa
Parents: 7bead513, 1a26624c

Showing 27 changed files with 1011 additions and 399 deletions (+1011, -399)
Changed files:

src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp (+4, -5)
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp (+1, -1)
src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp (+7, -9)
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp (+1, -1)
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp (+10, -13)
src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp (+3, -16)
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp (+563, -168)
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp (+165, -22)
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp (+6, -17)
src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp (+6, -6)
src/share/vm/gc_interface/collectedHeap.cpp (+102, -4)
src/share/vm/gc_interface/collectedHeap.hpp (+39, -0)
src/share/vm/gc_interface/collectedHeap.inline.hpp (+3, -4)
src/share/vm/includeDB_gc (+4, -3)
src/share/vm/memory/permGen.cpp (+32, -32)
src/share/vm/memory/sharedHeap.cpp (+0, -40)
src/share/vm/memory/sharedHeap.hpp (+0, -8)
src/share/vm/memory/space.cpp (+3, -13)
src/share/vm/memory/tenuredGeneration.cpp (+1, -1)
src/share/vm/memory/threadLocalAllocBuffer.cpp (+1, -2)
src/share/vm/memory/universe.cpp (+18, -15)
src/share/vm/memory/universe.hpp (+3, -0)
src/share/vm/oops/arrayOop.hpp (+12, -11)
src/share/vm/oops/typeArrayKlass.cpp (+4, -3)
src/share/vm/oops/typeArrayKlass.hpp (+5, -1)
src/share/vm/runtime/arguments.cpp (+10, -0)
src/share/vm/runtime/globals.hpp (+8, -4)
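Most of the call-site churn in this merge replaces SharedHeap::fill_region_with_object(MemRegion) with the CollectedHeap fillers (fill_with_object, fill_with_objects, min_fill_size) introduced by the collectedHeap.* changes listed above. The sketch below is illustrative only and is not part of the commit: HotSpot types are stubbed with plain C++ so the shape of the new call-site pattern is visible; the stub class name and the word sizes are assumptions.

// Hedged sketch of the new call-site pattern (not from the commit).
#include <cstddef>

typedef unsigned long HeapWordVal;            // stand-in for a heap word

struct CollectedHeapStub {                    // stand-in for CollectedHeap's static fillers
  static size_t min_fill_size() { return 2; }                         // assumed value
  static void fill_with_object(HeapWordVal* start, size_t words) {}   // one filler object
  static void fill_with_objects(HeapWordVal* start, size_t words) {}  // possibly several
};

// Old pattern:  SharedHeap::fill_region_with_object(MemRegion(top, end));
// New pattern:  size the gap in words, skip gaps below min_fill_size(), then
// let the heap plant the filler object(s).
void retire_unused_tail(HeapWordVal* top, HeapWordVal* end) {
  const size_t words = static_cast<size_t>(end - top);
  if (words >= CollectedHeapStub::min_fill_size()) {
    CollectedHeapStub::fill_with_objects(top, words);
  }
}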
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -2954,7 +2954,7 @@ public:
       // The object has been either evacuated or is dead. Fill it with a
       // dummy object.
       MemRegion mr((HeapWord*)obj, obj->size());
-      SharedHeap::fill_region_with_object(mr);
+      CollectedHeap::fill_with_object(mr);
       _cm->clearRangeBothMaps(mr);
     }
   }

@@ -3225,7 +3225,7 @@ void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
     // Otherwise, try to claim it.
     block = r->par_allocate(free_words);
   } while (block == NULL);
-  SharedHeap::fill_region_with_object(MemRegion(block, free_words));
+  fill_with_object(block, free_words);
 }
 #define use_local_bitmaps 1

@@ -3619,9 +3619,8 @@ public:
       guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
                 "should contain whole object");
       alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-    } else {
-      SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+    } else {
+      CollectedHeap::fill_with_object(obj, word_sz);
       add_to_undo_waste(word_sz);
     }
   }
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp

@@ -102,7 +102,7 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
       HeapWord* tmp = hr->allocate(sz);
       assert(tmp != NULL, "Humongous allocation failure");
       MemRegion mr = MemRegion(tmp, sz);
-      SharedHeap::fill_region_with_object(mr);
+      CollectedHeap::fill_with_object(mr);
       hr->declare_filled_region_to_BOT(mr);
       if (i == first) {
         first_hr->set_startsHumongous();
src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp

@@ -51,14 +51,14 @@ void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
   if (_retained) {
     // If the buffer had been retained shorten the previous filler object.
     assert(_retained_filler.end() <= _top, "INVARIANT");
-    SharedHeap::fill_region_with_object(_retained_filler);
+    CollectedHeap::fill_with_object(_retained_filler);
     // Wasted space book-keeping, otherwise (normally) done in invalidate()
     _wasted += _retained_filler.word_size();
     _retained = false;
   }
   assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
   if (_top < _hard_end) {
-    SharedHeap::fill_region_with_object(MemRegion(_top, _hard_end));
+    CollectedHeap::fill_with_object(_top, _hard_end);
     if (!retain) {
       invalidate();
     } else {

@@ -155,7 +155,7 @@ ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
 // modifying the _next_threshold state in the BOT.
 void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                      bool contig) {
-  SharedHeap::fill_region_with_object(mr);
+  CollectedHeap::fill_with_object(mr);
   if (contig) {
     _bt.alloc_block(mr.start(), mr.end());
   } else {

@@ -171,7 +171,7 @@ HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
          "or else _true_end should be equal to _hard_end");
   assert(_retained, "or else _true_end should be equal to _hard_end");
   assert(_retained_filler.end() <= _top, "INVARIANT");
-  SharedHeap::fill_region_with_object(_retained_filler);
+  CollectedHeap::fill_with_object(_retained_filler);
   if (_top < _hard_end) {
     fill_region_with_block(MemRegion(_top, _hard_end), true);
   }

@@ -316,11 +316,9 @@ void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
     while (_top <= chunk_boundary) {
       assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
              "Consequence of last card handling above.");
-      MemRegion chunk_portion(chunk_boundary, _hard_end);
-      _bt.BlockOffsetArray::alloc_block(chunk_portion.start(), chunk_portion.end());
-      SharedHeap::fill_region_with_object(chunk_portion);
-      _hard_end = chunk_portion.start();
+      _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
+      CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
+      _hard_end = chunk_boundary;
       chunk_boundary -= ChunkSizeInWords;
     }
     _end = _hard_end - AlignmentReserve;
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

@@ -201,7 +201,7 @@ void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
            "Should contain whole object.");
     to_space_alloc_buffer()->undo_allocation(obj, word_sz);
   } else {
-    SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+    CollectedHeap::fill_with_object(obj, word_sz);
   }
 }
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp

@@ -389,7 +389,7 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
   // full GC.
   const size_t alignment = old_gen->virtual_space()->alignment();
   const size_t eden_used = eden_space->used_in_bytes();
-  const size_t promoted = (size_t)(size_policy->avg_promoted()->padded_average());
+  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
   const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
   const size_t eden_capacity = eden_space->capacity_in_bytes();

@@ -416,16 +416,14 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
   // Fill the unused part of the old gen.
   MutableSpace* const old_space = old_gen->object_space();
-  MemRegion old_gen_unused(old_space->top(), old_space->end());
-
-  // If the unused part of the old gen cannot be filled, skip
-  // absorbing eden.
-  if (old_gen_unused.word_size() < SharedHeap::min_fill_size()) {
-    return false;
-  }
-
-  if (!old_gen_unused.is_empty()) {
-    SharedHeap::fill_region_with_object(old_gen_unused);
+  HeapWord* const unused_start = old_space->top();
+  size_t const unused_words = pointer_delta(old_space->end(), unused_start);
+
+  if (unused_words > 0) {
+    if (unused_words < CollectedHeap::min_fill_size()) {
+      return false;  // If the old gen cannot be filled, must give up.
+    }
+    CollectedHeap::fill_with_objects(unused_start, unused_words);
   }

   // Take the live data from eden and set both top and end in the old gen to

@@ -441,9 +439,8 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
   // Update the object start array for the filler object and the data from eden.
   ObjectStartArray* const start_array = old_gen->start_array();
-  HeapWord* const start = old_gen_unused.start();
-  for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
-    start_array->allocate_block(addr);
+  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
+    start_array->allocate_block(p);
   }

   // Could update the promoted average here, but it is not typically updated at
src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp

@@ -275,22 +275,9 @@ bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words,
                                             HeapWord* q, size_t deadlength) {
   if (allowed_deadspace_words >= deadlength) {
     allowed_deadspace_words -= deadlength;
-    oop(q)->set_mark(markOopDesc::prototype()->set_marked());
-    const size_t aligned_min_int_array_size =
-      align_object_size(typeArrayOopDesc::header_size(T_INT));
-    if (deadlength >= aligned_min_int_array_size) {
-      oop(q)->set_klass(Universe::intArrayKlassObj());
-      assert(((deadlength - aligned_min_int_array_size) * (HeapWordSize/sizeof(jint))) < (size_t)max_jint,
-             "deadspace too big for Arrayoop");
-      typeArrayOop(q)->set_length((int)((deadlength - aligned_min_int_array_size)
-                                        * (HeapWordSize/sizeof(jint))));
-    } else {
-      assert((int)deadlength == instanceOopDesc::header_size(),
-             "size for smallest fake dead object doesn't match");
-      oop(q)->set_klass(SystemDictionary::object_klass());
-    }
-    assert((int)deadlength == oop(q)->size(),
-           "make sure size for fake dead object match");
+    CollectedHeap::fill_with_object(q, deadlength);
+    oop(q)->set_mark(oop(q)->mark()->set_marked());
+    assert((int)deadlength == oop(q)->size(), "bad filler object size");
     // Recall that we required "q == compaction_top".
     return true;
   } else {
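The psMarkSweepDecorator change above deletes the hand-rolled filler construction (picking between a bare object header and an int array, then setting the klass and array length) in favour of a single CollectedHeap::fill_with_object(q, deadlength) call. The snippet below is a standalone, hedged model of the decision the deleted code used to make; the header-size parameters are illustrative stand-ins, not HotSpot constants.

// Hedged model of the deleted filler-type choice (illustration only).
#include <cassert>
#include <cstddef>

enum FillerKind { PlainObjectFiller, IntArrayFiller };

// gap_words: size of the dead space to fill, in heap words.
// object_header_words / int_array_header_words: assumed stand-ins for
// instanceOopDesc::header_size() and typeArrayOopDesc::header_size(T_INT).
FillerKind choose_filler(size_t gap_words,
                         size_t object_header_words,
                         size_t int_array_header_words) {
  if (gap_words >= int_array_header_words) {
    // An int array can absorb the remainder of the gap via its length field.
    return IntArrayFiller;
  }
  // Otherwise the gap must be exactly one bare object header.
  assert(gap_words == object_header_words && "gap too small for any filler");
  return PlainObjectFiller;
}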
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

@@ -88,6 +88,72 @@ GrowableArray<HeapWord*>* PSParallelCompact::_last_gc_live_oops_moved_to = NULL;
 GrowableArray<size_t>* PSParallelCompact::_last_gc_live_oops_size = NULL;
 #endif

+void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
+                       HeapWord* destination)
+{
+  assert(src_region_idx != 0, "invalid src_region_idx");
+  assert(partial_obj_size != 0, "invalid partial_obj_size argument");
+  assert(destination != NULL, "invalid destination argument");
+
+  _src_region_idx = src_region_idx;
+  _partial_obj_size = partial_obj_size;
+  _destination = destination;
+
+  // These fields may not be updated below, so make sure they're clear.
+  assert(_dest_region_addr == NULL, "should have been cleared");
+  assert(_first_src_addr == NULL, "should have been cleared");
+
+  // Determine the number of destination regions for the partial object.
+  HeapWord* const last_word = destination + partial_obj_size - 1;
+  const ParallelCompactData& sd = PSParallelCompact::summary_data();
+  HeapWord* const beg_region_addr = sd.region_align_down(destination);
+  HeapWord* const end_region_addr = sd.region_align_down(last_word);
+
+  if (beg_region_addr == end_region_addr) {
+    // One destination region.
+    _destination_count = 1;
+    if (end_region_addr == destination) {
+      // The destination falls on a region boundary, thus the first word of the
+      // partial object will be the first word copied to the destination region.
+      _dest_region_addr = end_region_addr;
+      _first_src_addr = sd.region_to_addr(src_region_idx);
+    }
+  } else {
+    // Two destination regions.  When copied, the partial object will cross a
+    // destination region boundary, so a word somewhere within the partial
+    // object will be the first word copied to the second destination region.
+    _destination_count = 2;
+    _dest_region_addr = end_region_addr;
+    const size_t ofs = pointer_delta(end_region_addr, destination);
+    assert(ofs < _partial_obj_size, "sanity");
+    _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
+  }
+}
+
+void SplitInfo::clear()
+{
+  _src_region_idx = 0;
+  _partial_obj_size = 0;
+  _destination = NULL;
+  _destination_count = 0;
+  _dest_region_addr = NULL;
+  _first_src_addr = NULL;
+  assert(!is_valid(), "sanity");
+}
+
+#ifdef ASSERT
+void SplitInfo::verify_clear()
+{
+  assert(_src_region_idx == 0, "not clear");
+  assert(_partial_obj_size == 0, "not clear");
+  assert(_destination == NULL, "not clear");
+  assert(_destination_count == 0, "not clear");
+  assert(_dest_region_addr == NULL, "not clear");
+  assert(_first_src_addr == NULL, "not clear");
+}
+#endif  // #ifdef ASSERT
+
 #ifndef PRODUCT
 const char* PSParallelCompact::space_names[] = {
   "perm", "old ", "eden", "from", "to "

@@ -416,21 +482,134 @@ ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
   }
 }

-bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
-                                    HeapWord* source_beg, HeapWord* source_end,
-                                    HeapWord** target_next,
-                                    HeapWord** source_next) {
-  // This is too strict.
-  // assert(region_offset(source_beg) == 0, "not RegionSize aligned");
+// Find the point at which a space can be split and, if necessary, record the
+// split point.
+//
+// If the current src region (which overflowed the destination space) doesn't
+// have a partial object, the split point is at the beginning of the current src
+// region (an "easy" split, no extra bookkeeping required).
+//
+// If the current src region has a partial object, the split point is in the
+// region where that partial object starts (call it the split_region).  If
+// split_region has a partial object, then the split point is just after that
+// partial object (a "hard" split where we have to record the split data and
+// zero the partial_obj_size field).  With a "hard" split, we know that the
+// partial_obj ends within split_region because the partial object that caused
+// the overflow starts in split_region.  If split_region doesn't have a partial
+// obj, then the split is at the beginning of split_region (another "easy"
+// split).
+HeapWord*
+ParallelCompactData::summarize_split_space(size_t src_region,
+                                           SplitInfo& split_info,
+                                           HeapWord* destination,
+                                           HeapWord* target_end,
+                                           HeapWord** target_next)
+{
+  assert(destination <= target_end, "sanity");
+  assert(destination + _region_data[src_region].data_size() > target_end,
+         "region should not fit into target space");
+
+  size_t split_region = src_region;
+  HeapWord* split_destination = destination;
+  size_t partial_obj_size = _region_data[src_region].partial_obj_size();
+
+  if (destination + partial_obj_size > target_end) {
+    // The split point is just after the partial object (if any) in the
+    // src_region that contains the start of the object that overflowed the
+    // destination space.
+    //
+    // Find the start of the "overflow" object and set split_region to the
+    // region containing it.
+    HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
+    split_region = addr_to_region_idx(overflow_obj);
+
+    // Clear the source_region field of all destination regions whose first word
+    // came from data after the split point (a non-null source_region field
+    // implies a region must be filled).
+    //
+    // An alternative to the simple loop below:  clear during post_compact(),
+    // which uses memcpy instead of individual stores, and is easy to
+    // parallelize.  (The downside is that it clears the entire RegionData
+    // object as opposed to just one field.)
+    //
+    // post_compact() would have to clear the summary data up to the highest
+    // address that was written during the summary phase, which would be
+    //
+    //         max(top, max(new_top, clear_top))
+    //
+    // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
+    // to destination + partial_obj_size, where both have the values passed to
+    // this routine.
+    const RegionData* const sr = region(split_region);
+    const size_t beg_idx =
+      addr_to_region_idx(region_align_up(sr->destination() +
+                                         sr->partial_obj_size()));
+    const size_t end_idx =
+      addr_to_region_idx(region_align_up(destination + partial_obj_size));
+
+    if (TraceParallelOldGCSummaryPhase) {
+        gclog_or_tty->print_cr("split:  clearing source_region field in ["
+                               SIZE_FORMAT ", " SIZE_FORMAT ")",
+                               beg_idx, end_idx);
+    }
+    for (size_t idx = beg_idx; idx < end_idx; ++idx) {
+      _region_data[idx].set_source_region(0);
+    }
+
+    // Set split_destination and partial_obj_size to reflect the split region.
+    split_destination = sr->destination();
+    partial_obj_size = sr->partial_obj_size();
+  }
+
+  // The split is recorded only if a partial object extends onto the region.
+  if (partial_obj_size != 0) {
+    _region_data[split_region].set_partial_obj_size(0);
+    split_info.record(split_region, partial_obj_size, split_destination);
+  }
+
+  // Setup the continuation addresses.
+  *target_next = split_destination + partial_obj_size;
+  HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;
+
+  if (TraceParallelOldGCSummaryPhase) {
+    const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
+    gclog_or_tty->print_cr("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT
+                           " pos=" SIZE_FORMAT,
+                           split_type, source_next, split_region,
+                           partial_obj_size);
+    gclog_or_tty->print_cr("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT
+                           " tn=" PTR_FORMAT,
+                           split_type, split_destination,
+                           addr_to_region_idx(split_destination),
+                           *target_next);
+
+    if (partial_obj_size != 0) {
+      HeapWord* const po_beg = split_info.destination();
+      HeapWord* const po_end = po_beg + split_info.partial_obj_size();
+      gclog_or_tty->print_cr("%s split:  "
+                             "po_beg=" PTR_FORMAT " " SIZE_FORMAT " "
+                             "po_end=" PTR_FORMAT " " SIZE_FORMAT,
+                             split_type,
+                             po_beg, addr_to_region_idx(po_beg),
+                             po_end, addr_to_region_idx(po_end));
+    }
+  }
+
+  return source_next;
+}
+
+bool ParallelCompactData::summarize(SplitInfo& split_info,
+                                    HeapWord* source_beg, HeapWord* source_end,
+                                    HeapWord** source_next,
+                                    HeapWord* target_beg, HeapWord* target_end,
+                                    HeapWord** target_next)
+{
   if (TraceParallelOldGCSummaryPhase) {
-    tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
-                  "sb=" PTR_FORMAT " se=" PTR_FORMAT " "
-                  "tn=" PTR_FORMAT " sn=" PTR_FORMAT,
-                  target_beg, target_end, source_beg, source_end,
-                  target_next != 0 ? *target_next : (HeapWord*) 0,
-                  source_next != 0 ? *source_next : (HeapWord*) 0);
+    HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
+    tty->print_cr("sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT
+                  "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
+                  source_beg, source_end, source_next_val,
+                  target_beg, target_end, *target_next);
   }

   size_t cur_region = addr_to_region_idx(source_beg);

@@ -438,45 +617,53 @@ bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
   HeapWord* dest_addr = target_beg;
   while (cur_region < end_region) {
-    size_t words = _region_data[cur_region].data_size();
-
-#if     1
-    assert(pointer_delta(target_end, dest_addr) >= words,
-           "source region does not fit into target region");
-#else
-    // XXX - need some work on the corner cases here.  If the region does not
-    // fit, then must either make sure any partial_obj from the region fits, or
-    // "undo" the initial part of the partial_obj that is in the previous
-    // region.
-    if (dest_addr + words >= target_end) {
-      // Let the caller know where to continue.
-      *target_next = dest_addr;
-      *source_next = region_to_addr(cur_region);
-      return false;
-    }
-#endif  // #if 1
-
     // The destination must be set even if the region has no data.
     _region_data[cur_region].set_destination(dest_addr);

-    // Set the destination_count for cur_region, and if necessary, update
-    // source_region for a destination region.  The source_region field is
-    // updated if cur_region is the first (left-most) region to be copied to a
-    // destination region.
-    //
-    // The destination_count calculation is a bit subtle.  A region that has
-    // data that compacts into itself does not count itself as a destination.
-    // This maintains the invariant that a zero count means the region is
-    // available and can be claimed and then filled.
+    size_t words = _region_data[cur_region].data_size();
+    if (words > 0) {
+      // If cur_region does not fit entirely into the target space, find a point
+      // at which the source space can be 'split' so that part is copied to the
+      // target space and the rest is copied elsewhere.
+      if (dest_addr + words > target_end) {
+        assert(source_next != NULL, "source_next is NULL when splitting");
+        *source_next = summarize_split_space(cur_region, split_info, dest_addr,
+                                             target_end, target_next);
+        return false;
+      }
+
+      // Compute the destination_count for cur_region, and if necessary, update
+      // source_region for a destination region.  The source_region field is
+      // updated if cur_region is the first (left-most) region to be copied to a
+      // destination region.
+      //
+      // The destination_count calculation is a bit subtle.  A region that has
+      // data that compacts into itself does not count itself as a destination.
+      // This maintains the invariant that a zero count means the region is
+      // available and can be claimed and then filled.
+      uint destination_count = 0;
+      if (split_info.is_split(cur_region)) {
+        // The current region has been split:  the partial object will be copied
+        // to one destination space and the remaining data will be copied to
+        // another destination space.  Adjust the initial destination_count and,
+        // if necessary, set the source_region field if the partial object will
+        // cross a destination region boundary.
+        destination_count = split_info.destination_count();
+        if (destination_count == 2) {
+          size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
+          _region_data[dest_idx].set_source_region(cur_region);
+        }
+      }
+
       HeapWord* const last_addr = dest_addr + words - 1;
       const size_t dest_region_1 = addr_to_region_idx(dest_addr);
       const size_t dest_region_2 = addr_to_region_idx(last_addr);
-#if     0
       // Initially assume that the destination regions will be the same and
       // adjust the value below if necessary.  Under this assumption, if
       // cur_region == dest_region_2, then cur_region will be compacted
       // completely into itself.
-      uint destination_count = cur_region == dest_region_2 ? 0 : 1;
+      destination_count += cur_region == dest_region_2 ? 0 : 1;
       if (dest_region_1 != dest_region_2) {
         // Destination regions differ; adjust destination_count.
         destination_count += 1;

@@ -487,25 +674,6 @@ bool ParallelCompactData::summarize(HeapWord* target_beg, HeapWord* target_end,
         // region.
         _region_data[dest_region_1].set_source_region(cur_region);
       }
-#else
-    // Initially assume that the destination regions will be different and
-    // adjust the value below if necessary.  Under this assumption, if
-    // cur_region == dest_region2, then cur_region will be compacted partially
-    // into dest_region_1 and partially into itself.
-    uint destination_count = cur_region == dest_region_2 ? 1 : 2;
-    if (dest_region_1 != dest_region_2) {
-      // Data from cur_region will be copied to the start of dest_region_2.
-      _region_data[dest_region_2].set_source_region(cur_region);
-    } else {
-      // Destination regions are the same; adjust destination_count.
-      destination_count -= 1;
-      if (region_offset(dest_addr) == 0) {
-        // Data from cur_region will be copied to the start of the destination
-        // region.
-        _region_data[dest_region_1].set_source_region(cur_region);
-      }
-    }
-#endif  // #if 0
-
       _region_data[cur_region].set_destination_count(destination_count);
       _region_data[cur_region].set_data_location(region_to_addr(cur_region));

@@ -749,6 +917,13 @@ PSParallelCompact::clear_data_covering_space(SpaceId id)
   const size_t end_region =
     _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
   _summary_data.clear_range(beg_region, end_region);
+
+  // Clear the data used to 'split' regions.
+  SplitInfo& split_info = _space_info[id].split_info();
+  if (split_info.is_valid()) {
+    split_info.clear();
+  }
+  DEBUG_ONLY(split_info.verify_clear();)
 }

 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)

@@ -807,10 +982,11 @@ void PSParallelCompact::post_compact()
 {
   TraceTime tm("post compact", print_phases(), true, gclog_or_tty);

-  // Clear the marking bitmap and summary data and update top() in each space.
   for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
+    // Clear the marking bitmap, summary data and split info.
     clear_data_covering_space(SpaceId(id));
-    _space_info[id].space()->set_top(_space_info[id].new_top());
+    // Update top().  Must be done after clearing the bitmap and summary data.
+    _space_info[id].publish_new_top();
   }

   MutableSpace* const eden_space = _space_info[eden_space_id].space();

@@ -1151,6 +1327,13 @@ HeapWord*
 PSParallelCompact::compute_dense_prefix(const SpaceId id,
                                         bool maximum_compaction)
 {
+  if (ParallelOldGCSplitALot) {
+    if (_space_info[id].dense_prefix() != _space_info[id].space()->bottom()) {
+      // The value was chosen to provoke splitting a young gen space; use it.
+      return _space_info[id].dense_prefix();
+    }
+  }
+
   const size_t region_size = ParallelCompactData::RegionSize;
   const ParallelCompactData& sd = summary_data();

@@ -1239,14 +1422,169 @@ PSParallelCompact::compute_dense_prefix(const SpaceId id,
   return sd.region_to_addr(best_cp);
 }

+#ifndef PRODUCT
+void
+PSParallelCompact::fill_with_live_objects(SpaceId id, HeapWord* const start,
+                                          size_t words)
+{
+  if (TraceParallelOldGCSummaryPhase) {
+    tty->print_cr("fill_with_live_objects [" PTR_FORMAT " " PTR_FORMAT ") "
+                  SIZE_FORMAT, start, start + words, words);
+  }
+
+  ObjectStartArray* const start_array = _space_info[id].start_array();
+  CollectedHeap::fill_with_objects(start, words);
+  for (HeapWord* p = start; p < start + words; p += oop(p)->size()) {
+    _mark_bitmap.mark_obj(p, words);
+    _summary_data.add_obj(p, words);
+    start_array->allocate_block(p);
+  }
+}
+
+void
+PSParallelCompact::summarize_new_objects(SpaceId id, HeapWord* start)
+{
+  ParallelCompactData& sd = summary_data();
+  MutableSpace* space = _space_info[id].space();
+
+  // Find the source and destination start addresses.
+  HeapWord* const src_addr = sd.region_align_down(start);
+  HeapWord* dst_addr;
+  if (src_addr < start) {
+    dst_addr = sd.addr_to_region_ptr(src_addr)->destination();
+  } else if (src_addr > space->bottom()) {
+    // The start (the original top() value) is aligned to a region boundary so
+    // the associated region does not have a destination.  Compute the
+    // destination from the previous region.
+    RegionData* const cp = sd.addr_to_region_ptr(src_addr) - 1;
+    dst_addr = cp->destination() + cp->data_size();
+  } else {
+    // Filling the entire space.
+    dst_addr = space->bottom();
+  }
+  assert(dst_addr != NULL, "sanity");
+
+  // Update the summary data.
+  bool result = _summary_data.summarize(_space_info[id].split_info(),
+                                        src_addr, space->top(), NULL,
+                                        dst_addr, space->end(),
+                                        _space_info[id].new_top_addr());
+  assert(result, "should not fail:  bad filler object size");
+}
+
+void
+PSParallelCompact::provoke_split(bool & max_compaction)
+{
+  const size_t region_size = ParallelCompactData::RegionSize;
+  ParallelCompactData& sd = summary_data();
+
+  MutableSpace* const eden_space = _space_info[eden_space_id].space();
+  MutableSpace* const from_space = _space_info[from_space_id].space();
+  const size_t eden_live = pointer_delta(eden_space->top(),
+                                         _space_info[eden_space_id].new_top());
+  const size_t from_live = pointer_delta(from_space->top(),
+                                         _space_info[from_space_id].new_top());
+
+  const size_t min_fill_size = CollectedHeap::min_fill_size();
+  const size_t eden_free = pointer_delta(eden_space->end(), eden_space->top());
+  const size_t eden_fillable = eden_free >= min_fill_size ? eden_free : 0;
+  const size_t from_free = pointer_delta(from_space->end(), from_space->top());
+  const size_t from_fillable = from_free >= min_fill_size ? from_free : 0;
+
+  // Choose the space to split; need at least 2 regions live (or fillable).
+  SpaceId id;
+  MutableSpace* space;
+  size_t live_words;
+  size_t fill_words;
+  if (eden_live + eden_fillable >= region_size * 2) {
+    id = eden_space_id;
+    space = eden_space;
+    live_words = eden_live;
+    fill_words = eden_fillable;
+  } else if (from_live + from_fillable >= region_size * 2) {
+    id = from_space_id;
+    space = from_space;
+    live_words = from_live;
+    fill_words = from_fillable;
+  } else {
+    return; // Give up.
+  }
+  assert(fill_words == 0 || fill_words >= min_fill_size, "sanity");
+
+  if (live_words < region_size * 2) {
+    // Fill from top() to end() w/live objects of mixed sizes.
+    HeapWord* const fill_start = space->top();
+    live_words += fill_words;
+
+    space->set_top(fill_start + fill_words);
+    if (ZapUnusedHeapArea) {
+      space->set_top_for_allocations();
+    }
+
+    HeapWord* cur_addr = fill_start;
+    while (fill_words > 0) {
+      const size_t r = (size_t)os::random() % (region_size / 2) + min_fill_size;
+      size_t cur_size = MIN2(align_object_size_(r), fill_words);
+      if (fill_words - cur_size < min_fill_size) {
+        cur_size = fill_words; // Avoid leaving a fragment too small to fill.
+      }
+
+      CollectedHeap::fill_with_object(cur_addr, cur_size);
+      mark_bitmap()->mark_obj(cur_addr, cur_size);
+      sd.add_obj(cur_addr, cur_size);
+
+      cur_addr += cur_size;
+      fill_words -= cur_size;
+    }
+
+    summarize_new_objects(id, fill_start);
+  }
+
+  max_compaction = false;
+
+  // Manipulate the old gen so that it has room for about half of the live data
+  // in the target young gen space (live_words / 2).
+  id = old_space_id;
+  space = _space_info[id].space();
+  const size_t free_at_end = space->free_in_words();
+  const size_t free_target = align_object_size(live_words / 2);
+  const size_t dead = pointer_delta(space->top(), _space_info[id].new_top());
+
+  if (free_at_end >= free_target + min_fill_size) {
+    // Fill space above top() and set the dense prefix so everything survives.
+    HeapWord* const fill_start = space->top();
+    const size_t fill_size = free_at_end - free_target;
+    space->set_top(space->top() + fill_size);
+    if (ZapUnusedHeapArea) {
+      space->set_top_for_allocations();
+    }
+    fill_with_live_objects(id, fill_start, fill_size);
+    summarize_new_objects(id, fill_start);
+    _space_info[id].set_dense_prefix(sd.region_align_down(space->top()));
+  } else if (dead + free_at_end > free_target) {
+    // Find a dense prefix that makes the right amount of space available.
+    HeapWord* cur = sd.region_align_down(space->top());
+    HeapWord* cur_destination = sd.addr_to_region_ptr(cur)->destination();
+    size_t dead_to_right = pointer_delta(space->end(), cur_destination);
+    while (dead_to_right < free_target) {
+      cur -= region_size;
+      cur_destination = sd.addr_to_region_ptr(cur)->destination();
+      dead_to_right = pointer_delta(space->end(), cur_destination);
+    }
+    _space_info[id].set_dense_prefix(cur);
+  }
+}
+#endif // #ifndef PRODUCT
+
 void PSParallelCompact::summarize_spaces_quick()
 {
   for (unsigned int i = 0; i < last_space_id; ++i) {
     const MutableSpace* space = _space_info[i].space();
-    bool result = _summary_data.summarize(space->bottom(), space->end(),
-                                          space->bottom(), space->top(),
-                                          _space_info[i].new_top_addr());
-    assert(result, "should never fail");
+    HeapWord** nta = _space_info[i].new_top_addr();
+    bool result = _summary_data.summarize(_space_info[i].split_info(),
+                                          space->bottom(), space->top(), NULL,
+                                          space->bottom(), space->end(), nta);
+    assert(result, "space must fit into itself");
     _space_info[i].set_dense_prefix(space->bottom());
   }
 }

@@ -1308,8 +1646,7 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
   }
 #endif  // #ifdef _LP64

-    MemRegion region(obj_beg, obj_len);
-    SharedHeap::fill_region_with_object(region);
+    CollectedHeap::fill_with_object(obj_beg, obj_len);
     _mark_bitmap.mark_obj(obj_beg, obj_len);
     _summary_data.add_obj(obj_beg, obj_len);
     assert(start_array(id) != NULL, "sanity");

@@ -1317,12 +1654,24 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
   }
 }

+void
+PSParallelCompact::clear_source_region(HeapWord* beg_addr, HeapWord* end_addr)
+{
+  RegionData* const beg_ptr = _summary_data.addr_to_region_ptr(beg_addr);
+  HeapWord* const end_aligned_up = _summary_data.region_align_up(end_addr);
+  RegionData* const end_ptr = _summary_data.addr_to_region_ptr(end_aligned_up);
+  for (RegionData* cur = beg_ptr; cur < end_ptr; ++cur) {
+    cur->set_source_region(0);
+  }
+}
+
 void
 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
 {
   assert(id < last_space_id, "id out of range");
-  assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
-         "should have been set in summarize_spaces_quick()");
+  assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom() ||
+         ParallelOldGCSplitALot && id == old_space_id,
+         "should have been reset in summarize_spaces_quick()");

   const MutableSpace* space = _space_info[id].space();
   if (_space_info[id].new_top() != space->bottom()) {

@@ -1338,20 +1687,24 @@ PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
   }
 #endif  // #ifndef PRODUCT

-    // If dead space crosses the dense prefix boundary, it is (at least
-    // partially) filled with a dummy object, marked live and added to the
-    // summary data.  This simplifies the copy/update phase and must be done
-    // before the final locations of objects are determined, to prevent leaving
-    // a fragment of dead space that is too small to fill with an object.
+    // Recompute the summary data, taking into account the dense prefix.  If
+    // every last byte will be reclaimed, then the existing summary data which
+    // compacts everything can be left in place.
     if (!maximum_compaction && dense_prefix_end != space->bottom()) {
+      // If dead space crosses the dense prefix boundary, it is (at least
+      // partially) filled with a dummy object, marked live and added to the
+      // summary data.  This simplifies the copy/update phase and must be done
+      // before the final locations of objects are determined, to prevent
+      // leaving a fragment of dead space that is too small to fill.
       fill_dense_prefix_end(id);
-    }

-    // Compute the destination of each Region, and thus each object.
-    _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
-    _summary_data.summarize(dense_prefix_end, space->end(),
-                            dense_prefix_end, space->top(),
-                            _space_info[id].new_top_addr());
+      // Compute the destination of each Region, and thus each object.
+      _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
+      _summary_data.summarize(_space_info[id].split_info(),
+                              dense_prefix_end, space->top(), NULL,
+                              dense_prefix_end, space->end(),
+                              _space_info[id].new_top_addr());
+    }
   }

   if (TraceParallelOldGCSummaryPhase) {

@@ -1371,6 +1724,30 @@ PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
   }
 }

+#ifndef PRODUCT
+void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
+                                          HeapWord* dst_beg, HeapWord* dst_end,
+                                          SpaceId src_space_id,
+                                          HeapWord* src_beg, HeapWord* src_end)
+{
+  if (TraceParallelOldGCSummaryPhase) {
+    tty->print_cr("summarizing %d [%s] into %d [%s]:  "
+                  "src=" PTR_FORMAT "-" PTR_FORMAT " "
+                  SIZE_FORMAT "-" SIZE_FORMAT " "
+                  "dst=" PTR_FORMAT "-" PTR_FORMAT " "
+                  SIZE_FORMAT "-" SIZE_FORMAT,
+                  src_space_id, space_names[src_space_id],
+                  dst_space_id, space_names[dst_space_id],
+                  src_beg, src_end,
+                  _summary_data.addr_to_region_idx(src_beg),
+                  _summary_data.addr_to_region_idx(src_end),
+                  dst_beg, dst_end,
+                  _summary_data.addr_to_region_idx(dst_beg),
+                  _summary_data.addr_to_region_idx(dst_end));
+  }
+}
+#endif  // #ifndef PRODUCT
+
 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                       bool maximum_compaction)
 {

@@ -1403,57 +1780,98 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
   // The amount of live data that will end up in old space (assuming it fits).
   size_t old_space_total_live = 0;
-  unsigned int id;
-  for (id = old_space_id; id < last_space_id; ++id) {
+  assert(perm_space_id < old_space_id, "should not count perm data here");
+  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     old_space_total_live += pointer_delta(_space_info[id].new_top(),
                                           _space_info[id].space()->bottom());
   }

-  const MutableSpace* old_space = _space_info[old_space_id].space();
-  if (old_space_total_live > old_space->capacity_in_words()) {
+  MutableSpace* const old_space = _space_info[old_space_id].space();
+  const size_t old_capacity = old_space->capacity_in_words();
+  if (old_space_total_live > old_capacity) {
     // XXX - should also try to expand
     maximum_compaction = true;
   } else if (!UseParallelOldGCDensePrefix) {
     maximum_compaction = true;
   }
+#ifndef PRODUCT
+  if (ParallelOldGCSplitALot && old_space_total_live < old_capacity) {
+    if (total_invocations() % ParallelOldGCSplitInterval == 0) {
+      provoke_split(maximum_compaction);
+    }
+  }
+#endif // #ifndef PRODUCT

   // Permanent and Old generations.
   summarize_space(perm_space_id, maximum_compaction);
   summarize_space(old_space_id, maximum_compaction);

-  // Summarize the remaining spaces (those in the young gen) into old space.  If
-  // the live data from a space doesn't fit, the existing summarization is left
-  // intact, so the data is compacted down within the space itself.
-  HeapWord** new_top_addr = _space_info[old_space_id].new_top_addr();
-  HeapWord* const target_space_end = old_space->end();
-  for (id = eden_space_id; id < last_space_id; ++id) {
+  // Summarize the remaining spaces in the young gen.  The initial target space
+  // is the old gen.  If a space does not fit entirely into the target, then the
+  // remainder is compacted into the space itself and that space becomes the new
+  // target.
+  SpaceId dst_space_id = old_space_id;
+  HeapWord* dst_space_end = old_space->end();
+  HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
+  for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
     const MutableSpace* space = _space_info[id].space();
     const size_t live = pointer_delta(_space_info[id].new_top(),
                                       space->bottom());
-    const size_t available = pointer_delta(target_space_end, *new_top_addr);
+    const size_t available = pointer_delta(dst_space_end, *new_top_addr);
+
+    NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
+                                  SpaceId(id), space->bottom(), space->top());)
     if (live > 0 && live <= available) {
       // All the live data will fit.
-      if (TraceParallelOldGCSummaryPhase) {
-        tty->print_cr("summarizing %d into old_space @ " PTR_FORMAT,
-                      id, *new_top_addr);
-      }
-      _summary_data.summarize(*new_top_addr, target_space_end,
-                              space->bottom(), space->top(),
-                              new_top_addr);
+      bool done = _summary_data.summarize(_space_info[id].split_info(),
                                          space->bottom(), space->top(),
+                                          NULL,
+                                          *new_top_addr, dst_space_end,
+                                          new_top_addr);
+      assert(done, "space must fit into old gen");

       // XXX - this is necessary because decrement_destination_counts() tests
       // source_region() to determine if a region will be filled.  Probably
       // better to pass src_space->new_top() into decrement_destination_counts
       // and test that instead.
       //
       // Clear the source_region field for each region in the space.
-      HeapWord* const new_top = _space_info[id].new_top();
-      HeapWord* const clear_end = _summary_data.region_align_up(new_top);
-      RegionData* beg_region =
-        _summary_data.addr_to_region_ptr(space->bottom());
-      RegionData* end_region = _summary_data.addr_to_region_ptr(clear_end);
-      while (beg_region < end_region) {
-        beg_region->set_source_region(0);
-        ++beg_region;
-      }
+      clear_source_region(space->bottom(), _space_info[id].new_top());

       // Reset the new_top value for the space.
       _space_info[id].set_new_top(space->bottom());
+    } else if (live > 0) {
+      // Attempt to fit part of the source space into the target space.
+      HeapWord* next_src_addr = NULL;
+      bool done = _summary_data.summarize(_space_info[id].split_info(),
+                                          space->bottom(), space->top(),
+                                          &next_src_addr,
+                                          *new_top_addr, dst_space_end,
+                                          new_top_addr);
+      assert(!done, "space should not fit into old gen");
+      assert(next_src_addr != NULL, "sanity");
+
+      // The source space becomes the new target, so the remainder is compacted
+      // within the space itself.
+      dst_space_id = SpaceId(id);
+      dst_space_end = space->end();
+      new_top_addr = _space_info[id].new_top_addr();
+      HeapWord* const clear_end = _space_info[id].new_top();
+      NOT_PRODUCT(summary_phase_msg(dst_space_id,
+                                    space->bottom(), dst_space_end,
+                                    SpaceId(id), next_src_addr, space->top());)
+      done = _summary_data.summarize(_space_info[id].split_info(),
+                                     next_src_addr, space->top(),
+                                     NULL,
+                                     space->bottom(), dst_space_end,
+                                     new_top_addr);
+      assert(done, "space must fit when compacted into itself");
+      assert(*new_top_addr <= space->top(), "usage should not grow");
+
+      // XXX - this should go away.  See comments above.
+      //
+      // Clear the source_region field in regions at the end of the space that
+      // will not be filled.
+      HeapWord* const clear_beg = _summary_data.region_align_up(*new_top_addr);
+      clear_source_region(clear_beg, clear_end);
     }
   }

@@ -1807,9 +2225,14 @@ bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_po
   // Fill the unused part of the old gen.
   MutableSpace* const old_space = old_gen->object_space();
-  MemRegion old_gen_unused(old_space->top(), old_space->end());
-  if (!old_gen_unused.is_empty()) {
-    SharedHeap::fill_region_with_object(old_gen_unused);
+  HeapWord* const unused_start = old_space->top();
+  size_t const unused_words = pointer_delta(old_space->end(), unused_start);
+
+  if (unused_words > 0) {
+    if (unused_words < CollectedHeap::min_fill_size()) {
+      return false;  // If the old gen cannot be filled, must give up.
+    }
+    CollectedHeap::fill_with_objects(unused_start, unused_words);
   }

   // Take the live data from eden and set both top and end in the old gen to

@@ -1825,9 +2248,8 @@ bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_po
   // Update the object start array for the filler object and the data from eden.
   ObjectStartArray* const start_array = old_gen->start_array();
-  HeapWord* const start = old_gen_unused.start();
-  for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
-    start_array->allocate_block(addr);
+  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
+    start_array->allocate_block(p);
   }

   // Could update the promoted average here, but it is not typically updated at

@@ -2048,14 +2470,13 @@ void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
     // regions in the dense prefix. Assume that 1 gc thread
     // will work on opening the gaps and the remaining gc threads
     // will work on the dense prefix.
-    SpaceId space_id = old_space_id;
-    while (space_id != last_space_id) {
+    unsigned int space_id;
+    for (space_id = old_space_id; space_id < last_space_id; ++space_id) {
       HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
       const MutableSpace* const space = _space_info[space_id].space();

       if (dense_prefix_end == space->bottom()) {
         // There is no dense prefix for this space.
-        space_id = next_compaction_space_id(space_id);
         continue;
       }

@@ -2105,23 +2526,20 @@ void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
         // region_index_end is not processed
         size_t region_index_end = MIN2(region_index_start + regions_per_thread,
                                        region_index_end_dense_prefix);
-        q->enqueue(new UpdateDensePrefixTask(
-                                 space_id,
-                                 region_index_start,
-                                 region_index_end));
+        q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
+                                             region_index_start,
+                                             region_index_end));
         region_index_start = region_index_end;
       }
     }
     // This gets any part of the dense prefix that did not
     // fit evenly.
     if (region_index_start < region_index_end_dense_prefix) {
-      q->enqueue(new UpdateDensePrefixTask(
-                                 space_id,
-                                 region_index_start,
-                                 region_index_end_dense_prefix));
+      q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
+                                           region_index_start,
+                                           region_index_end_dense_prefix));
     }
-    space_id = next_compaction_space_id(space_id);
   }
   // End tasks for dense prefix
 }

 void PSParallelCompact::enqueue_region_stealing_tasks(

@@ -2567,16 +2985,24 @@ PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
   return m->bit_to_addr(cur_beg);
 }

-HeapWord*
-PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
-                                  size_t src_region_idx)
+HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
+                                            SpaceId src_space_id,
+                                            size_t src_region_idx)
 {
-  ParMarkBitMap* const bitmap = mark_bitmap();
-  assert(summary_data().is_region_aligned(dest_addr), "not aligned");
+  assert(summary_data().is_region_aligned(dest_addr), "not aligned");
+
+  const SplitInfo& split_info = _space_info[src_space_id].split_info();
+  if (split_info.dest_region_addr() == dest_addr) {
+    // The partial object ending at the split point contains the first word to
+    // be copied to dest_addr.
+    return split_info.first_src_addr();
+  }
+
+  const ParallelCompactData& sd = summary_data();
+  ParMarkBitMap* const bitmap = mark_bitmap();
+  const size_t RegionSize = ParallelCompactData::RegionSize;
+
+  assert(sd.is_region_aligned(dest_addr), "not aligned");

   const RegionData* const src_region_ptr = sd.region(src_region_idx);
   const size_t partial_obj_size = src_region_ptr->partial_obj_size();
   HeapWord* const src_region_destination = src_region_ptr->destination();

@@ -2737,7 +3163,7 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
   HeapWord* src_space_top = _space_info[src_space_id].space()->top();

   MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
-  closure.set_source(first_src_addr(dest_addr, src_region_idx));
+  closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));

   // Adjust src_region_idx to prepare for decrementing destination counts (the
   // destination count is not decremented when a region is copied to itself).

@@ -3008,34 +3434,3 @@ void PSParallelCompact::compact_prologue() {
   summary_data().calc_new_pointer(Universe::intArrayKlassObj());
 }

-// The initial implementation of this method created a field
-// _next_compaction_space_id in SpaceInfo and initialized
-// that field in SpaceInfo::initialize_space_info().  That
-// required that _next_compaction_space_id be declared a
-// SpaceId in SpaceInfo and that would have required that
-// either SpaceId be declared in a separate class or that
-// it be declared in SpaceInfo.  It didn't seem consistent
-// to declare it in SpaceInfo (didn't really fit logically).
-// Alternatively, defining a separate class to define SpaceId
-// seem excessive.  This implementation is simple and localizes
-// the knowledge.
-PSParallelCompact::SpaceId
-PSParallelCompact::next_compaction_space_id(SpaceId id) {
-  assert(id < last_space_id, "id out of range");
-  switch (id) {
-    case perm_space_id: return last_space_id;
-    case old_space_id: return eden_space_id;
-    case eden_space_id: return from_space_id;
-    case from_space_id: return to_space_id;
-    case to_space_id: return last_space_id;
-    default:
-      assert(false, "Bad space id");
-      return last_space_id;
-  }
-}
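A behavioural note on the new ParallelCompactData::summarize(): it now returns false when the source data does not fit in the target space, records the split in a SplitInfo, and hands back a continuation address through source_next; summary_phase() then retargets the remainder into the source space itself. The sketch below is a simplified stand-in meant only to show that caller-side retry pattern — it ignores regions and the SplitInfo bookkeeping entirely, and the stub function is a toy, not the HotSpot routine.

// Hedged, simplified model of the two-pass pattern in the new summary_phase().
#include <cstddef>

typedef unsigned long HeapWordVal;

// Toy stand-in for ParallelCompactData::summarize(): returns true if the live
// range fits into the target; otherwise sets *source_next to the continuation
// point and returns false.  (The real routine also records a SplitInfo.)
static bool summarize_stub(HeapWordVal* source_beg, HeapWordVal* source_end,
                           HeapWordVal** source_next,
                           HeapWordVal* target_beg, HeapWordVal* target_end,
                           HeapWordVal** target_next) {
  const size_t live = static_cast<size_t>(source_end - source_beg);
  const size_t room = static_cast<size_t>(target_end - target_beg);
  if (live <= room) {
    *target_next = target_beg + live;
    return true;
  }
  if (source_next != nullptr) {
    *source_next = source_beg + room;   // where the caller must continue from
  }
  *target_next = target_end;
  return false;
}

// Caller-side pattern: try the old gen first; on overflow, compact the
// remainder of the young space into itself (the space becomes the new target).
void summarize_into_old_then_self(HeapWordVal* bottom, HeapWordVal* top,
                                  HeapWordVal* end,
                                  HeapWordVal* old_new_top, HeapWordVal* old_end) {
  HeapWordVal* next_src = nullptr;
  HeapWordVal* new_top = nullptr;
  if (!summarize_stub(bottom, top, &next_src, old_new_top, old_end, &new_top)) {
    summarize_stub(next_src, top, nullptr, bottom, end, &new_top);
  }
}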
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
浏览文件 @
0c811a79
...
...
@@ -36,6 +36,123 @@ class PreGCValues;
class
MoveAndUpdateClosure
;
class
RefProcTaskExecutor
;
// The SplitInfo class holds the information needed to 'split' a source region
// so that the live data can be copied to two destination *spaces*. Normally,
// all the live data in a region is copied to a single destination space (e.g.,
// everything live in a region in eden is copied entirely into the old gen).
// However, when the heap is nearly full, all the live data in eden may not fit
// into the old gen. Copying only some of the regions from eden to old gen
// requires finding a region that does not contain a partial object (i.e., no
// live object crosses the region boundary) somewhere near the last object that
// does fit into the old gen. Since it's not always possible to find such a
// region, splitting is necessary for predictable behavior.
//
// A region is always split at the end of the partial object. This avoids
// additional tests when calculating the new location of a pointer, which is a
// very hot code path. The partial object and everything to its left will be
// copied to another space (call it dest_space_1). The live data to the right
// of the partial object will be copied either within the space itself, or to a
// different destination space (distinct from dest_space_1).
//
// Split points are identified during the summary phase, when region
// destinations are computed: data about the split, including the
// partial_object_size, is recorded in a SplitInfo record and the
// partial_object_size field in the summary data is set to zero. The zeroing is
// possible (and necessary) since the partial object will move to a different
// destination space than anything to its right, thus the partial object should
// not affect the locations of any objects to its right.
//
// The recorded data is used during the compaction phase, but only rarely: when
// the partial object on the split region will be copied across a destination
// region boundary. This test is made once each time a region is filled, and is
// a simple address comparison, so the overhead is negligible (see
// PSParallelCompact::first_src_addr()).
//
// Notes:
//
// Only regions with partial objects are split; a region without a partial
// object does not need any extra bookkeeping.
//
// At most one region is split per space, so the amount of data required is
// constant.
//
// A region is split only when the destination space would overflow. Once that
// happens, the destination space is abandoned and no other data (even from
// other source spaces) is targeted to that destination space. Abandoning the
// destination space may leave a somewhat large unused area at the end, if a
// large object caused the overflow.
//
// Future work:
//
// More bookkeeping would be required to continue to use the destination space.
// The most general solution would allow data from regions in two different
// source spaces to be "joined" in a single destination region. At the very
// least, additional code would be required in next_src_region() to detect the
// join and skip to an out-of-order source region. If the join region was also
// the last destination region to which a split region was copied (the most
// likely case), then additional work would be needed to get fill_region() to
// stop iteration and switch to a new source region at the right point. Basic
// idea would be to use a fake value for the top of the source space. It is
// doable, if a bit tricky.
//
// A simpler (but less general) solution would fill the remainder of the
// destination region with a dummy object and continue filling the next
// destination region.
class
SplitInfo
{
public:
// Return true if this split info is valid (i.e., if a split has been
// recorded). The very first region cannot have a partial object and thus is
// never split, so 0 is the 'invalid' value.
bool
is_valid
()
const
{
return
_src_region_idx
>
0
;
}
// Return true if this split holds data for the specified source region.
inline
bool
is_split
(
size_t
source_region
)
const
;
// The index of the split region, the size of the partial object on that
// region and the destination of the partial object.
size_t
src_region_idx
()
const
{
return
_src_region_idx
;
}
size_t
partial_obj_size
()
const
{
return
_partial_obj_size
;
}
HeapWord
*
destination
()
const
{
return
_destination
;
}
// The destination count of the partial object referenced by this split
// (either 1 or 2). This must be added to the destination count of the
// remainder of the source region.
unsigned
int
destination_count
()
const
{
return
_destination_count
;
}
// If a word within the partial object will be written to the first word of a
// destination region, this is the address of the destination region;
// otherwise this is NULL.
HeapWord
*
dest_region_addr
()
const
{
return
_dest_region_addr
;
}
// If a word within the partial object will be written to the first word of a
// destination region, this is the address of that word within the partial
// object; otherwise this is NULL.
HeapWord
*
first_src_addr
()
const
{
return
_first_src_addr
;
}
// Record the data necessary to split the region src_region_idx.
void
record
(
size_t
src_region_idx
,
size_t
partial_obj_size
,
HeapWord
*
destination
);
void
clear
();
DEBUG_ONLY
(
void
verify_clear
();)
private:
size_t
_src_region_idx
;
size_t
_partial_obj_size
;
HeapWord
*
_destination
;
unsigned
int
_destination_count
;
HeapWord
*
_dest_region_addr
;
HeapWord
*
_first_src_addr
;
};
inline
bool
SplitInfo
::
is_split
(
size_t
region_idx
)
const
{
return
_src_region_idx
==
region_idx
&&
is_valid
();
}
class
SpaceInfo
{
public:
...
...
@@ -58,18 +175,23 @@ class SpaceInfo
// is no start array.
ObjectStartArray
*
start_array
()
const
{
return
_start_array
;
}
SplitInfo
&
split_info
()
{
return
_split_info
;
}
void
set_space
(
MutableSpace
*
s
)
{
_space
=
s
;
}
void
set_new_top
(
HeapWord
*
addr
)
{
_new_top
=
addr
;
}
void
set_min_dense_prefix
(
HeapWord
*
addr
)
{
_min_dense_prefix
=
addr
;
}
void
set_dense_prefix
(
HeapWord
*
addr
)
{
_dense_prefix
=
addr
;
}
void
set_start_array
(
ObjectStartArray
*
s
)
{
_start_array
=
s
;
}
void
publish_new_top
()
const
{
_space
->
set_top
(
_new_top
);
}
private:
MutableSpace
*
_space
;
HeapWord
*
_new_top
;
HeapWord
*
_min_dense_prefix
;
HeapWord
*
_dense_prefix
;
ObjectStartArray
*
_start_array
;
SplitInfo
_split_info
;
};
class
ParallelCompactData
...
...
@@ -230,9 +352,14 @@ public:
// must be region-aligned; end need not be.
void
summarize_dense_prefix
(
HeapWord
*
beg
,
HeapWord
*
end
);
bool
summarize
(
HeapWord
*
target_beg
,
HeapWord
*
target_end
,
HeapWord
*
summarize_split_space
(
size_t
src_region
,
SplitInfo
&
split_info
,
HeapWord
*
destination
,
HeapWord
*
target_end
,
HeapWord
**
target_next
);
bool
summarize
(
SplitInfo
&
split_info
,
HeapWord
*
source_beg
,
HeapWord
*
source_end
,
HeapWord
**
target_next
,
HeapWord
**
source_next
=
0
);
HeapWord
**
source_next
,
HeapWord
*
target_beg
,
HeapWord
*
target_end
,
HeapWord
**
target_next
);
void
clear
();
void
clear_range
(
size_t
beg_region
,
size_t
end_region
);
...
...
@@ -838,13 +965,27 @@ class PSParallelCompact : AllStatic {
  // non-empty.
  static void fill_dense_prefix_end(SpaceId id);

  // Clear the summary data source_region field for the specified addresses.
  static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);

#ifndef PRODUCT
  // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).

  // Fill the region [start, start + words) with live object(s).  Only usable
  // for the old and permanent generations.
  static void fill_with_live_objects(SpaceId id, HeapWord* const start,
                                     size_t words);

  // Include the new objects in the summary data.
  static void summarize_new_objects(SpaceId id, HeapWord* start);

  // Add live objects and/or choose the dense prefix to provoke splitting.
  static void provoke_split(bool& maximum_compaction);
#endif

  static void summarize_spaces_quick();
  static void summarize_space(SpaceId id, bool maximum_compaction);
  static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);

  // The space that is compacted after space_id.
  static SpaceId next_compaction_space_id(SpaceId space_id);

  // Adjust addresses in roots.  Does not adjust addresses in heap.
  static void adjust_roots();
...
...
@@ -999,6 +1140,7 @@ class PSParallelCompact : AllStatic {
  // Return the address of the word to be copied to dest_addr, which must be
  // aligned to a region boundary.
  static HeapWord* first_src_addr(HeapWord* const dest_addr,
                                  SpaceId src_space_id,
                                  size_t src_region_idx);
// Determine the next source region, set closure.source() to the start of the
...
...
@@ -1081,6 +1223,10 @@ class PSParallelCompact : AllStatic {
                                  const SpaceId id,
                                  const bool maximum_compaction,
                                  HeapWord* const addr);
  static void summary_phase_msg(SpaceId dst_space_id,
                                HeapWord* dst_beg, HeapWord* dst_end,
                                SpaceId src_space_id,
                                HeapWord* src_beg, HeapWord* src_end);
#endif // #ifndef PRODUCT
#ifdef ASSERT
...
...
@@ -1324,31 +1470,28 @@ inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
  oop(addr)->update_contents(compaction_manager());
}

class FillClosure: public ParMarkBitMapClosure {
 public:
  FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
    ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
-   _space_id(space_id),
-   _start_array(PSParallelCompact::start_array(space_id)) {
-   assert(_space_id == PSParallelCompact::perm_space_id ||
-          _space_id == PSParallelCompact::old_space_id,
+   _start_array(PSParallelCompact::start_array(space_id)) {
+   assert(space_id == PSParallelCompact::perm_space_id ||
+          space_id == PSParallelCompact::old_space_id,
           "cannot use FillClosure in the young gen");
    assert(bitmap() != NULL, "need a bitmap");
    assert(_start_array != NULL, "need a start array");
  }

- void fill_region(HeapWord* addr, size_t size) {
-   MemRegion region(addr, size);
-   SharedHeap::fill_region_with_object(region);
-   _start_array->allocate_block(addr);
- }

  virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
-   fill_region(addr, size);
+   CollectedHeap::fill_with_objects(addr, size);
+   HeapWord* const end = addr + size;
+   do {
+     _start_array->allocate_block(addr);
+     addr += oop(addr)->size();
+   } while (addr < end);
    return ParMarkBitMap::incomplete;
  }

 private:
- const PSParallelCompact::SpaceId _space_id;
  ObjectStartArray* const _start_array;
};
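Side note (not part of the change): to make the new do_addr concrete, a standalone sketch in plain C++ of the same walk. Once a dead region has been carved into filler objects, each object's start is recorded, the way the closure feeds the ObjectStartArray. The region layout below is invented.

  // Standalone sketch, not HotSpot code: record the start of every filler
  // object laid out back-to-back in a dead region.
  #include <cstddef>
  #include <iostream>
  #include <vector>

  int main() {
    // Assume the region was carved into these object sizes, in words.
    const std::vector<size_t> filler_sizes = {512, 512, 3};
    std::vector<size_t> block_starts;          // stand-in for the start array
    size_t addr = 0;                           // word offset of the next object
    for (size_t sz : filler_sizes) {
      block_starts.push_back(addr);            // _start_array->allocate_block(addr)
      addr += sz;                              // addr += oop(addr)->size()
    }
    for (size_t s : block_starts) std::cout << s << ' ';   // 0 512 1024
    std::cout << '\n';
  }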
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp  View file @ 0c811a79
...
...
@@ -499,26 +499,15 @@ oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(),
                "Object must be forwarded if the cas failed.");

-     // Unallocate the space used. NOTE! We may have directly allocated
-     // the object. If so, we cannot deallocate it, so we have to test!
-     if (new_obj_is_tenured) {
-       if (!_old_lab.unallocate_object(new_obj)) {
-         // The promotion lab failed to unallocate the object.
-         // We need to overwrite the object with a filler that
-         // contains no interior pointers.
-         MemRegion mr((HeapWord*)new_obj, new_obj_size);
-         // Clean this up and move to oopFactory (see bug 4718422)
-         SharedHeap::fill_region_with_object(mr);
-       }
-     } else {
-       if (!_young_lab.unallocate_object(new_obj)) {
-         // The promotion lab failed to unallocate the object.
-         // We need to overwrite the object with a filler that
-         // contains no interior pointers.
-         MemRegion mr((HeapWord*)new_obj, new_obj_size);
-         // Clean this up and move to oopFactory (see bug 4718422)
-         SharedHeap::fill_region_with_object(mr);
-       }
-     }
+     // Try to deallocate the space.  If it was directly allocated we cannot
+     // deallocate it, so we have to test.  If the deallocation fails,
+     // overwrite with a filler object.
+     if (new_obj_is_tenured) {
+       if (!_old_lab.unallocate_object(new_obj)) {
+         CollectedHeap::fill_with_object((HeapWord*)new_obj, new_obj_size);
+       }
+     } else if (!_young_lab.unallocate_object(new_obj)) {
+       CollectedHeap::fill_with_object((HeapWord*)new_obj, new_obj_size);
+     }
// don't update this before the unallocation!
...
...
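Side note (not part of the change): the same undo-or-fill pattern as a standalone sketch in plain C++. The toy bump-pointer lab, its sizes and the printed "filler" are all invented stand-ins; the real unallocate_object and CollectedHeap::fill_with_object live in the VM.

  // Standalone sketch, not HotSpot code: undo the allocation if possible,
  // otherwise overwrite the lost object with a filler.
  #include <cstddef>
  #include <iostream>

  struct ToyLab {                       // toy promotion lab: bump-pointer allocator
    size_t top = 0;
    size_t last_obj = 0, last_size = 0; // most recent allocation
    size_t allocate(size_t words) {
      last_obj = top; last_size = words; top += words; return last_obj;
    }
    bool unallocate_object(size_t obj) {
      if (obj == last_obj && last_obj + last_size == top) { top = last_obj; return true; }
      return false;                     // object was not the most recent allocation
    }
  };

  static void fill_with_object(size_t obj, size_t words) {   // stand-in for CollectedHeap
    std::cout << "filler at word " << obj << ", " << words << " words\n";
  }

  int main() {
    ToyLab lab;
    size_t a = lab.allocate(8);         // promoted copy that lost the forwarding race
    size_t b = lab.allocate(4);         // a later allocation from the same lab
    if (!lab.unallocate_object(a)) fill_with_object(a, 8);   // cannot undo -> fill
    if (!lab.unallocate_object(b)) fill_with_object(b, 4);   // last one -> undone
  }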
src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp  View file @ 0c811a79
...
...
@@ -76,8 +76,8 @@ void MutableNUMASpace::ensure_parsability() {
      MutableSpace* s = ls->space();
      if (s->top() < top()) { // For all spaces preceding the one containing top()
        if (s->free_in_words() > 0) {
-         SharedHeap::fill_region_with_object(MemRegion(s->top(), s->end()));
          size_t area_touched_words = pointer_delta(s->end(), s->top());
+         CollectedHeap::fill_with_object(s->top(), area_touched_words);
#ifndef ASSERT
          if (!ZapUnusedHeapArea) {
            area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
...
...
@@ -686,11 +686,11 @@ void MutableNUMASpace::set_top(HeapWord* value) {
        // a minimal object; assuming that's not the last chunk in which case we don't care.
        if (i < lgrp_spaces()->length() - 1) {
          size_t remainder = pointer_delta(s->end(), value);
-         const size_t minimal_object_size = oopDesc::header_size();
-         if (remainder < minimal_object_size && remainder > 0) {
-           // Add a filler object of a minimal size, it will cross the chunk boundary.
-           SharedHeap::fill_region_with_object(MemRegion(value, minimal_object_size));
-           value += minimal_object_size;
+         const size_t min_fill_size = CollectedHeap::min_fill_size();
+         if (remainder < min_fill_size && remainder > 0) {
+           // Add a minimum size filler object; it will cross the chunk boundary.
+           CollectedHeap::fill_with_object(value, min_fill_size);
+           value += min_fill_size;
            assert(!s->contains(value), "Should be in the next chunk");
            // Restart the loop from the same chunk, since the value has moved
            // to the next one.
...
...
src/share/vm/gc_interface/collectedHeap.cpp  View file @ 0c811a79
...
...
@@ -30,12 +30,21 @@
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

+size_t CollectedHeap::_filler_array_max_size = 0;

// Memory state functions.

-CollectedHeap::CollectedHeap() :
-  _reserved(), _barrier_set(NULL), _is_gc_active(false),
-  _total_collections(0), _total_full_collections(0),
-  _gc_cause(GCCause::_no_gc), _gc_lastcause(GCCause::_no_gc) {
+CollectedHeap::CollectedHeap()
+{
+  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
+  const size_t elements_per_word = HeapWordSize / sizeof(jint);
+  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
+                                             max_len * elements_per_word);
+
+  _barrier_set = NULL;
+  _is_gc_active = false;
+  _total_collections = _total_full_collections = 0;
+  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
...
...
@@ -128,6 +137,95 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {
  return obj;
}

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(arrayOopDesc::header_size(T_INT));
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size());
}

size_t CollectedHeap::filler_array_max_size() {
  return _filler_array_max_size;
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
{
  if (ZapFillerObjects) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void CollectedHeap::fill_with_array(HeapWord* start, size_t words)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::fillerArrayKlassObj(), start, words);
  DEBUG_ONLY(zap_filler_array(start, words);)
}

void CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::object_klass(), start,
                                 words);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill.  The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words);
}

oop CollectedHeap::new_store_barrier(oop new_obj) {
  // %%% This needs refactoring. (It was imported from the server compiler.)
  guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported");
...
...
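Side note (not part of the change): the LP64 loop above is the interesting part of fill_with_objects -- carve off maximum-sized filler arrays while guaranteeing the leftover never drops below the minimum filler size. A standalone sketch in plain C++ of just that arithmetic, with invented min/max values (the real ones come from min_fill_size() and filler_array_max_size()).

  // Standalone sketch, not HotSpot code: split `words` into filler pieces the
  // way the LP64 loop does, never leaving a remainder smaller than `min`.
  #include <cstddef>
  #include <iostream>
  #include <vector>

  static std::vector<size_t> carve_fillers(size_t words, size_t min, size_t max) {
    std::vector<size_t> pieces;
    while (words > max) {
      // Take a full-sized array unless that would leave less than `min` behind.
      const size_t cur = (words - max >= min) ? max : max - min;
      pieces.push_back(cur);
      words -= cur;
    }
    if (words > 0) pieces.push_back(words);   // final array or plain object
    return pieces;
  }

  int main() {
    for (size_t p : carve_fillers(23, 2, 10)) std::cout << p << ' ';  // 10 10 3
    std::cout << '\n';
    for (size_t p : carve_fillers(11, 2, 10)) std::cout << p << ' ';  // 8 3
    std::cout << '\n';
  }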
src/share/vm/gc_interface/collectedHeap.hpp  View file @ 0c811a79
...
...
@@ -47,6 +47,9 @@ class CollectedHeap : public CHeapObj {
  static int _fire_out_of_memory_count;
#endif

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

 protected:
  MemRegion _reserved;
  BarrierSet* _barrier_set;
...
...
@@ -119,6 +122,21 @@ class CollectedHeap : public CHeapObj {
  // Clears an allocated object.
  inline static void init_obj(HeapWord* obj, size_t size);

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();
  static inline size_t filler_array_max_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words);

  // Verification functions
  virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
...
...
@@ -294,6 +312,27 @@ class CollectedHeap : public CHeapObj {
  // The boundary between a "large" and "small" array of primitives, in words.
  virtual size_t large_typearray_limit() = 0;

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words);

  static void fill_with_object(HeapWord* start, size_t words);
  static void fill_with_object(MemRegion region) {
    fill_with_object(region.start(), region.word_size());
  }
  static void fill_with_object(HeapWord* start, HeapWord* end) {
    fill_with_object(start, pointer_delta(end, start));
  }
// Some heaps may offer a contiguous region for shared non-blocking
// allocation, via inlined code (by exporting the address of the top and
// end fields defining the extent of the contiguous allocation region.)
...
...
src/share/vm/gc_interface/collectedHeap.inline.hpp  View file @ 0c811a79
...
...
@@ -34,7 +34,6 @@ void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
                                                           HeapWord* objPtr,
                                                           size_t size) {
  oop obj = (oop)objPtr;

  assert(obj != NULL, "NULL object pointer");
...
...
@@ -44,9 +43,6 @@ void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
    // May be bootstrapping
    obj->set_mark(markOopDesc::prototype());
  }
-
- // support low memory notifications (no-op if not enabled)
- LowMemoryDetector::detect_low_memory_for_collected_pools();
}
void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
...
...
@@ -65,6 +61,9 @@ void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
// Support for jvmti and dtrace
inline void post_allocation_notify(KlassHandle klass, oop obj) {
+ // support low memory notifications (no-op if not enabled)
+ LowMemoryDetector::detect_low_memory_for_collected_pools();
+
  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj);
...
...
src/share/vm/includeDB_gc  View file @ 0c811a79
...
...
@@ -28,21 +28,22 @@ collectedHeap.cpp collectedHeap.hpp
collectedHeap.cpp                       collectedHeap.inline.hpp
collectedHeap.cpp                       init.hpp
collectedHeap.cpp                       oop.inline.hpp
collectedHeap.cpp                       systemDictionary.hpp
collectedHeap.cpp                       thread_<os_family>.inline.hpp

collectedHeap.hpp                       allocation.hpp
collectedHeap.hpp                       barrierSet.hpp
collectedHeap.hpp                       gcCause.hpp
collectedHeap.hpp                       handles.hpp
collectedHeap.hpp                       perfData.hpp
collectedHeap.hpp                       safepoint.hpp

collectedHeap.inline.hpp                arrayOop.hpp
collectedHeap.inline.hpp                collectedHeap.hpp
collectedHeap.inline.hpp                copy.hpp
collectedHeap.inline.hpp                jvmtiExport.hpp
collectedHeap.inline.hpp                lowMemoryDetector.hpp
collectedHeap.inline.hpp                sharedRuntime.hpp
collectedHeap.inline.hpp                thread.hpp
collectedHeap.inline.hpp                threadLocalAllocBuffer.inline.hpp
collectedHeap.inline.hpp                universe.hpp
...
...
src/share/vm/memory/permGen.cpp  View file @ 0c811a79
...
...
@@ -26,20 +26,24 @@
#include "incls/_permGen.cpp.incl"

HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
- MutexLocker ml(Heap_lock);
  GCCause::Cause next_cause = GCCause::_permanent_generation_full;
  GCCause::Cause prev_cause = GCCause::_no_gc;
+ unsigned int gc_count_before, full_gc_count_before;
+ HeapWord* obj;

  for (;;) {
-   HeapWord* obj = gen->allocate(size, false);
-   if (obj != NULL) {
-     return obj;
-   }
-   if (gen->capacity() < _capacity_expansion_limit ||
-       prev_cause != GCCause::_no_gc) {
-     obj = gen->expand_and_allocate(size, false);
-   }
-   if (obj == NULL && prev_cause != GCCause::_last_ditch_collection) {
+   {
+     MutexLocker ml(Heap_lock);
+     if ((obj = gen->allocate(size, false)) != NULL) {
+       return obj;
+     }
+     if (gen->capacity() < _capacity_expansion_limit ||
+         prev_cause != GCCause::_no_gc) {
+       obj = gen->expand_and_allocate(size, false);
+     }
+     if (obj != NULL || prev_cause == GCCause::_last_ditch_collection) {
+       return obj;
+     }
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
...
...
@@ -61,31 +65,27 @@ HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
          return NULL;
        }
      }
-     // Read the GC count while holding the Heap_lock
-     unsigned int gc_count_before = SharedHeap::heap()->total_collections();
-     unsigned int full_gc_count_before = SharedHeap::heap()->total_full_collections();
-     {
-       MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
-       VM_GenCollectForPermanentAllocation op(size, gc_count_before,
-                                              full_gc_count_before, next_cause);
-       VMThread::execute(&op);
-       if (!op.prologue_succeeded() || op.gc_locked()) {
-         assert(op.result() == NULL, "must be NULL if gc_locked() is true");
-         continue;  // retry and/or stall as necessary
-       }
-       obj = op.result();
-       assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
-              "result not in heap");
-       if (obj != NULL) {
-         return obj;
-       }
-     }
-     prev_cause = next_cause;
-     next_cause = GCCause::_last_ditch_collection;
+     else {
+       // Read the GC count while holding the Heap_lock
+       gc_count_before = SharedHeap::heap()->total_collections();
+       full_gc_count_before = SharedHeap::heap()->total_full_collections();
+     }
+   }
+
+   // Give up heap lock above, VMThread::execute below gets it back
+   VM_GenCollectForPermanentAllocation op(size, gc_count_before,
+                                          full_gc_count_before, next_cause);
+   VMThread::execute(&op);
+   if (!op.prologue_succeeded() || op.gc_locked()) {
+     assert(op.result() == NULL, "must be NULL if gc_locked() is true");
+     continue;  // retry and/or stall as necessary
+   }
+   obj = op.result();
+   assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
+          "result not in heap");
+   if (obj != NULL) {
+     return obj;
+   }
+
+   prev_cause = next_cause;
+   next_cause = GCCause::_last_ditch_collection;
  }
}
...
...
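Side note (not part of the change): a standalone sketch in plain C++ of the allocate / expand / collect / retry shape of the loop above. The toy generation, its sizes, the "collection" and the expansion limit are all invented stand-ins; locking, GC causes and the VM operation are omitted.

  // Standalone sketch, not HotSpot code.
  #include <cstddef>
  #include <iostream>

  struct ToyGen {                                 // toy generation arena
    size_t capacity = 64, used = 60, expansion_limit = 64;
    bool allocate(size_t words) {                 // gen->allocate(size, false)
      if (used + words <= capacity) { used += words; return true; }
      return false;
    }
    bool expand_and_allocate(size_t words) {      // gen->expand_and_allocate(...)
      if (capacity < expansion_limit) capacity += 32;
      return allocate(words);
    }
    void collect() { used /= 2; }                 // stand-in for a full collection
  };

  int main() {
    ToyGen gen;
    const size_t request = 16;
    bool last_ditch = false;
    for (;;) {
      if (gen.allocate(request))            { std::cout << "allocated\n"; break; }
      if (gen.expand_and_allocate(request)) { std::cout << "allocated after expand\n"; break; }
      if (last_ditch)                       { std::cout << "out of memory\n"; break; }
      gen.collect();                              // trigger a collection, then retry
      last_ditch = true;                          // the next failure is the last ditch
    }
  }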
src/share/vm/memory/sharedHeap.cpp  View file @ 0c811a79
...
...
@@ -248,46 +248,6 @@ void SharedHeap::ref_processing_init() {
  perm_gen()->ref_processor_init();
}

-void SharedHeap::fill_region_with_object(MemRegion mr) {
-  // Disable the posting of JVMTI VMObjectAlloc events as we
-  // don't want the filling of tlabs with filler arrays to be
-  // reported to the profiler.
-  NoJvmtiVMObjectAllocMark njm;
-
-  // Disable low memory detector because there is no real allocation.
-  LowMemoryDetectorDisabler lmd_dis;
-
-  // It turns out that post_allocation_setup_array takes a handle, so the
-  // call below contains an implicit conversion.  Best to free that handle
-  // as soon as possible.
-  HandleMark hm;
-
-  size_t word_size = mr.word_size();
-  size_t aligned_array_header_size =
-    align_object_size(typeArrayOopDesc::header_size(T_INT));
-
-  if (word_size >= aligned_array_header_size) {
-    const size_t array_length =
-      pointer_delta(mr.end(), mr.start()) -
-      typeArrayOopDesc::header_size(T_INT);
-    const size_t array_length_words =
-      array_length * (HeapWordSize / sizeof(jint));
-    post_allocation_setup_array(Universe::intArrayKlassObj(),
-                                mr.start(),
-                                mr.word_size(),
-                                (int)array_length_words);
-#ifdef ASSERT
-    HeapWord* elt_words = (mr.start() + typeArrayOopDesc::header_size(T_INT));
-    Copy::fill_to_words(elt_words, array_length, 0xDEAFBABE);
-#endif
-  } else {
-    assert(word_size == (size_t)oopDesc::header_size(), "Unaligned?");
-    post_allocation_setup_obj(SystemDictionary::object_klass(),
-                              mr.start(),
-                              mr.word_size());
-  }
-}

// Some utilities.
void SharedHeap::print_size_transition(outputStream* out,
                                       size_t bytes_before,
...
...
src/share/vm/memory/sharedHeap.hpp  View file @ 0c811a79
...
...
@@ -108,14 +108,6 @@ public:
  void set_perm(PermGen* perm_gen) { _perm_gen = perm_gen; }

- // A helper function that fills a region of the heap with
- // with a single object.
- static void fill_region_with_object(MemRegion mr);
-
- // Minimum garbage fill object size
- static size_t min_fill_size()          { return (size_t)align_object_size(oopDesc::header_size()); }
- static size_t min_fill_size_in_bytes() { return min_fill_size() * HeapWordSize; }
// This function returns the "GenRemSet" object that allows us to scan
// generations; at least the perm gen, possibly more in a fully
// generational heap.
...
...
src/share/vm/memory/space.cpp  View file @ 0c811a79
...
...
@@ -409,19 +409,9 @@ bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                           HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
-   oop(q)->set_mark(markOopDesc::prototype()->set_marked());
-   const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
-   if (deadlength >= min_int_array_size) {
-     oop(q)->set_klass(Universe::intArrayKlassObj());
-     typeArrayOop(q)->set_length((int)((deadlength - min_int_array_size)
-                                       * (HeapWordSize/sizeof(jint))));
-   } else {
-     assert((int) deadlength == instanceOopDesc::header_size(),
-            "size for smallest fake dead object doesn't match");
-     oop(q)->set_klass(SystemDictionary::object_klass());
-   }
-   assert((int) deadlength == oop(q)->size(),
-          "make sure size for fake dead object match");
+   CollectedHeap::fill_with_object(q, deadlength);
+   oop(q)->set_mark(oop(q)->mark()->set_marked());
+   assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
...
...
src/share/vm/memory/tenuredGeneration.cpp  View file @ 0c811a79
...
...
@@ -387,7 +387,7 @@ void TenuredGeneration::par_promote_alloc_undo(int thread_num,
"should contain whole object"
);
buf
->
undo_allocation
(
obj
,
word_sz
);
}
else
{
SharedHeap
::
fill_region_with_object
(
MemRegion
(
obj
,
word_sz
)
);
CollectedHeap
::
fill_with_object
(
obj
,
word_sz
);
}
}
...
...
src/share/vm/memory/threadLocalAllocBuffer.cpp  View file @ 0c811a79
...
...
@@ -100,8 +100,7 @@ void ThreadLocalAllocBuffer::accumulate_statistics() {
void ThreadLocalAllocBuffer::make_parsable(bool retire) {
  if (end() != NULL) {
    invariants();
-   MemRegion mr(top(), hard_end());
-   SharedHeap::fill_region_with_object(mr);
+   CollectedHeap::fill_with_object(top(), hard_end());

    if (retire || ZeroTLAB) {  // "Reset" the TLAB
      set_start(NULL);
...
...
src/share/vm/memory/universe.cpp  View file @ 0c811a79
...
...
@@ -49,16 +49,17 @@ klassOop Universe::_constantPoolKlassObj = NULL;
klassOop Universe::_constantPoolCacheKlassObj         = NULL;
klassOop Universe::_compiledICHolderKlassObj          = NULL;
klassOop Universe::_systemObjArrayKlassObj            = NULL;
+klassOop Universe::_fillerArrayKlassObj               = NULL;
oop Universe::_int_mirror                             = NULL;
oop Universe::_float_mirror                           = NULL;
oop Universe::_double_mirror                          = NULL;
oop Universe::_byte_mirror                            = NULL;
oop Universe::_bool_mirror                            = NULL;
oop Universe::_char_mirror                            = NULL;
oop Universe::_long_mirror                            = NULL;
oop Universe::_short_mirror                           = NULL;
oop Universe::_void_mirror                            = NULL;
oop Universe::_mirrors[T_VOID+1]                      = { NULL /*, NULL...*/ };
oop Universe::_main_thread_group                      = NULL;
oop Universe::_system_thread_group                    = NULL;
typeArrayOop Universe::_the_empty_byte_array          = NULL;
...
...
@@ -126,6 +127,7 @@ void Universe::system_classes_do(void f(klassOop)) {
  f(instanceKlassKlassObj());
  f(constantPoolKlassObj());
  f(systemObjArrayKlassObj());
+ f(fillerArrayKlassObj());
}

void Universe::oops_do(OopClosure* f, bool do_all) {
...
...
@@ -180,6 +182,7 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
  f->do_oop((oop*)&_constantPoolCacheKlassObj);
  f->do_oop((oop*)&_compiledICHolderKlassObj);
  f->do_oop((oop*)&_systemObjArrayKlassObj);
+ f->do_oop((oop*)&_fillerArrayKlassObj);
  f->do_oop((oop*)&_the_empty_byte_array);
  f->do_oop((oop*)&_the_empty_short_array);
  f->do_oop((oop*)&_the_empty_int_array);
...
...
@@ -257,16 +260,17 @@ void Universe::genesis(TRAPS) {
    _typeArrayKlassObjs[T_INT]  = _intArrayKlassObj;
    _typeArrayKlassObjs[T_LONG] = _longArrayKlassObj;

    _methodKlassObj             = methodKlass::create_klass(CHECK);
    _constMethodKlassObj        = constMethodKlass::create_klass(CHECK);
    _methodDataKlassObj         = methodDataKlass::create_klass(CHECK);
    _constantPoolKlassObj       = constantPoolKlass::create_klass(CHECK);
    _constantPoolCacheKlassObj  = constantPoolCacheKlass::create_klass(CHECK);
    _compiledICHolderKlassObj   = compiledICHolderKlass::create_klass(CHECK);
    _systemObjArrayKlassObj     = objArrayKlassKlass::cast(objArrayKlassKlassObj())->allocate_system_objArray_klass(CHECK);
+   _fillerArrayKlassObj        = typeArrayKlass::create_klass(T_INT, sizeof(jint), "<filler>", CHECK);

    _the_empty_byte_array       = oopFactory::new_permanent_byteArray(0, CHECK);
    _the_empty_short_array      = oopFactory::new_permanent_shortArray(0, CHECK);
    _the_empty_int_array        = oopFactory::new_permanent_intArray(0, CHECK);
    _the_empty_system_obj_array = oopFactory::new_system_objArray(0, CHECK);
...
...
@@ -274,7 +278,6 @@ void Universe::genesis(TRAPS) {
    _the_array_interfaces_array = oopFactory::new_system_objArray(2, CHECK);
    _vm_exception               = oopFactory::new_symbol("vm exception holder", CHECK);
  } else {
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    char* buffer = mapinfo->region_base(CompactingPermGenGen::md);
    void** vtbl_list = (void**)buffer;
...
src/share/vm/memory/universe.hpp  View file @ 0c811a79
...
...
@@ -92,6 +92,7 @@ class LatestMethodOopCache : public CommonMethodOopCache {
class Universe: AllStatic {
  // Ugh.  Universe is much too friendly.
  friend class MarkSweep;
  friend class oopDesc;
  friend class ClassLoader;
...
...
@@ -132,6 +133,7 @@ class Universe: AllStatic {
  static klassOop _constantPoolCacheKlassObj;
  static klassOop _compiledICHolderKlassObj;
  static klassOop _systemObjArrayKlassObj;
+ static klassOop _fillerArrayKlassObj;
// Known objects in the VM
...
...
@@ -264,6 +266,7 @@ class Universe: AllStatic {
  static klassOop constantPoolCacheKlassObj()       { return _constantPoolCacheKlassObj; }
  static klassOop compiledICHolderKlassObj()        { return _compiledICHolderKlassObj;  }
  static klassOop systemObjArrayKlassObj()          { return _systemObjArrayKlassObj;    }
+ static klassOop fillerArrayKlassObj()             { return _fillerArrayKlassObj;       }

  // Known objects in the VM
  static oop int_mirror()                           { return check_mirror(_int_mirror);
...
...
src/share/vm/oops/arrayOop.hpp  View file @ 0c811a79
...
...
@@ -96,19 +96,20 @@ class arrayOopDesc : public oopDesc {
                                    : typesize_in_bytes/HeapWordSize);
  }

- // This method returns the maximum length that can passed into
- // typeArrayOop::object_size(scale, length, header_size) without causing an
- // overflow. We substract an extra 2*wordSize to guard against double word
- // alignments. It gets the scale from the type2aelembytes array.
+ // Return the maximum length of an array of BasicType.  The length can be
+ // passed to typeArrayOop::object_size(scale, length, header_size) without
+ // causing an overflow.
  static int32_t max_array_length(BasicType type) {
    assert(type >= 0 && type < T_CONFLICT, "wrong type");
    assert(type2aelembytes(type) != 0, "wrong type");
-   // We use max_jint, since object_size is internally represented by an 'int'
-   // This gives us an upper bound of max_jint words for the size of the oop.
-   int32_t max_words = (max_jint - header_size(type) - 2);
-   int elembytes = type2aelembytes(type);
-   jlong len = ((jlong)max_words * HeapWordSize) / elembytes;
-   return (len > max_jint) ? max_jint : (int32_t)len;
+   const int bytes_per_element = type2aelembytes(type);
+   if (bytes_per_element < HeapWordSize) {
+     return max_jint;
+   }
+
+   const int32_t max_words = align_size_down(max_jint, MinObjAlignment);
+   const int32_t max_element_words = max_words - header_size(type);
+   const int32_t words_per_element = bytes_per_element >> LogHeapWordSize;
+   return max_element_words / words_per_element;
  }
};
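Side note (not part of the change): a quick standalone check of what the new bound works out to, with assumed values -- 8-byte heap words, 8-byte (jlong-sized) elements, a 2-word array header and 1-word object alignment. The real constants come from the VM; only the shape of the arithmetic is taken from the code above.

  // Standalone sketch with assumed constants, not the VM's real values.
  #include <cstdint>
  #include <iostream>

  int main() {
    const int32_t max_jint = 0x7fffffff;
    const int32_t HeapWordSize = 8, LogHeapWordSize = 3;  // assumed 64-bit words
    const int32_t MinObjAlignment = 1;                    // assumed, in words
    const int32_t header_words = 2;                       // assumed array header
    const int bytes_per_element = 8;                      // e.g. a jlong element

    // Elements smaller than a heap word can never overflow an int-sized
    // object size, so the bound would simply be max_jint.
    if (bytes_per_element < HeapWordSize) { std::cout << max_jint << '\n'; return 0; }

    const int32_t max_words = max_jint / MinObjAlignment * MinObjAlignment;  // align down
    const int32_t max_element_words = max_words - header_words;
    const int32_t words_per_element = bytes_per_element >> LogHeapWordSize;
    std::cout << max_element_words / words_per_element << '\n';  // 2147483645
  }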
src/share/vm/oops/typeArrayKlass.cpp  View file @ 0c811a79
...
...
@@ -36,13 +36,14 @@ bool typeArrayKlass::compute_is_subtype_of(klassOop k) {
  return element_type() == tak->element_type();
}

-klassOop typeArrayKlass::create_klass(BasicType type, int scale, TRAPS) {
+klassOop typeArrayKlass::create_klass(BasicType type, int scale,
+                                      const char* name_str, TRAPS) {
  typeArrayKlass o;

  symbolHandle sym(symbolOop(NULL));
  // bootstrapping: don't create sym if symbolKlass not created yet
- if (Universe::symbolKlassObj() != NULL) {
-   sym = oopFactory::new_symbol_handle(external_name(type), CHECK_NULL);
+ if (Universe::symbolKlassObj() != NULL && name_str != NULL) {
+   sym = oopFactory::new_symbol_handle(name_str, CHECK_NULL);
  }

  KlassHandle klassklass(THREAD, Universe::typeArrayKlassKlassObj());
...
...
src/share/vm/oops/typeArrayKlass.hpp  View file @ 0c811a79
...
...
@@ -39,7 +39,11 @@ class typeArrayKlass : public arrayKlass {
  // klass allocation
  DEFINE_ALLOCATE_PERMANENT(typeArrayKlass);
- static klassOop create_klass(BasicType type, int scale, TRAPS);
+ static klassOop create_klass(BasicType type, int scale,
+                              const char* name_str, TRAPS);
+ static inline klassOop create_klass(BasicType type, int scale, TRAPS) {
+   return create_klass(type, scale, external_name(type), CHECK_NULL);
+ }

  int oop_size(oop obj) const;
  int klass_oop_size() const { return object_size(); }
...
...
src/share/vm/runtime/arguments.cpp  View file @ 0c811a79
...
...
@@ -1517,6 +1517,16 @@ bool Arguments::check_vm_args_consistency() {
    MarkSweepAlwaysCompactCount = 1;  // Move objects every gc.
  }

+ if (UseParallelOldGC && ParallelOldGCSplitALot) {
+   // Settings to encourage splitting.
+   if (!FLAG_IS_CMDLINE(NewRatio)) {
+     FLAG_SET_CMDLINE(intx, NewRatio, 2);
+   }
+   if (!FLAG_IS_CMDLINE(ScavengeBeforeFullGC)) {
+     FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
+   }
+ }

  status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
  status = status && verify_percentage(GCTimeLimit, "GCTimeLimit");
  if (GCTimeLimit == 100) {
...
...
src/share/vm/runtime/globals.hpp  View file @ 0c811a79
...
...
@@ -625,6 +625,9 @@ class CommandLineFlags {
develop(bool, CheckZapUnusedHeapArea, false, \
"Check zapping of unused heap space") \
\
develop(bool, ZapFillerObjects, trueInDebug, \
"Zap filler objects with 0xDEAFBABE") \
\
develop(bool, PrintVMMessages, true, \
"Print vm messages on console") \
\
...
...
@@ -1200,11 +1203,12 @@ class CommandLineFlags {
  product(uintx, ParallelCMSThreads, 0,                                     \
          "Max number of threads CMS will use for concurrent work")         \
                                                                            \
- develop(bool, ParallelOldMTUnsafeMarkBitMap, false,                       \
-         "Use the Parallel Old MT unsafe in marking the bitmap")           \
+ develop(bool, ParallelOldGCSplitALot, false,                              \
+         "Provoke splitting (copying data from a young gen space to"       \
+         "multiple destination spaces)")                                   \
                                                                            \
- develop(bool, ParallelOldMTUnsafeUpdateLiveData, false,                   \
-         "Use the Parallel Old MT unsafe in update of live size")          \
+ develop(uintx, ParallelOldGCSplitInterval, 3,                             \
+         "How often to provoke splitting a young gen space")               \
                                                                            \
  develop(bool, TraceRegionTasksQueuing, false,                             \
          "Trace the queuing of the region tasks")                          \
...
...
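Side note (not part of the change): these are develop flags, so they are only accepted by a debug build of the VM. Assuming such a build, the splitting stress mode would be switched on with something like

  java -XX:+UseParallelOldGC -XX:+ParallelOldGCSplitALot -XX:ParallelOldGCSplitInterval=3 ...

with ParallelOldGCSplitInterval controlling how often a split of a young gen space is provoked.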