openanolis / dragonwell8_hotspot

Commit 0c811a79
Authored Dec 12, 2008 by jmasa
Merge of parents 7bead513 and 1a26624c

Showing 27 changed files with 1011 additions and 399 deletions (+1011 -399)
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp                      +4    -5
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp                        +1    -1
src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp                 +7    -9
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp                 +1    -1
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp            +10   -13
src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp   +3    -16
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp      +563  -168
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp      +165  -22
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp     +6    -17
src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp                 +6    -6
src/share/vm/gc_interface/collectedHeap.cpp                                +102  -4
src/share/vm/gc_interface/collectedHeap.hpp                                +39   -0
src/share/vm/gc_interface/collectedHeap.inline.hpp                         +3    -4
src/share/vm/includeDB_gc                                                  +4    -3
src/share/vm/memory/permGen.cpp                                            +32   -32
src/share/vm/memory/sharedHeap.cpp                                         +0    -40
src/share/vm/memory/sharedHeap.hpp                                         +0    -8
src/share/vm/memory/space.cpp                                              +3    -13
src/share/vm/memory/tenuredGeneration.cpp                                  +1    -1
src/share/vm/memory/threadLocalAllocBuffer.cpp                             +1    -2
src/share/vm/memory/universe.cpp                                           +18   -15
src/share/vm/memory/universe.hpp                                           +3    -0
src/share/vm/oops/arrayOop.hpp                                             +12   -11
src/share/vm/oops/typeArrayKlass.cpp                                       +4    -3
src/share/vm/oops/typeArrayKlass.hpp                                       +5    -1
src/share/vm/runtime/arguments.cpp                                         +10   -0
src/share/vm/runtime/globals.hpp                                           +8    -4
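
Taken together, the diffs that follow retire SharedHeap::fill_region_with_object(MemRegion) and re-home the garbage-fill utilities on CollectedHeap, which also gains a multi-object variant for regions larger than the largest int array. A minimal sketch of the call-site migration, with illustrative variable names (a summary of the pattern, not a line from any one file):

    // Before this merge: one filler object, MemRegion-based, on SharedHeap.
    SharedHeap::fill_region_with_object(MemRegion(start, words));

    // After this merge: CollectedHeap overloads taking (start, words),
    // (start, end) or a MemRegion, plus a variant that may lay down
    // several filler objects for arbitrarily large regions.
    CollectedHeap::fill_with_object(start, words);
    CollectedHeap::fill_with_objects(start, words);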
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -2954,7 +2954,7 @@ public:
         // The object has been either evacuated or is dead. Fill it with a
         // dummy object.
         MemRegion mr((HeapWord*)obj, obj->size());
-        SharedHeap::fill_region_with_object(mr);
+        CollectedHeap::fill_with_object(mr);
         _cm->clearRangeBothMaps(mr);
       }
     }
@@ -3225,7 +3225,7 @@ void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
       // Otherwise, try to claim it.
       block = r->par_allocate(free_words);
     } while (block == NULL);
-    SharedHeap::fill_region_with_object(MemRegion(block, free_words));
+    fill_with_object(block, free_words);
   }
 #define use_local_bitmaps 1
@@ -3619,9 +3619,8 @@ public:
       guarantee(alloc_buffer(purpose)->contains(obj + word_sz - 1),
                 "should contain whole object");
       alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-    } else {
-      SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+    } else {
+      CollectedHeap::fill_with_object(obj, word_sz);
       add_to_undo_waste(word_sz);
     }
   }
src/share/vm/gc_implementation/g1/heapRegionSeq.cpp
@@ -102,7 +102,7 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
     HeapWord* tmp = hr->allocate(sz);
     assert(tmp != NULL, "Humongous allocation failure");
     MemRegion mr = MemRegion(tmp, sz);
-    SharedHeap::fill_region_with_object(mr);
+    CollectedHeap::fill_with_object(mr);
     hr->declare_filled_region_to_BOT(mr);
     if (i == first) {
       first_hr->set_startsHumongous();
src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp
@@ -51,14 +51,14 @@ void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
   if (_retained) {
     // If the buffer had been retained shorten the previous filler object.
     assert(_retained_filler.end() <= _top, "INVARIANT");
-    SharedHeap::fill_region_with_object(_retained_filler);
+    CollectedHeap::fill_with_object(_retained_filler);
     // Wasted space book-keeping, otherwise (normally) done in invalidate()
     _wasted += _retained_filler.word_size();
     _retained = false;
   }
   assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
   if (_top < _hard_end) {
-    SharedHeap::fill_region_with_object(MemRegion(_top, _hard_end));
+    CollectedHeap::fill_with_object(_top, _hard_end);
     if (!retain) {
       invalidate();
     } else {
@@ -155,7 +155,7 @@ ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
 // modifying the _next_threshold state in the BOT.
 void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                      bool contig) {
-  SharedHeap::fill_region_with_object(mr);
+  CollectedHeap::fill_with_object(mr);
   if (contig) {
     _bt.alloc_block(mr.start(), mr.end());
   } else {
@@ -171,7 +171,7 @@ HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
          "or else _true_end should be equal to _hard_end");
   assert(_retained, "or else _true_end should be equal to _hard_end");
   assert(_retained_filler.end() <= _top, "INVARIANT");
-  SharedHeap::fill_region_with_object(_retained_filler);
+  CollectedHeap::fill_with_object(_retained_filler);
   if (_top < _hard_end) {
     fill_region_with_block(MemRegion(_top, _hard_end), true);
   }
@@ -316,11 +316,9 @@ void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
     while (_top <= chunk_boundary) {
       assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
              "Consequence of last card handling above.");
-      MemRegion chunk_portion(chunk_boundary, _hard_end);
-      _bt.BlockOffsetArray::alloc_block(chunk_portion.start(),
-                                        chunk_portion.end());
-      SharedHeap::fill_region_with_object(chunk_portion);
-      _hard_end = chunk_portion.start();
+      _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
+      CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
+      _hard_end = chunk_boundary;
       chunk_boundary -= ChunkSizeInWords;
     }
     _end = _hard_end - AlignmentReserve;
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
@@ -201,7 +201,7 @@ void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
            "Should contain whole object.");
     to_space_alloc_buffer()->undo_allocation(obj, word_sz);
   } else {
-    SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+    CollectedHeap::fill_with_object(obj, word_sz);
   }
 }
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp
@@ -389,7 +389,7 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
   // full GC.
   const size_t alignment = old_gen->virtual_space()->alignment();
   const size_t eden_used = eden_space->used_in_bytes();
-  const size_t promoted = (size_t)(size_policy->avg_promoted()->padded_average());
+  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
   const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
   const size_t eden_capacity = eden_space->capacity_in_bytes();
@@ -416,16 +416,14 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
   // Fill the unused part of the old gen.
   MutableSpace* const old_space = old_gen->object_space();
-  MemRegion old_gen_unused(old_space->top(), old_space->end());
-
-  // If the unused part of the old gen cannot be filled, skip
-  // absorbing eden.
-  if (old_gen_unused.word_size() < SharedHeap::min_fill_size()) {
-    return false;
-  }
+  HeapWord* const unused_start = old_space->top();
+  size_t const unused_words = pointer_delta(old_space->end(), unused_start);
 
-  if (!old_gen_unused.is_empty()) {
-    SharedHeap::fill_region_with_object(old_gen_unused);
+  if (unused_words > 0) {
+    if (unused_words < CollectedHeap::min_fill_size()) {
+      return false;  // If the old gen cannot be filled, must give up.
+    }
+    CollectedHeap::fill_with_objects(unused_start, unused_words);
   }
 
   // Take the live data from eden and set both top and end in the old gen to
@@ -441,9 +439,8 @@ bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
   // Update the object start array for the filler object and the data from eden.
   ObjectStartArray* const start_array = old_gen->start_array();
-  HeapWord* const start = old_gen_unused.start();
-  for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
-    start_array->allocate_block(addr);
+  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
+    start_array->allocate_block(p);
   }
 
   // Could update the promoted average here, but it is not typically updated at
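
One behavioral detail of the rewritten guard is easy to miss: the old code returned false whenever the unused part of the old gen was smaller than the minimum fill size, which included the case of an exactly-full old gen (zero unused words); the new code gives up only on a genuine unfillable sliver. A standalone sketch of just that predicate change (kMinFill is an illustrative stand-in for CollectedHeap::min_fill_size(), not HotSpot's value):

    #include <cstdio>
    #include <cstddef>

    // Illustrative stand-in for CollectedHeap::min_fill_size().
    static const std::size_t kMinFill = 2;

    // Old guard: abort when unused < min fill size, even when unused == 0.
    static bool old_guard_aborts(std::size_t unused_words) {
      return unused_words < kMinFill;
    }

    // New guard: abort only for a real, unfillable sliver (0 < unused < min).
    static bool new_guard_aborts(std::size_t unused_words) {
      return unused_words > 0 && unused_words < kMinFill;
    }

    int main() {
      std::printf("unused=0: old=%d new=%d\n",
                  old_guard_aborts(0), new_guard_aborts(0));  // 1 0
      std::printf("unused=1: old=%d new=%d\n",
                  old_guard_aborts(1), new_guard_aborts(1));  // 1 1
      return 0;
    }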
src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp
@@ -275,22 +275,9 @@ bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words,
                                             HeapWord* q, size_t deadlength) {
   if (allowed_deadspace_words >= deadlength) {
     allowed_deadspace_words -= deadlength;
-    oop(q)->set_mark(markOopDesc::prototype()->set_marked());
-    const size_t aligned_min_int_array_size =
-      align_object_size(typeArrayOopDesc::header_size(T_INT));
-    if (deadlength >= aligned_min_int_array_size) {
-      oop(q)->set_klass(Universe::intArrayKlassObj());
-      assert(((deadlength - aligned_min_int_array_size) *
-              (HeapWordSize/sizeof(jint))) < (size_t)max_jint,
-             "deadspace too big for Arrayoop");
-      typeArrayOop(q)->set_length((int)((deadlength - aligned_min_int_array_size)
-                                        * (HeapWordSize/sizeof(jint))));
-    } else {
-      assert((int)deadlength == instanceOopDesc::header_size(),
-             "size for smallest fake dead object doesn't match");
-      oop(q)->set_klass(SystemDictionary::object_klass());
-    }
-    assert((int)deadlength == oop(q)->size(),
-           "make sure size for fake dead object match");
+    CollectedHeap::fill_with_object(q, deadlength);
+    oop(q)->set_mark(oop(q)->mark()->set_marked());
+    assert((int)deadlength == oop(q)->size(), "bad filler object size");
     // Recall that we required "q == compaction_top".
     return true;
   } else {
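
The rewritten insert_deadspace delegates the filler's klass and length setup to the new utility, leaving only the mark-word handling at the call site. A comment sketch of the resulting order of operations, paraphrased from the hunk above:

    // 1. CollectedHeap::fill_with_object(q, deadlength)
    //      installs the filler (int array or java.lang.Object) over the dead span
    // 2. oop(q)->set_mark(oop(q)->mark()->set_marked())
    //      re-marks the header so the compaction pass treats the filler as live
    // 3. assert((int)deadlength == oop(q)->size(), "bad filler object size")
    //      the filler must cover exactly the dead span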
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
This diff is collapsed (+563 -168).
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
@@ -36,6 +36,123 @@ class PreGCValues;
 class MoveAndUpdateClosure;
 class RefProcTaskExecutor;
 
+// The SplitInfo class holds the information needed to 'split' a source region
+// so that the live data can be copied to two destination *spaces*.  Normally,
+// all the live data in a region is copied to a single destination space (e.g.,
+// everything live in a region in eden is copied entirely into the old gen).
+// However, when the heap is nearly full, all the live data in eden may not fit
+// into the old gen.  Copying only some of the regions from eden to old gen
+// requires finding a region that does not contain a partial object (i.e., no
+// live object crosses the region boundary) somewhere near the last object that
+// does fit into the old gen.  Since it's not always possible to find such a
+// region, splitting is necessary for predictable behavior.
+//
+// A region is always split at the end of the partial object.  This avoids
+// additional tests when calculating the new location of a pointer, which is a
+// very hot code path.  The partial object and everything to its left will be
+// copied to another space (call it dest_space_1).  The live data to the right
+// of the partial object will be copied either within the space itself, or to a
+// different destination space (distinct from dest_space_1).
+//
+// Split points are identified during the summary phase, when region
+// destinations are computed:  data about the split, including the
+// partial_object_size, is recorded in a SplitInfo record and the
+// partial_object_size field in the summary data is set to zero.  The zeroing
+// is possible (and necessary) since the partial object will move to a
+// different destination space than anything to its right, thus the partial
+// object should not affect the locations of any objects to its right.
+//
+// The recorded data is used during the compaction phase, but only rarely:
+// when the partial object on the split region will be copied across a
+// destination region boundary.  This test is made once each time a region is
+// filled, and is a simple address comparison, so the overhead is negligible
+// (see PSParallelCompact::first_src_addr()).
+//
+// Notes:
+//
+// Only regions with partial objects are split; a region without a partial
+// object does not need any extra bookkeeping.
+//
+// At most one region is split per space, so the amount of data required is
+// constant.
+//
+// A region is split only when the destination space would overflow.  Once that
+// happens, the destination space is abandoned and no other data (even from
+// other source spaces) is targeted to that destination space.  Abandoning the
+// destination space may leave a somewhat large unused area at the end, if a
+// large object caused the overflow.
+//
+// Future work:
+//
+// More bookkeeping would be required to continue to use the destination space.
+// The most general solution would allow data from regions in two different
+// source spaces to be "joined" in a single destination region.  At the very
+// least, additional code would be required in next_src_region() to detect the
+// join and skip to an out-of-order source region.  If the join region was also
+// the last destination region to which a split region was copied (the most
+// likely case), then additional work would be needed to get fill_region() to
+// stop iteration and switch to a new source region at the right point.  Basic
+// idea would be to use a fake value for the top of the source space.  It is
+// doable, if a bit tricky.
+//
+// A simpler (but less general) solution would fill the remainder of the
+// destination region with a dummy object and continue filling the next
+// destination region.
+
+class SplitInfo
+{
+public:
+  // Return true if this split info is valid (i.e., if a split has been
+  // recorded).  The very first region cannot have a partial object and thus is
+  // never split, so 0 is the 'invalid' value.
+  bool is_valid() const { return _src_region_idx > 0; }
+
+  // Return true if this split holds data for the specified source region.
+  inline bool is_split(size_t source_region) const;
+
+  // The index of the split region, the size of the partial object on that
+  // region and the destination of the partial object.
+  size_t    src_region_idx() const   { return _src_region_idx; }
+  size_t    partial_obj_size() const { return _partial_obj_size; }
+  HeapWord* destination() const      { return _destination; }
+
+  // The destination count of the partial object referenced by this split
+  // (either 1 or 2).  This must be added to the destination count of the
+  // remainder of the source region.
+  unsigned int destination_count() const { return _destination_count; }
+
+  // If a word within the partial object will be written to the first word of a
+  // destination region, this is the address of the destination region;
+  // otherwise this is NULL.
+  HeapWord* dest_region_addr() const { return _dest_region_addr; }
+
+  // If a word within the partial object will be written to the first word of a
+  // destination region, this is the address of that word within the partial
+  // object; otherwise this is NULL.
+  HeapWord* first_src_addr() const { return _first_src_addr; }
+
+  // Record the data necessary to split the region src_region_idx.
+  void record(size_t src_region_idx, size_t partial_obj_size,
+              HeapWord* destination);
+
+  void clear();
+
+  DEBUG_ONLY(void verify_clear();)
+
+private:
+  size_t       _src_region_idx;
+  size_t       _partial_obj_size;
+  HeapWord*    _destination;
+  unsigned int _destination_count;
+  HeapWord*    _dest_region_addr;
+  HeapWord*    _first_src_addr;
+};
+
+inline bool SplitInfo::is_split(size_t region_idx) const
+{
+  return _src_region_idx == region_idx && is_valid();
+}
+
 class SpaceInfo
 {
  public:
@@ -58,18 +175,23 @@ class SpaceInfo
   // is no start array.
   ObjectStartArray* start_array() const { return _start_array; }
 
+  SplitInfo& split_info() { return _split_info; }
+
   void set_space(MutableSpace* s)           { _space = s; }
   void set_new_top(HeapWord* addr)          { _new_top = addr; }
   void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
   void set_dense_prefix(HeapWord* addr)     { _dense_prefix = addr; }
   void set_start_array(ObjectStartArray* s) { _start_array = s; }
 
+  void publish_new_top() const { _space->set_top(_new_top); }
+
  private:
   MutableSpace*     _space;
   HeapWord*         _new_top;
   HeapWord*         _min_dense_prefix;
   HeapWord*         _dense_prefix;
   ObjectStartArray* _start_array;
+  SplitInfo         _split_info;
 };
 
 class ParallelCompactData
@@ -230,9 +352,14 @@ public:
   // must be region-aligned; end need not be.
   void summarize_dense_prefix(HeapWord* beg, HeapWord* end);
 
-  bool summarize(HeapWord* target_beg, HeapWord* target_end,
+  HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
+                                  HeapWord* destination, HeapWord* target_end,
+                                  HeapWord** target_next);
+  bool summarize(SplitInfo& split_info,
                  HeapWord* source_beg, HeapWord* source_end,
-                 HeapWord** target_next, HeapWord** source_next = 0);
+                 HeapWord** source_next,
+                 HeapWord* target_beg, HeapWord* target_end,
+                 HeapWord** target_next);
 
   void clear();
   void clear_range(size_t beg_region, size_t end_region);
@@ -838,13 +965,27 @@ class PSParallelCompact : AllStatic {
   // non-empty.
   static void fill_dense_prefix_end(SpaceId id);
 
+  // Clear the summary data source_region field for the specified addresses.
+  static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);
+
+#ifndef PRODUCT
+  // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).
+
+  // Fill the region [start, start + words) with live object(s).  Only usable
+  // for the old and permanent generations.
+  static void fill_with_live_objects(SpaceId id, HeapWord* const start,
+                                     size_t words);
+
+  // Include the new objects in the summary data.
+  static void summarize_new_objects(SpaceId id, HeapWord* start);
+
+  // Add live objects and/or choose the dense prefix to provoke splitting.
+  static void provoke_split(bool& maximum_compaction);
+#endif
+
   static void summarize_spaces_quick();
   static void summarize_space(SpaceId id, bool maximum_compaction);
   static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
 
   // The space that is compacted after space_id.
   static SpaceId next_compaction_space_id(SpaceId space_id);
 
   // Adjust addresses in roots.  Does not adjust addresses in heap.
   static void adjust_roots();
@@ -999,6 +1140,7 @@ class PSParallelCompact : AllStatic {
   // Return the address of the word to be copied to dest_addr, which must be
   // aligned to a region boundary.
   static HeapWord* first_src_addr(HeapWord* const dest_addr,
+                                  SpaceId src_space_id,
                                   size_t src_region_idx);
 
   // Determine the next source region, set closure.source() to the start of the
@@ -1081,6 +1223,10 @@ class PSParallelCompact : AllStatic {
                                  const SpaceId id,
                                  const bool maximum_compaction,
                                  HeapWord* const addr);
+  static void summary_phase_msg(SpaceId dst_space_id,
+                                HeapWord* dst_beg, HeapWord* dst_end,
+                                SpaceId src_space_id,
+                                HeapWord* src_beg, HeapWord* src_end);
 #endif  // #ifndef PRODUCT
 
 #ifdef ASSERT
@@ -1324,31 +1470,28 @@ inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
   oop(addr)->update_contents(compaction_manager());
 }
 
-class FillClosure : public ParMarkBitMapClosure {
-public:
+class FillClosure : public ParMarkBitMapClosure
+{
+public:
   FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
     ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
-    _space_id(space_id),
     _start_array(PSParallelCompact::start_array(space_id))
   {
-    assert(_space_id == PSParallelCompact::perm_space_id ||
-           _space_id == PSParallelCompact::old_space_id,
+    assert(space_id == PSParallelCompact::perm_space_id ||
+           space_id == PSParallelCompact::old_space_id,
            "cannot use FillClosure in the young gen");
     assert(bitmap() != NULL, "need a bitmap");
     assert(_start_array != NULL, "need a start array");
   }
 
-  void fill_region(HeapWord* addr, size_t size) {
-    MemRegion region(addr, size);
-    SharedHeap::fill_region_with_object(region);
-    _start_array->allocate_block(addr);
-  }
-
   virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
-    fill_region(addr, size);
+    CollectedHeap::fill_with_objects(addr, size);
+    HeapWord* const end = addr + size;
+    do {
+      _start_array->allocate_block(addr);
+      addr += oop(addr)->size();
+    } while (addr < end);
     return ParMarkBitMap::incomplete;
   }
 
 private:
-  const PSParallelCompact::SpaceId _space_id;
   ObjectStartArray* const _start_array;
 };
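
The is_valid()/is_split() contract above is compact enough to demonstrate in isolation. A standalone sketch (this SplitInfo is a stripped re-declaration for illustration, not HotSpot's class):

    #include <cassert>
    #include <cstddef>

    // Stripped-down re-declaration of the SplitInfo contract for illustration.
    struct SplitInfo {
      std::size_t _src_region_idx = 0;  // 0 == 'invalid': region 0 is never split

      bool is_valid() const { return _src_region_idx > 0; }
      bool is_split(std::size_t region_idx) const {
        return _src_region_idx == region_idx && is_valid();
      }
      void record(std::size_t idx) { _src_region_idx = idx; }
      void clear() { _src_region_idx = 0; }
    };

    int main() {
      SplitInfo si;
      assert(!si.is_valid());    // nothing recorded yet
      si.record(42);             // pretend region 42 holds the partial object
      assert(si.is_split(42));   // the split applies to region 42 only
      assert(!si.is_split(41));
      si.clear();
      assert(!si.is_split(42));  // cleared between collections
      return 0;
    }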
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
@@ -499,26 +499,15 @@ oop PSPromotionManager::copy_to_survivor_space(oop o, bool depth_first) {
         // We lost, someone else "owns" this object
         guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
 
-        // Unallocate the space used. NOTE! We may have directly allocated
-        // the object. If so, we cannot deallocate it, so we have to test!
+        // Try to deallocate the space.  If it was directly allocated we cannot
+        // deallocate it, so we have to test.  If the deallocation fails,
+        // overwrite with a filler object.
         if (new_obj_is_tenured) {
           if (!_old_lab.unallocate_object(new_obj)) {
-            // The promotion lab failed to unallocate the object.
-            // We need to overwrite the object with a filler that
-            // contains no interior pointers.
-            MemRegion mr((HeapWord*)new_obj, new_obj_size);
-            // Clean this up and move to oopFactory (see bug 4718422)
-            SharedHeap::fill_region_with_object(mr);
-          }
-        } else {
-          if (!_young_lab.unallocate_object(new_obj)) {
-            // The promotion lab failed to unallocate the object.
-            // We need to overwrite the object with a filler that
-            // contains no interior pointers.
-            MemRegion mr((HeapWord*)new_obj, new_obj_size);
-            // Clean this up and move to oopFactory (see bug 4718422)
-            SharedHeap::fill_region_with_object(mr);
+            CollectedHeap::fill_with_object((HeapWord*)new_obj, new_obj_size);
           }
+        } else if (!_young_lab.unallocate_object(new_obj)) {
+          CollectedHeap::fill_with_object((HeapWord*)new_obj, new_obj_size);
         }
 
         // don't update this before the unallocation!
src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
@@ -76,8 +76,8 @@ void MutableNUMASpace::ensure_parsability() {
     MutableSpace* s = ls->space();
     if (s->top() < top()) { // For all spaces preceding the one containing top()
       if (s->free_in_words() > 0) {
-        SharedHeap::fill_region_with_object(MemRegion(s->top(), s->end()));
-        size_t area_touched_words = pointer_delta(s->end(), s->top());
+        size_t area_touched_words = pointer_delta(s->end(), s->top());
+        CollectedHeap::fill_with_object(s->top(), area_touched_words);
 #ifndef ASSERT
         if (!ZapUnusedHeapArea) {
           area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
@@ -686,11 +686,11 @@ void MutableNUMASpace::set_top(HeapWord* value) {
     // a minimal object; assuming that's not the last chunk in which case we don't care.
     if (i < lgrp_spaces()->length() - 1) {
       size_t remainder = pointer_delta(s->end(), value);
-      const size_t minimal_object_size = oopDesc::header_size();
-      if (remainder < minimal_object_size && remainder > 0) {
-        // Add a filler object of a minimal size, it will cross the chunk boundary.
-        SharedHeap::fill_region_with_object(MemRegion(value, minimal_object_size));
-        value += minimal_object_size;
+      const size_t min_fill_size = CollectedHeap::min_fill_size();
+      if (remainder < min_fill_size && remainder > 0) {
+        // Add a minimum size filler object; it will cross the chunk boundary.
+        CollectedHeap::fill_with_object(value, min_fill_size);
+        value += min_fill_size;
         assert(!s->contains(value), "Should be in the next chunk");
         // Restart the loop from the same chunk, since the value has moved
         // to the next one.
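
The second hunk also tightens the threshold: oopDesc::header_size() is replaced by CollectedHeap::min_fill_size(), i.e. align_object_size(oopDesc::header_size()). The distinction matters whenever alignment rounds the minimal object up. A comment sketch with illustrative word counts (not HotSpot's actual values):

    // Suppose header_size() == 3 words and object alignment is 2 words, so
    // min_fill_size() == 4.  A 3-word remainder passes the old test
    // (remainder >= header_size) yet cannot hold any object; the new test
    // (remainder < min_fill_size) correctly inserts a boundary-crossing filler.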
src/share/vm/gc_interface/collectedHeap.cpp
@@ -30,12 +30,21 @@
 int       CollectedHeap::_fire_out_of_memory_count = 0;
 #endif
 
+size_t CollectedHeap::_filler_array_max_size = 0;
+
 // Memory state functions.
 
-CollectedHeap::CollectedHeap() :
-  _reserved(), _barrier_set(NULL), _is_gc_active(false),
-  _total_collections(0), _total_full_collections(0),
-  _gc_cause(GCCause::_no_gc), _gc_lastcause(GCCause::_no_gc) {
+CollectedHeap::CollectedHeap()
+{
+  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
+  const size_t elements_per_word = HeapWordSize / sizeof(jint);
+  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
+                                             max_len * elements_per_word);
+
+  _barrier_set = NULL;
+  _is_gc_active = false;
+  _total_collections = _total_full_collections = 0;
+  _gc_cause = _gc_lastcause = GCCause::_no_gc;
   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
@@ -128,6 +137,95 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {
   return obj;
 }
 
+size_t CollectedHeap::filler_array_hdr_size() {
+  return size_t(arrayOopDesc::header_size(T_INT));
+}
+
+size_t CollectedHeap::filler_array_min_size() {
+  return align_object_size(filler_array_hdr_size());
+}
+
+size_t CollectedHeap::filler_array_max_size() {
+  return _filler_array_max_size;
+}
+
+#ifdef ASSERT
+void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
+{
+  assert(words >= min_fill_size(), "too small to fill");
+  assert(words % MinObjAlignment == 0, "unaligned size");
+  assert(Universe::heap()->is_in_reserved(start), "not in heap");
+  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
+}
+
+void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
+{
+  if (ZapFillerObjects) {
+    Copy::fill_to_words(start + filler_array_hdr_size(),
+                        words - filler_array_hdr_size(), 0XDEAFBABE);
+  }
+}
+#endif // ASSERT
+
+void
+CollectedHeap::fill_with_array(HeapWord* start, size_t words)
+{
+  assert(words >= filler_array_min_size(), "too small for an array");
+  assert(words <= filler_array_max_size(), "too big for a single object");
+
+  const size_t payload_size = words - filler_array_hdr_size();
+  const size_t len = payload_size * HeapWordSize / sizeof(jint);
+
+  // Set the length first for concurrent GC.
+  ((arrayOop)start)->set_length((int)len);
+  post_allocation_setup_common(Universe::fillerArrayKlassObj(), start, words);
+  DEBUG_ONLY(zap_filler_array(start, words);)
+}
+
+void
+CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words)
+{
+  assert(words <= filler_array_max_size(), "too big for a single object");
+
+  if (words >= filler_array_min_size()) {
+    fill_with_array(start, words);
+  } else if (words > 0) {
+    assert(words == min_fill_size(), "unaligned size");
+    post_allocation_setup_common(SystemDictionary::object_klass(), start,
+                                 words);
+  }
+}
+
+void CollectedHeap::fill_with_object(HeapWord* start, size_t words)
+{
+  DEBUG_ONLY(fill_args_check(start, words);)
+  HandleMark hm;  // Free handles before leaving.
+  fill_with_object_impl(start, words);
+}
+
+void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
+{
+  DEBUG_ONLY(fill_args_check(start, words);)
+  HandleMark hm;  // Free handles before leaving.
+
+#ifdef LP64
+  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
+  // First fill with arrays, ensuring that any remaining space is big enough to
+  // fill.  The remainder is filled with a single object.
+  const size_t min = min_fill_size();
+  const size_t max = filler_array_max_size();
+  while (words > max) {
+    const size_t cur = words - max >= min ? max : max - min;
+    fill_with_array(start, cur);
+    start += cur;
+    words -= cur;
+  }
+#endif
+
+  fill_with_object_impl(start, words);
+}
+
 oop CollectedHeap::new_store_barrier(oop new_obj) {
   // %%% This needs refactoring. (It was imported from the server compiler.)
   guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported");
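
The loop in fill_with_objects guarantees that the final remainder passed to fill_with_object_impl() is never in the unfillable range (0, min_fill_size()): when carving off a full max-sized array would leave such a sliver, it carves max - min instead. A standalone sketch of just that arithmetic (the word counts are made up for illustration):

    #include <cstdio>
    #include <cstddef>

    int main() {
      const std::size_t min = 2;    // stand-in for min_fill_size()
      const std::size_t max = 100;  // stand-in for filler_array_max_size()
      std::size_t words = 201;      // a naive first cut would leave a 1-word hole

      while (words > max) {
        // Take a full max-sized chunk only if the remainder stays fillable.
        const std::size_t cur = (words - max >= min) ? max : max - min;
        std::printf("fill_with_array: %zu words\n", cur);
        words -= cur;
      }
      std::printf("final object: %zu words\n", words);  // never in (0, min)
      return 0;
    }

With words = 201, the second iteration takes 98 instead of 100, leaving a fillable 3-word remainder rather than a 1-word hole.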
src/share/vm/gc_interface/collectedHeap.hpp
@@ -47,6 +47,9 @@ class CollectedHeap : public CHeapObj {
   static int       _fire_out_of_memory_count;
 #endif
 
+  // Used for filler objects (static, but initialized in ctor).
+  static size_t _filler_array_max_size;
+
  protected:
   MemRegion _reserved;
   BarrierSet* _barrier_set;
@@ -119,6 +122,21 @@ class CollectedHeap : public CHeapObj {
   // Clears an allocated object.
   inline static void init_obj(HeapWord* obj, size_t size);
 
+  // Filler object utilities.
+  static inline size_t filler_array_hdr_size();
+  static inline size_t filler_array_min_size();
+  static inline size_t filler_array_max_size();
+
+  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
+  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words);)
+
+  // Fill with a single array; caller must ensure filler_array_min_size() <=
+  // words <= filler_array_max_size().
+  static inline void fill_with_array(HeapWord* start, size_t words);
+
+  // Fill with a single object (either an int array or a java.lang.Object).
+  static inline void fill_with_object_impl(HeapWord* start, size_t words);
+
   // Verification functions
   virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
     PRODUCT_RETURN;
@@ -294,6 +312,27 @@ class CollectedHeap : public CHeapObj {
   // The boundary between a "large" and "small" array of primitives, in words.
   virtual size_t large_typearray_limit() = 0;
 
+  // Utilities for turning raw memory into filler objects.
+  //
+  // min_fill_size() is the smallest region that can be filled.
+  // fill_with_objects() can fill arbitrary-sized regions of the heap using
+  // multiple objects.  fill_with_object() is for regions known to be smaller
+  // than the largest array of integers; it uses a single object to fill the
+  // region and has slightly less overhead.
+  static size_t min_fill_size() {
+    return size_t(align_object_size(oopDesc::header_size()));
+  }
+
+  static void fill_with_objects(HeapWord* start, size_t words);
+
+  static void fill_with_object(HeapWord* start, size_t words);
+  static void fill_with_object(MemRegion region) {
+    fill_with_object(region.start(), region.word_size());
+  }
+  static void fill_with_object(HeapWord* start, HeapWord* end) {
+    fill_with_object(start, pointer_delta(end, start));
+  }
+
   // Some heaps may offer a contiguous region for shared non-blocking
   // allocation, via inlined code (by exporting the address of the top and
   // end fields defining the extent of the contiguous allocation region.)
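
The two convenience overloads reduce to the (start, words) primitive, so a caller can phrase the same fill three ways. A comment sketch (top and hard_end are illustrative names):

    // All equivalent, per the header above:
    CollectedHeap::fill_with_object(top, pointer_delta(hard_end, top));
    CollectedHeap::fill_with_object(top, hard_end);
    CollectedHeap::fill_with_object(MemRegion(top, pointer_delta(hard_end, top)));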
src/share/vm/gc_interface/collectedHeap.inline.hpp
@@ -34,7 +34,6 @@ void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
 void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
                                                            HeapWord* objPtr,
                                                            size_t size) {
-
   oop obj = (oop)objPtr;
 
   assert(obj != NULL, "NULL object pointer");
@@ -44,9 +43,6 @@ void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
     // May be bootstrapping
     obj->set_mark(markOopDesc::prototype());
   }
-
-  // support low memory notifications (no-op if not enabled)
-  LowMemoryDetector::detect_low_memory_for_collected_pools();
 }
 
 void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
@@ -65,6 +61,9 @@ void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
 // Support for jvmti and dtrace
 inline void post_allocation_notify(KlassHandle klass, oop obj) {
+  // support low memory notifications (no-op if not enabled)
+  LowMemoryDetector::detect_low_memory_for_collected_pools();
+
   // support for JVMTI VMObjectAlloc event (no-op if not enabled)
   JvmtiExport::vm_object_alloc_event_collector(obj);
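
These hunks move the low-memory check out of the common setup path and into post_allocation_notify(). A plausible reading, hedged because only part of the call graph appears in this diff: ordinary allocations still notify through the setup paths that call post_allocation_notify(), while post_allocation_setup_common(), the path the new filler code uses, no longer triggers low-memory detection or JVMTI events. That is suppression the deleted SharedHeap helper (next file) had to arrange explicitly with NoJvmtiVMObjectAllocMark and LowMemoryDetectorDisabler guards.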
src/share/vm/includeDB_gc
@@ -28,6 +28,7 @@
 collectedHeap.cpp                       collectedHeap.hpp
 collectedHeap.cpp                       collectedHeap.inline.hpp
 collectedHeap.cpp                       init.hpp
 collectedHeap.cpp                       oop.inline.hpp
+collectedHeap.cpp                       systemDictionary.hpp
 collectedHeap.cpp                       thread_<os_family>.inline.hpp
 
 collectedHeap.hpp                       allocation.hpp
src/share/vm/memory/permGen.cpp
@@ -26,20 +26,24 @@
 #include "incls/_permGen.cpp.incl"
 
 HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
-  MutexLocker ml(Heap_lock);
   GCCause::Cause next_cause = GCCause::_permanent_generation_full;
   GCCause::Cause prev_cause = GCCause::_no_gc;
+  unsigned int gc_count_before, full_gc_count_before;
+  HeapWord* obj;
 
   for (;;) {
-    HeapWord* obj = gen->allocate(size, false);
-    if (obj != NULL) {
-      return obj;
-    }
-    if (gen->capacity() < _capacity_expansion_limit ||
-        prev_cause != GCCause::_no_gc) {
-      obj = gen->expand_and_allocate(size, false);
-    }
-    if (obj == NULL && prev_cause != GCCause::_last_ditch_collection) {
+    {
+      MutexLocker ml(Heap_lock);
+      if ((obj = gen->allocate(size, false)) != NULL) {
+        return obj;
+      }
+      if (gen->capacity() < _capacity_expansion_limit ||
+          prev_cause != GCCause::_no_gc) {
+        obj = gen->expand_and_allocate(size, false);
+      }
+      if (obj != NULL || prev_cause == GCCause::_last_ditch_collection) {
+        return obj;
+      }
       if (GC_locker::is_active_and_needs_gc()) {
         // If this thread is not in a jni critical section, we stall
         // the requestor until the critical section has cleared and
@@ -61,12 +65,12 @@ HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
         return NULL;
       }
-    }
 
-    // Read the GC count while holding the Heap_lock
-    unsigned int gc_count_before = SharedHeap::heap()->total_collections();
-    unsigned int full_gc_count_before = SharedHeap::heap()->total_full_collections();
-    {
-      MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
+      // Read the GC count while holding the Heap_lock
+      gc_count_before = SharedHeap::heap()->total_collections();
+      full_gc_count_before = SharedHeap::heap()->total_full_collections();
+    }
+
+    // Give up heap lock above, VMThread::execute below gets it back
     VM_GenCollectForPermanentAllocation op(size, gc_count_before,
                                            full_gc_count_before, next_cause);
     VMThread::execute(&op);
@@ -80,12 +84,8 @@ HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
       if (obj != NULL) {
         return obj;
       }
-    }
     prev_cause = next_cause;
     next_cause = GCCause::_last_ditch_collection;
-    } else {
-      return obj;
-    }
   }
 }
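
The allocation loop's locking shape changes from holding Heap_lock for the whole function (with a MutexUnlocker around the VM operation) to taking it once per attempt. A comment sketch of the new shape, paraphrased from the hunks above:

    // for (;;) {
    //   {
    //     MutexLocker ml(Heap_lock);
    //     // allocate / expand_and_allocate; return on success or last-ditch failure
    //     // read gc_count_before / full_gc_count_before under the lock
    //   }
    //   // Heap_lock released here; VMThread::execute() re-acquires it
    //   VM_GenCollectForPermanentAllocation op(...);
    //   VMThread::execute(&op);
    //   ...
    // }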
src/share/vm/memory/sharedHeap.cpp
@@ -248,46 +248,6 @@ void SharedHeap::ref_processing_init() {
   perm_gen()->ref_processor_init();
 }
 
-void SharedHeap::fill_region_with_object(MemRegion mr) {
-  // Disable the posting of JVMTI VMObjectAlloc events as we
-  // don't want the filling of tlabs with filler arrays to be
-  // reported to the profiler.
-  NoJvmtiVMObjectAllocMark njm;
-
-  // Disable low memory detector because there is no real allocation.
-  LowMemoryDetectorDisabler lmd_dis;
-
-  // It turns out that post_allocation_setup_array takes a handle, so the
-  // call below contains an implicit conversion.  Best to free that handle
-  // as soon as possible.
-  HandleMark hm;
-
-  size_t word_size = mr.word_size();
-  size_t aligned_array_header_size =
-    align_object_size(typeArrayOopDesc::header_size(T_INT));
-
-  if (word_size >= aligned_array_header_size) {
-    const size_t array_length =
-      pointer_delta(mr.end(), mr.start()) -
-      typeArrayOopDesc::header_size(T_INT);
-    const size_t array_length_words =
-      array_length * (HeapWordSize / sizeof(jint));
-    post_allocation_setup_array(Universe::intArrayKlassObj(),
-                                mr.start(),
-                                mr.word_size(),
-                                (int)array_length_words);
-#ifdef ASSERT
-    HeapWord* elt_words = (mr.start() + typeArrayOopDesc::header_size(T_INT));
-    Copy::fill_to_words(elt_words, array_length, 0xDEAFBABE);
-#endif
-  } else {
-    assert(word_size == (size_t)oopDesc::header_size(), "Unaligned?");
-    post_allocation_setup_obj(SystemDictionary::object_klass(),
-                              mr.start(),
-                              mr.word_size());
-  }
-}
-
 // Some utilities.
 void SharedHeap::print_size_transition(outputStream* out,
                                        size_t bytes_before,
src/share/vm/memory/sharedHeap.hpp
@@ -108,14 +108,6 @@ public:
   void set_perm(PermGen* perm_gen) { _perm_gen = perm_gen; }
 
-  // A helper function that fills a region of the heap with
-  // with a single object.
-  static void fill_region_with_object(MemRegion mr);
-
-  // Minimum garbage fill object size
-  static size_t min_fill_size()          { return (size_t)align_object_size(oopDesc::header_size()); }
-  static size_t min_fill_size_in_bytes() { return min_fill_size() * HeapWordSize; }
-
   // This function returns the "GenRemSet" object that allows us to scan
   // generations; at least the perm gen, possibly more in a fully
   // generational heap.
src/share/vm/memory/space.cpp
@@ -409,19 +409,9 @@ bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                         HeapWord* q, size_t deadlength) {
   if (allowed_deadspace_words >= deadlength) {
     allowed_deadspace_words -= deadlength;
-    oop(q)->set_mark(markOopDesc::prototype()->set_marked());
-    const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
-    if (deadlength >= min_int_array_size) {
-      oop(q)->set_klass(Universe::intArrayKlassObj());
-      typeArrayOop(q)->set_length((int)((deadlength - min_int_array_size)
-                                        * (HeapWordSize/sizeof(jint))));
-    } else {
-      assert((int) deadlength == instanceOopDesc::header_size(),
-             "size for smallest fake dead object doesn't match");
-      oop(q)->set_klass(SystemDictionary::object_klass());
-    }
-    assert((int) deadlength == oop(q)->size(),
-           "make sure size for fake dead object match");
+    CollectedHeap::fill_with_object(q, deadlength);
+    oop(q)->set_mark(oop(q)->mark()->set_marked());
+    assert((int) deadlength == oop(q)->size(), "bad filler object size");
     // Recall that we required "q == compaction_top".
     return true;
   } else {
src/share/vm/memory/tenuredGeneration.cpp
@@ -387,7 +387,7 @@ void TenuredGeneration::par_promote_alloc_undo(int thread_num,
            "should contain whole object");
     buf->undo_allocation(obj, word_sz);
   } else {
-    SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
+    CollectedHeap::fill_with_object(obj, word_sz);
   }
 }
src/share/vm/memory/threadLocalAllocBuffer.cpp
@@ -100,8 +100,7 @@ void ThreadLocalAllocBuffer::accumulate_statistics() {
 void ThreadLocalAllocBuffer::make_parsable(bool retire) {
   if (end() != NULL) {
     invariants();
-    MemRegion mr(top(), hard_end());
-    SharedHeap::fill_region_with_object(mr);
+    CollectedHeap::fill_with_object(top(), hard_end());
 
     if (retire || ZeroTLAB) {  // "Reset" the TLAB
       set_start(NULL);
src/share/vm/memory/universe.cpp
@@ -49,6 +49,7 @@ klassOop Universe::_constantPoolKlassObj = NULL;
 klassOop Universe::_constantPoolCacheKlassObj         = NULL;
 klassOop Universe::_compiledICHolderKlassObj          = NULL;
 klassOop Universe::_systemObjArrayKlassObj            = NULL;
+klassOop Universe::_fillerArrayKlassObj               = NULL;
 oop Universe::_int_mirror                             = NULL;
 oop Universe::_float_mirror                           = NULL;
 oop Universe::_double_mirror                          = NULL;
@@ -126,6 +127,7 @@ void Universe::system_classes_do(void f(klassOop)) {
   f(instanceKlassKlassObj());
   f(constantPoolKlassObj());
   f(systemObjArrayKlassObj());
+  f(fillerArrayKlassObj());
 }
 
 void Universe::oops_do(OopClosure* f, bool do_all) {
@@ -180,6 +182,7 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
   f->do_oop((oop*)&_constantPoolCacheKlassObj);
   f->do_oop((oop*)&_compiledICHolderKlassObj);
   f->do_oop((oop*)&_systemObjArrayKlassObj);
+  f->do_oop((oop*)&_fillerArrayKlassObj);
   f->do_oop((oop*)&_the_empty_byte_array);
   f->do_oop((oop*)&_the_empty_short_array);
   f->do_oop((oop*)&_the_empty_int_array);
@@ -265,6 +268,7 @@ void Universe::genesis(TRAPS) {
     _compiledICHolderKlassObj = compiledICHolderKlass::create_klass(CHECK);
     _systemObjArrayKlassObj = objArrayKlassKlass::cast(objArrayKlassKlassObj())->allocate_system_objArray_klass(CHECK);
+    _fillerArrayKlassObj = typeArrayKlass::create_klass(T_INT, sizeof(jint), "<filler>", CHECK);
 
     _the_empty_byte_array = oopFactory::new_permanent_byteArray(0, CHECK);
     _the_empty_short_array = oopFactory::new_permanent_shortArray(0, CHECK);
@@ -274,7 +278,6 @@ void Universe::genesis(TRAPS) {
     _the_array_interfaces_array = oopFactory::new_system_objArray(2, CHECK);
-
     _vm_exception = oopFactory::new_symbol("vm exception holder", CHECK);
   } else {
     FileMapInfo *mapinfo = FileMapInfo::current_info();
     char* buffer = mapinfo->region_base(CompactingPermGenGen::md);
     void** vtbl_list = (void**)buffer;
src/share/vm/memory/universe.hpp
@@ -92,6 +92,7 @@ class LatestMethodOopCache : public CommonMethodOopCache {
 class Universe: AllStatic {
   // Ugh.  Universe is much too friendly.
   friend class MarkSweep;
+  friend class oopDesc;
   friend class ClassLoader;
@@ -132,6 +133,7 @@ class Universe: AllStatic {
   static klassOop _constantPoolCacheKlassObj;
   static klassOop _compiledICHolderKlassObj;
   static klassOop _systemObjArrayKlassObj;
+  static klassOop _fillerArrayKlassObj;
 
   // Known objects in the VM
@@ -264,6 +266,7 @@ class Universe: AllStatic {
   static klassOop constantPoolCacheKlassObj() { return _constantPoolCacheKlassObj; }
   static klassOop compiledICHolderKlassObj()  { return _compiledICHolderKlassObj; }
   static klassOop systemObjArrayKlassObj()    { return _systemObjArrayKlassObj; }
+  static klassOop fillerArrayKlassObj()       { return _fillerArrayKlassObj; }
 
   // Known objects in the VM
   static oop int_mirror() { return check_mirror(_int_mirror); }
src/share/vm/oops/arrayOop.hpp
@@ -96,19 +96,20 @@ class arrayOopDesc : public oopDesc {
                                   : typesize_in_bytes/HeapWordSize);
   }
 
-  // This method returns the maximum length that can passed into
-  // typeArrayOop::object_size(scale, length, header_size) without causing an
-  // overflow. We substract an extra 2*wordSize to guard against double word
-  // alignments.  It gets the scale from the type2aelembytes array.
+  // Return the maximum length of an array of BasicType.  The length can be
+  // passed to typeArrayOop::object_size(scale, length, header_size) without
+  // causing an overflow.
   static int32_t max_array_length(BasicType type) {
     assert(type >= 0 && type < T_CONFLICT, "wrong type");
     assert(type2aelembytes(type) != 0, "wrong type");
-    // We use max_jint, since object_size is internally represented by an 'int'
-    // This gives us an upper bound of max_jint words for the size of the oop.
-    int32_t max_words = (max_jint - header_size(type) - 2);
-    int elembytes = type2aelembytes(type);
-    jlong len = ((jlong)max_words * HeapWordSize) / elembytes;
-    return (len > max_jint) ? max_jint : (int32_t)len;
+
+    const int bytes_per_element = type2aelembytes(type);
+    if (bytes_per_element < HeapWordSize) {
+      return max_jint;
+    }
+
+    const int32_t max_words = align_size_down(max_jint, MinObjAlignment);
+    const int32_t max_element_words = max_words - header_size(type);
+    const int32_t words_per_element = bytes_per_element >> LogHeapWordSize;
+    return max_element_words / words_per_element;
   }
 };
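
The rewritten max_array_length() caps the length at max_jint for sub-word elements and otherwise derives it from an alignment-rounded word budget. A standalone sketch of the arithmetic with illustrative constants (header size and alignment are stand-ins, not HotSpot's exact values):

    #include <cstdio>
    #include <cstdint>

    int main() {
      const int32_t max_jint = 0x7fffffff;
      const int min_obj_alignment = 2;  // in words; illustrative
      const int header_words = 2;       // stand-in for header_size(type)
      const int heap_word_size = 8;     // LP64

      // Elements narrower than a word (e.g. 4-byte jint) are simply capped:
      std::printf("4-byte elements: max length = %d\n", max_jint);

      // Elements a word or wider, e.g. 8-byte jlong:
      const int bytes_per_element = 8;
      const int words_per_element = bytes_per_element / heap_word_size;  // 1
      const int32_t max_words = max_jint & ~(min_obj_alignment - 1);     // align down
      const int32_t max_element_words = max_words - header_words;
      std::printf("8-byte elements: max length = %d\n",
                  max_element_words / words_per_element);
      return 0;
    }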
src/share/vm/oops/typeArrayKlass.cpp
@@ -36,13 +36,14 @@ bool typeArrayKlass::compute_is_subtype_of(klassOop k) {
   return element_type() == tak->element_type();
 }
 
-klassOop typeArrayKlass::create_klass(BasicType type, int scale, TRAPS) {
+klassOop typeArrayKlass::create_klass(BasicType type, int scale,
+                                      const char* name_str, TRAPS) {
   typeArrayKlass o;
 
   symbolHandle sym(symbolOop(NULL));
   // bootstrapping: don't create sym if symbolKlass not created yet
-  if (Universe::symbolKlassObj() != NULL) {
-    sym = oopFactory::new_symbol_handle(external_name(type), CHECK_NULL);
+  if (Universe::symbolKlassObj() != NULL && name_str != NULL) {
+    sym = oopFactory::new_symbol_handle(name_str, CHECK_NULL);
   }
 
   KlassHandle klassklass(THREAD, Universe::typeArrayKlassKlassObj());
src/share/vm/oops/typeArrayKlass.hpp
@@ -39,7 +39,11 @@ class typeArrayKlass : public arrayKlass {
   // klass allocation
   DEFINE_ALLOCATE_PERMANENT(typeArrayKlass);
-  static klassOop create_klass(BasicType type, int scale, TRAPS);
+  static klassOop create_klass(BasicType type, int scale, const char* name_str,
+                               TRAPS);
+  static inline klassOop create_klass(BasicType type, int scale, TRAPS) {
+    return create_klass(type, scale, external_name(type), CHECK_NULL);
+  }
 
   int oop_size(oop obj) const;
   int klass_oop_size() const  { return object_size(); }
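
The header keeps the old two-argument signature as an inline wrapper, so existing callers compile unchanged while Universe::genesis can pass a custom name. A comment sketch of the relationship:

    // create_klass(type, scale, CHECK)
    //   == create_klass(type, scale, external_name(type), CHECK)  // old behavior
    // create_klass(T_INT, sizeof(jint), "<filler>", CHECK)        // universe.cpp
    // name_str may be NULL while bootstrapping: no symbol is created.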
src/share/vm/runtime/arguments.cpp
@@ -1517,6 +1517,16 @@ bool Arguments::check_vm_args_consistency() {
     MarkSweepAlwaysCompactCount = 1;  // Move objects every gc.
   }
 
+  if (UseParallelOldGC && ParallelOldGCSplitALot) {
+    // Settings to encourage splitting.
+    if (!FLAG_IS_CMDLINE(NewRatio)) {
+      FLAG_SET_CMDLINE(intx, NewRatio, 2);
+    }
+    if (!FLAG_IS_CMDLINE(ScavengeBeforeFullGC)) {
+      FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
+    }
+  }
+
   status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
   status = status && verify_percentage(GCTimeLimit, "GCTimeLimit");
   if (GCTimeLimit == 100) {
src/share/vm/runtime/globals.hpp
@@ -625,6 +625,9 @@ class CommandLineFlags {
   develop(bool, CheckZapUnusedHeapArea, false,                              \
           "Check zapping of unused heap space")                            \
                                                                            \
+  develop(bool, ZapFillerObjects, trueInDebug,                             \
+          "Zap filler objects with 0xDEAFBABE")                            \
+                                                                           \
   develop(bool, PrintVMMessages, true,                                     \
           "Print vm messages on console")                                  \
                                                                            \
@@ -1200,11 +1203,12 @@ class CommandLineFlags {
   product(uintx, ParallelCMSThreads, 0,                                    \
           "Max number of threads CMS will use for concurrent work")        \
                                                                            \
-  develop(bool, ParallelOldMTUnsafeMarkBitMap, false,                      \
-          "Use the Parallel Old MT unsafe in marking the bitmap")          \
+  develop(bool, ParallelOldGCSplitALot, false,                             \
+          "Provoke splitting (copying data from a young gen space to"      \
+          "multiple destination spaces)")                                  \
                                                                            \
-  develop(bool, ParallelOldMTUnsafeUpdateLiveData, false,                  \
-          "Use the Parallel Old MT unsafe in update of live size")         \
+  develop(uintx, ParallelOldGCSplitInterval, 3,                            \
+          "How often to provoke splitting a young gen space")              \
                                                                            \
   develop(bool, TraceRegionTasksQueuing, false,                            \
           "Trace the queuing of the region tasks")                         \
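
All three new flags are develop() flags, so they can only be changed on a debug (non-product) VM; in product builds they are compile-time constants. ZapFillerObjects defaults to trueInDebug, i.e. on in debug builds. On a debug VM the split path can be provoked with, for example, -XX:+UseParallelOldGC -XX:+ParallelOldGCSplitALot, with -XX:ParallelOldGCSplitInterval=<n> controlling how often a young gen space is split.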