openanolis / dragonwell8_hotspot
Commit a23b31e3
Merge, authored Dec 20, 2008 by trims
Parents: f48c1bad, 8e833fe9

Showing 15 changed files with 169 additions and 86 deletions (+169 -86)
src/os/solaris/vm/os_solaris.cpp                                        +18 -10
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp                   +1  -1
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp   +84 -34
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp   +18  -1
src/share/vm/gc_interface/collectedHeap.cpp                              +1  -2
src/share/vm/memory/cardTableModRefBS.cpp                                +1  -1
src/share/vm/memory/cardTableModRefBS.hpp                                +1  -1
src/share/vm/memory/universe.cpp                                         +0  -4
src/share/vm/memory/universe.hpp                                         +0  -2
src/share/vm/opto/compile.cpp                                            +3  -0
src/share/vm/opto/macro.cpp                                              +7  -0
src/share/vm/runtime/arguments.cpp                                      +27 -22
src/share/vm/runtime/arguments.hpp                                       +3  -3
src/share/vm/runtime/javaCalls.cpp                                       +1  -1
src/share/vm/services/management.cpp                                     +4  -4
src/os/solaris/vm/os_solaris.cpp

@@ -1638,16 +1638,24 @@ inline hrtime_t oldgetTimeNanos() {
 // getTimeNanos is guaranteed to not move backward on Solaris
 inline hrtime_t getTimeNanos() {
   if (VM_Version::supports_cx8()) {
-    bool retry = false;
-    hrtime_t newtime = gethrtime();
-    hrtime_t oldmaxtime = max_hrtime;
-    hrtime_t retmaxtime = oldmaxtime;
-    while ((newtime > retmaxtime) && (retry == false || retmaxtime != oldmaxtime)) {
-      oldmaxtime = retmaxtime;
-      retmaxtime = Atomic::cmpxchg(newtime, (volatile jlong *)&max_hrtime, oldmaxtime);
-      retry = true;
-    }
-    return (newtime > retmaxtime) ? newtime : retmaxtime;
+    const hrtime_t now = gethrtime();
+    const hrtime_t prev = max_hrtime;
+    if (now <= prev) return prev;   // same or retrograde time;
+    const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
+    assert(obsv >= prev, "invariant");   // Monotonicity
+    // If the CAS succeeded then we're done and return "now".
+    // If the CAS failed and the observed value "obs" is >= now then
+    // we should return "obs".  If the CAS failed and now > obs > prv then
+    // some other thread raced this thread and installed a new value, in which case
+    // we could either (a) retry the entire operation, (b) retry trying to install now
+    // or (c) just return obs.  We use (c).  No loop is required although in some cases
+    // we might discard a higher "now" value in deference to a slightly lower but freshly
+    // installed obs value.  That's entirely benign -- it admits no new orderings compared
+    // to (a) or (b) -- and greatly reduces coherence traffic.
+    // We might also condition (c) on the magnitude of the delta between obs and now.
+    // Avoiding excessive CAS operations to hot RW locations is critical.
+    // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
+    return (prev == obsv) ? now : obsv;
   } else {
     return oldgetTimeNanos();
   }
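The comment block in the new code argues that a single compare-and-swap is enough to keep the clock monotonic. As a standalone illustration of that pattern, here is a minimal sketch in portable C++11; the names are invented and std::chrono::steady_clock stands in for the Solaris gethrtime(), so this is a model of the technique, not the HotSpot code.

```cpp
#include <atomic>
#include <chrono>
#include <cstdint>

static std::atomic<int64_t> g_max_time{0};  // analogous to max_hrtime

int64_t monotonic_nanos() {
  int64_t now = std::chrono::duration_cast<std::chrono::nanoseconds>(
      std::chrono::steady_clock::now().time_since_epoch()).count();
  int64_t prev = g_max_time.load(std::memory_order_relaxed);
  if (now <= prev) return prev;  // same or retrograde reading
  // One CAS, no retry loop: if another thread wins the race it installed a
  // value >= prev, and returning that observed value stays monotonic.
  if (g_max_time.compare_exchange_strong(prev, now)) return now;
  return prev;  // on failure, prev was updated to the observed value
}
```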
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp

@@ -181,7 +181,7 @@ public:
   void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
     HeapWord* hr_bot = hr()->bottom();
-    int hr_first_card_index = ctbs->index_for(hr_bot);
+    size_t hr_first_card_index = ctbs->index_for(hr_bot);
     bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
 #if PRT_COUNT_OCCUPIED
     recount_occupied();
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

@@ -726,7 +726,7 @@ HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
   size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
   result += partial_obj_size + live_to_left;
-  assert(result <= addr, "object cannot move to the right");
+  DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
   return result;
 }

@@ -1472,9 +1472,53 @@ PSParallelCompact::summarize_new_objects(SpaceId id, HeapWord* start)
   assert(result, "should not fail:  bad filler object size");
 }

+void PSParallelCompact::provoke_split_fill_survivor(SpaceId id)
+{
+  if (total_invocations() % (ParallelOldGCSplitInterval * 3) != 0) {
+    return;
+  }
+
+  MutableSpace* const space = _space_info[id].space();
+  if (space->is_empty()) {
+    HeapWord* b = space->bottom();
+    HeapWord* t = b + space->capacity_in_words() / 2;
+    space->set_top(t);
+    if (ZapUnusedHeapArea) {
+      space->set_top_for_allocations();
+    }
+
+    size_t obj_len = 8;
+    while (b + obj_len <= t) {
+      CollectedHeap::fill_with_object(b, obj_len);
+      mark_bitmap()->mark_obj(b, obj_len);
+      summary_data().add_obj(b, obj_len);
+      b += obj_len;
+      obj_len = (obj_len & 0x18) + 8; // 8 16 24 32 8 16 24 32 ...
+    }
+    if (b < t) {
+      // The loop didn't completely fill to t (top); adjust top downward.
+      space->set_top(b);
+      if (ZapUnusedHeapArea) {
+        space->set_top_for_allocations();
+      }
+    }
+
+    HeapWord** nta = _space_info[id].new_top_addr();
+    bool result = summary_data().summarize(_space_info[id].split_info(),
+                                           space->bottom(), space->top(), NULL,
+                                           space->bottom(), space->end(), nta);
+    assert(result, "space must fit into itself");
+  }
+}
+
 void PSParallelCompact::provoke_split(bool & max_compaction)
 {
+  if (total_invocations() % ParallelOldGCSplitInterval != 0) {
+    return;
+  }
+
   const size_t region_size = ParallelCompactData::RegionSize;
   ParallelCompactData& sd = summary_data();
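The update expression obj_len = (obj_len & 0x18) + 8 is what produces the 8 16 24 32 cycle noted in the comment: 0x18 masks the two bits that distinguish those four lengths, and 32 & 0x18 is 0, which wraps back to 8. A throwaway check, runnable outside the VM (plain C++, not HotSpot code):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  size_t obj_len = 8;
  for (int i = 0; i < 8; ++i) {
    std::printf("%zu ", obj_len);    // prints: 8 16 24 32 8 16 24 32
    obj_len = (obj_len & 0x18) + 8;
  }
  std::printf("\n");
  return 0;
}
```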
@@ -1587,6 +1631,12 @@ void PSParallelCompact::summarize_spaces_quick()
     assert(result, "space must fit into itself");
     _space_info[i].set_dense_prefix(space->bottom());
   }
+
+#ifndef PRODUCT
+  if (ParallelOldGCSplitALot) {
+    provoke_split_fill_survivor(to_space_id);
+  }
+#endif // #ifndef PRODUCT
 }

 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)

@@ -1794,9 +1844,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
   }
 #ifndef PRODUCT
   if (ParallelOldGCSplitALot && old_space_total_live < old_capacity) {
-    if (total_invocations() % ParallelOldGCSplitInterval == 0) {
-      provoke_split(maximum_compaction);
-    }
+    provoke_split(maximum_compaction);
   }
 #endif // #ifndef PRODUCT

@@ -1828,14 +1876,6 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                             new_top_addr);
       assert(done, "space must fit into old gen");

-      // XXX - this is necessary because decrement_destination_counts() tests
-      // source_region() to determine if a region will be filled.  Probably
-      // better to pass src_space->new_top() into decrement_destination_counts
-      // and test that instead.
-      //
-      // Clear the source_region field for each region in the space.
-      clear_source_region(space->bottom(), _space_info[id].new_top());
-
       // Reset the new_top value for the space.
       _space_info[id].set_new_top(space->bottom());
     } else if (live > 0) {

@@ -1854,7 +1894,6 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
       dst_space_id = SpaceId(id);
       dst_space_end = space->end();
       new_top_addr = _space_info[id].new_top_addr();
-      HeapWord* const clear_end = _space_info[id].new_top();
       NOT_PRODUCT(summary_phase_msg(dst_space_id,
                                     space->bottom(), dst_space_end,
                                     SpaceId(id), next_src_addr, space->top());)

@@ -1865,13 +1904,6 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                             new_top_addr);
       assert(done, "space must fit when compacted into itself");
       assert(*new_top_addr <= space->top(), "usage should not grow");
-
-      // XXX - this should go away.  See comments above.
-      //
-      // Clear the source_region field in regions at the end of the space that
-      // will not be filled.
-      HeapWord* const clear_beg = _summary_data.region_align_up(*new_top_addr);
-      clear_source_region(clear_beg, clear_end);
     }
   }

@@ -3051,19 +3083,34 @@ HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
 }

 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
+                                                     SpaceId src_space_id,
                                                      size_t beg_region,
                                                      HeapWord* end_addr)
 {
   ParallelCompactData& sd = summary_data();
+
+#ifdef ASSERT
+  MutableSpace* const src_space = _space_info[src_space_id].space();
+  HeapWord* const beg_addr = sd.region_to_addr(beg_region);
+  assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
+         "src_space_id does not match beg_addr");
+  assert(src_space->contains(end_addr) || end_addr == src_space->end(),
+         "src_space_id does not match end_addr");
+#endif // #ifdef ASSERT
+
   RegionData* const beg = sd.region(beg_region);
-  HeapWord* const end_addr_aligned_up = sd.region_align_up(end_addr);
-  RegionData* const end = sd.addr_to_region_ptr(end_addr_aligned_up);
-  size_t cur_idx = beg_region;
-  for (RegionData* cur = beg; cur < end; ++cur, ++cur_idx) {
+  RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
+
+  // Regions up to new_top() are enqueued if they become available.
+  HeapWord* const new_top = _space_info[src_space_id].new_top();
+  RegionData* const enqueue_end =
+    sd.addr_to_region_ptr(sd.region_align_up(new_top));
+
+  for (RegionData* cur = beg; cur < end; ++cur) {
     assert(cur->data_size() > 0, "region must have live data");
     cur->decrement_destination_count();
-    if (cur_idx <= cur->source_region() && cur->available() && cur->claim()) {
-      cm->save_for_processing(cur_idx);
+    if (cur < enqueue_end && cur->available() && cur->claim()) {
+      cm->save_for_processing(sd.region(cur));
     }
   }
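For readers new to parallel old-gen compaction: each source region carries a count of destination regions still copying out of it; the loop above decrements that count and, once a region below new_top() becomes available, claims it and queues it as ready work. The toy model below restates that bookkeeping single-threaded. Every name and type here is invented for illustration; the real claim() is an atomic CAS so that exactly one GC worker enqueues a region.

```cpp
#include <cstddef>
#include <queue>
#include <vector>

struct Region {
  int  dest_count = 0;   // destinations still reading from this region
  bool claimed = false;  // set once the region is handed to a worker
};

// Decrement counts over [beg, end); enqueue regions that become available
// and lie below enqueue_end (the analogue of new_top()), since only those
// will themselves be refilled.
void decrement_range(std::vector<Region>& regions, size_t beg, size_t end,
                     size_t enqueue_end, std::queue<size_t>& work) {
  for (size_t i = beg; i < end; ++i) {
    Region& r = regions[i];
    --r.dest_count;
    if (i < enqueue_end && r.dest_count == 0 && !r.claimed) {
      r.claimed = true;  // the real code claims via an atomic CAS
      work.push(i);
    }
  }
}
```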
@@ -3178,7 +3225,8 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
   HeapWord* const old_src_addr = closure.source();
   closure.copy_partial_obj();
   if (closure.is_full()) {
-    decrement_destination_counts(cm, src_region_idx, closure.source());
+    decrement_destination_counts(cm, src_space_id, src_region_idx,
+                                 closure.source());
     region_ptr->set_deferred_obj_addr(NULL);
     region_ptr->set_completed();
     return;

@@ -3187,7 +3235,7 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
   HeapWord* const end_addr = sd.region_align_down(closure.source());
   if (sd.region_align_down(old_src_addr) != end_addr) {
     // The partial object was copied from more than one source region.
-    decrement_destination_counts(cm, src_region_idx, end_addr);
+    decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

     // Move to the next source region, possibly switching spaces as well.  All
     // args except end_addr may be modified.

@@ -3227,19 +3275,21 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
       region_ptr->set_deferred_obj_addr(closure.destination());
       status = closure.copy_until_full(); // copies from closure.source()

-      decrement_destination_counts(cm, src_region_idx, closure.source());
+      decrement_destination_counts(cm, src_space_id, src_region_idx,
+                                   closure.source());
       region_ptr->set_completed();
       return;
     }

     if (status == ParMarkBitMap::full) {
-      decrement_destination_counts(cm, src_region_idx, closure.source());
+      decrement_destination_counts(cm, src_space_id, src_region_idx,
+                                   closure.source());
       region_ptr->set_deferred_obj_addr(NULL);
       region_ptr->set_completed();
       return;
     }

-    decrement_destination_counts(cm, src_region_idx, end_addr);
+    decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);

     // Move to the next source region, possibly switching spaces as well.  All
     // args except end_addr may be modified.

@@ -3318,7 +3368,7 @@ void PSParallelCompact::reset_millis_since_last_gc() {
 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
 {
   if (source() != destination()) {
-    assert(source() > destination(), "must copy to the left");
+    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
     Copy::aligned_conjoint_words(source(), destination(), words_remaining());
   }
   update_state(words_remaining());

@@ -3339,7 +3389,7 @@ void MoveAndUpdateClosure::copy_partial_obj()
   // This test is necessary; if omitted, the pointer updates to a partial object
   // that crosses the dense prefix boundary could be overwritten.
   if (source() != destination()) {
-    assert(source() > destination(), "must copy to the left");
+    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
     Copy::aligned_conjoint_words(source(), destination(), words);
   }
   update_state(words);

@@ -3364,7 +3414,7 @@ MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
   }

   if (destination() != source()) {
-    assert(destination() < source(), "must copy to the left");
+    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
     Copy::aligned_conjoint_words(source(), destination(), words);
   }
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp

@@ -978,6 +978,10 @@ class PSParallelCompact : AllStatic {
   // Include the new objects in the summary data.
   static void summarize_new_objects(SpaceId id, HeapWord* start);

+  // Add live objects to a survivor space since it's rare that both survivors
+  // are non-empty.
+  static void provoke_split_fill_survivor(SpaceId id);
+
   // Add live objects and/or choose the dense prefix to provoke splitting.
   static void provoke_split(bool & maximum_compaction);
 #endif

@@ -1154,8 +1158,10 @@
                                HeapWord* end_addr);

   // Decrement the destination count for each non-empty source region in the
-  // range [beg_region, region(region_align_up(end_addr))).
+  // range [beg_region, region(region_align_up(end_addr))).  If the destination
+  // count for a region goes to 0 and it needs to be filled, enqueue it.
   static void decrement_destination_counts(ParCompactionManager* cm,
+                                           SpaceId src_space_id,
                                            size_t beg_region,
                                            HeapWord* end_addr);

@@ -1230,6 +1236,8 @@
 #endif  // #ifndef PRODUCT

 #ifdef  ASSERT
+  // Sanity check the new location of a word in the heap.
+  static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
+
   // Verify that all the regions have been emptied.
   static void verify_complete(SpaceId space_id);
 #endif  // #ifdef ASSERT

@@ -1397,6 +1405,15 @@ inline void PSParallelCompact::adjust_pointer(T* p,
   }
 }

+#ifdef ASSERT
+inline void
+PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
+{
+  assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
+         "must move left or to a different space");
+}
+#endif // ASSERT
+
 class MoveAndUpdateClosure: public ParMarkBitMapClosure {
  public:
   inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
src/share/vm/gc_interface/collectedHeap.cpp

@@ -178,8 +178,7 @@ CollectedHeap::fill_with_array(HeapWord* start, size_t words)
   // Set the length first for concurrent GC.
   ((arrayOop)start)->set_length((int)len);
-  post_allocation_setup_common(Universe::fillerArrayKlassObj(), start,
-                               words);
+  post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
   DEBUG_ONLY(zap_filler_array(start, words);)
 }
src/share/vm/memory/cardTableModRefBS.cpp

@@ -283,7 +283,7 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
   } else {
     entry = byte_after(old_region.last());
   }
-  assert(index_for(new_region.last()) < (int) _guard_index,
+  assert(index_for(new_region.last()) < _guard_index,
     "The guard card will be overwritten");
   // This line commented out cleans the newly expanded region and
   // not the aligned up expanded region.
src/share/vm/memory/cardTableModRefBS.hpp

@@ -428,7 +428,7 @@ public:
   }

   // Mapping from address to card marking array index.
-  int index_for(void* p) {
+  size_t index_for(void* p) {
     assert(_whole_heap.contains(p),
            "out of bounds access to card marking array");
     return byte_for(p) - _byte_map;
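Both card-table changes in this commit widen the card index from int to size_t; the index is a pointer difference into the byte map, so it is naturally unsigned and pointer-width. A self-contained sketch of the arithmetic (assuming a 64-bit build and HotSpot's default 512-byte cards; the base address and heap size are invented):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

const size_t card_size = 512;  // bytes covered by one card-table entry

// In spirit: byte_for(p) - _byte_map, i.e. (p - heap_base) / card_size.
size_t index_for(uintptr_t heap_base, uintptr_t p) {
  return (size_t)(p - heap_base) / card_size;
}

int main() {
  uintptr_t base = 0x100000000ULL;       // hypothetical heap base
  size_t heap_bytes = (size_t)3 << 40;   // hypothetical 3 TiB of covered space
  size_t last = index_for(base, base + heap_bytes - 1);
  std::printf("last card index = %zu\n", last);  // 6442450943, > INT_MAX
  return 0;
}
```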
src/share/vm/memory/universe.cpp

@@ -49,7 +49,6 @@ klassOop Universe::_constantPoolKlassObj = NULL;
 klassOop Universe::_constantPoolCacheKlassObj = NULL;
 klassOop Universe::_compiledICHolderKlassObj  = NULL;
 klassOop Universe::_systemObjArrayKlassObj    = NULL;
-klassOop Universe::_fillerArrayKlassObj       = NULL;
 oop Universe::_int_mirror    = NULL;
 oop Universe::_float_mirror  = NULL;
 oop Universe::_double_mirror = NULL;

@@ -127,7 +126,6 @@ void Universe::system_classes_do(void f(klassOop)) {
   f(instanceKlassKlassObj());
   f(constantPoolKlassObj());
   f(systemObjArrayKlassObj());
-  f(fillerArrayKlassObj());
 }

 void Universe::oops_do(OopClosure* f, bool do_all) {

@@ -182,7 +180,6 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
   f->do_oop((oop*)&_constantPoolCacheKlassObj);
   f->do_oop((oop*)&_compiledICHolderKlassObj);
   f->do_oop((oop*)&_systemObjArrayKlassObj);
-  f->do_oop((oop*)&_fillerArrayKlassObj);
   f->do_oop((oop*)&_the_empty_byte_array);
   f->do_oop((oop*)&_the_empty_short_array);
   f->do_oop((oop*)&_the_empty_int_array);

@@ -268,7 +265,6 @@ void Universe::genesis(TRAPS) {
     _compiledICHolderKlassObj = compiledICHolderKlass::create_klass(CHECK);
     _systemObjArrayKlassObj = objArrayKlassKlass::cast(objArrayKlassKlassObj())->allocate_system_objArray_klass(CHECK);
-    _fillerArrayKlassObj = typeArrayKlass::create_klass(T_INT, sizeof(jint), "<filler>", CHECK);
     _the_empty_byte_array = oopFactory::new_permanent_byteArray(0, CHECK);
     _the_empty_short_array = oopFactory::new_permanent_shortArray(0, CHECK);
src/share/vm/memory/universe.hpp

@@ -133,7 +133,6 @@ class Universe: AllStatic {
   static klassOop _constantPoolCacheKlassObj;
   static klassOop _compiledICHolderKlassObj;
   static klassOop _systemObjArrayKlassObj;
-  static klassOop _fillerArrayKlassObj;

   // Known objects in the VM

@@ -266,7 +265,6 @@ class Universe: AllStatic {
   static klassOop constantPoolCacheKlassObj()         { return _constantPoolCacheKlassObj; }
   static klassOop compiledICHolderKlassObj()          { return _compiledICHolderKlassObj; }
   static klassOop systemObjArrayKlassObj()            { return _systemObjArrayKlassObj; }
-  static klassOop fillerArrayKlassObj()               { return _fillerArrayKlassObj; }

   // Known objects in the VM
   static oop int_mirror()                  { return check_mirror(_int_mirror);
src/share/vm/opto/compile.cpp

@@ -2192,6 +2192,9 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
   case Op_DecodeN:
     assert(!n->in(1)->is_EncodeP(), "should be optimized out");
+    // DecodeN could be pinned on Sparc where it can't be fold into
+    // an address expression, see the code for Op_CastPP above.
+    assert(n->in(0) == NULL || !Matcher::clone_shift_expressions, "no control except on sparc");
     break;

   case Op_EncodeP: {
src/share/vm/opto/macro.cpp

@@ -1724,6 +1724,13 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
   if (klass_node == NULL) {
     Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
     klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
+#ifdef _LP64
+    if (UseCompressedOops && klass_node->is_DecodeN()) {
+      assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
+      klass_node->in(1)->init_req(0, ctrl);
+    } else
+#endif
+    klass_node->init_req(0, ctrl);
   }
   Node* proto_node = make_load(ctrl, mem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeX_X, TypeX_X->basic_type());
src/share/vm/runtime/arguments.cpp

@@ -444,9 +444,9 @@ char* SysClassPath::add_jars_to_path(char* path, const char* directory) {
 }

 // Parses a memory size specification string.
-static bool atomll(const char *s, jlong* result) {
-  jlong n = 0;
-  int args_read = sscanf(s, os::jlong_format_specifier(), &n);
+static bool atomull(const char *s, julong* result) {
+  julong n = 0;
+  int args_read = sscanf(s, os::julong_format_specifier(), &n);
   if (args_read != 1) {
     return false;
   }

@@ -460,15 +460,20 @@ static bool atomll(const char *s, jlong* result) {
   switch (*s) {
     case 'T': case 't':
       *result = n * G * K;
+      // Check for overflow.
+      if (*result / ((julong)G * K) != n) return false;
       return true;
     case 'G': case 'g':
       *result = n * G;
+      if (*result / G != n) return false;
       return true;
     case 'M': case 'm':
       *result = n * M;
+      if (*result / M != n) return false;
       return true;
     case 'K': case 'k':
       *result = n * K;
+      if (*result / K != n) return false;
       return true;
     case '\0':
       *result = n;
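The checks added above are the standard multiply-then-divide-back idiom for detecting unsigned overflow: unsigned multiplication wraps modulo 2^64, and it wrapped exactly when dividing the product by the multiplier fails to give back the original operand. A self-contained sketch (invented names, not the HotSpot parser):

```cpp
#include <cstdint>
#include <cstdio>

const uint64_t K = 1024, M = K * K, G = K * M;

// Returns false when n * unit does not fit in 64 bits (unit must be nonzero).
bool scale_checked(uint64_t n, uint64_t unit, uint64_t* out) {
  *out = n * unit;           // unsigned wrap-around is well defined
  return *out / unit == n;   // recovers n iff no wrap occurred
}

int main() {
  uint64_t v;
  std::printf("%d\n", scale_checked(16, G, &v));              // 1: 16G fits
  std::printf("%d\n", scale_checked(UINT64_MAX / 2, G, &v));  // 0: overflow
  return 0;
}
```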
@@ -478,10 +483,10 @@ static bool atomll(const char *s, jlong* result) {
   }
 }

-Arguments::ArgsRange Arguments::check_memory_size(jlong size, jlong min_size) {
+Arguments::ArgsRange Arguments::check_memory_size(julong size, julong min_size) {
   if (size < min_size) return arg_too_small;
   // Check that size will fit in a size_t (only relevant on 32-bit)
-  if ((julong) size > max_uintx) return arg_too_big;
+  if (size > max_uintx) return arg_too_big;
   return arg_in_range;
 }

@@ -522,10 +527,10 @@ static bool set_fp_numeric_flag(char* name, char* value, FlagValueOrigin origin)
 static bool set_numeric_flag(char* name, char* value, FlagValueOrigin origin) {
-  jlong v;
+  julong v;
   intx intx_v;
   bool is_neg = false;
-  // Check the sign first since atomll() parses only unsigned values.
+  // Check the sign first since atomull() parses only unsigned values.
   if (*value == '-') {
     if (!CommandLineFlags::intxAt(name, &intx_v)) {
       return false;

@@ -533,7 +538,7 @@ static bool set_numeric_flag(char* name, char* value, FlagValueOrigin origin) {
     value++;
     is_neg = true;
   }
-  if (!atomll(value, &v)) {
+  if (!atomull(value, &v)) {
     return false;
   }
   intx_v = (intx) v;

@@ -1677,9 +1682,9 @@ static bool match_option(const JavaVMOption* option, const char** names, const c
 }

 Arguments::ArgsRange Arguments::parse_memory_size(const char* s,
-                                                  jlong* long_arg,
-                                                  jlong min_size) {
-  if (!atomll(s, long_arg)) return arg_unreadable;
+                                                  julong* long_arg,
+                                                  julong min_size) {
+  if (!atomull(s, long_arg)) return arg_unreadable;
   return check_memory_size(*long_arg, min_size);
 }

@@ -1857,7 +1862,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
       FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
     // -Xmn for compatibility with other JVM vendors
     } else if (match_option(option, "-Xmn", &tail)) {
-      jlong long_initial_eden_size = 0;
+      julong long_initial_eden_size = 0;
       ArgsRange errcode = parse_memory_size(tail, &long_initial_eden_size, 1);
       if (errcode != arg_in_range) {
         jio_fprintf(defaultStream::error_stream(),

@@ -1869,7 +1874,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
       FLAG_SET_CMDLINE(uintx, NewSize, (size_t) long_initial_eden_size);
     // -Xms
     } else if (match_option(option, "-Xms", &tail)) {
-      jlong long_initial_heap_size = 0;
+      julong long_initial_heap_size = 0;
       ArgsRange errcode = parse_memory_size(tail, &long_initial_heap_size, 1);
       if (errcode != arg_in_range) {
         jio_fprintf(defaultStream::error_stream(),

@@ -1882,7 +1887,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
       set_min_heap_size(initial_heap_size());
     // -Xmx
     } else if (match_option(option, "-Xmx", &tail)) {
-      jlong long_max_heap_size = 0;
+      julong long_max_heap_size = 0;
       ArgsRange errcode = parse_memory_size(tail, &long_max_heap_size, 1);
       if (errcode != arg_in_range) {
         jio_fprintf(defaultStream::error_stream(),

@@ -1915,7 +1920,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
       }
     // -Xss
     } else if (match_option(option, "-Xss", &tail)) {
-      jlong long_ThreadStackSize = 0;
+      julong long_ThreadStackSize = 0;
       ArgsRange errcode = parse_memory_size(tail, &long_ThreadStackSize, 1000);
       if (errcode != arg_in_range) {
         jio_fprintf(defaultStream::error_stream(),

@@ -1931,9 +1936,9 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
       // HotSpot does not have separate native and Java stacks, ignore silently for compatibility
     // -Xmaxjitcodesize
     } else if (match_option(option, "-Xmaxjitcodesize", &tail)) {
-      jlong long_ReservedCodeCacheSize = 0;
+      julong long_ReservedCodeCacheSize = 0;
       ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize,
-                                            InitialCodeCacheSize);
+                                            (size_t)InitialCodeCacheSize);
       if (errcode != arg_in_range) {
         jio_fprintf(defaultStream::error_stream(),
                     "Invalid maximum code cache size: %s\n",

@@ -2238,7 +2243,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
     } else if (match_option(option, "-XX:TLEFragmentationRatio=", &tail)) {
       // No longer used.
     } else if (match_option(option, "-XX:TLESize=", &tail)) {
-      jlong long_tlab_size = 0;
+      julong long_tlab_size = 0;
       ArgsRange errcode = parse_memory_size(tail, &long_tlab_size, 1);
       if (errcode != arg_in_range) {
         jio_fprintf(defaultStream::error_stream(),

@@ -2293,7 +2298,7 @@ SOLARIS_ONLY(
                   "-XX:ParCMSPromoteBlocksToClaim in the future\n");
     } else if (match_option(option, "-XX:ParallelGCOldGenAllocBufferSize=", &tail)) {
-      jlong old_plab_size = 0;
+      julong old_plab_size = 0;
       ArgsRange errcode = parse_memory_size(tail, &old_plab_size, 1);
       if (errcode != arg_in_range) {
         jio_fprintf(defaultStream::error_stream(),

@@ -2301,13 +2306,13 @@ SOLARIS_ONLY(
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
-      FLAG_SET_CMDLINE(uintx, OldPLABSize, (julong) old_plab_size);
+      FLAG_SET_CMDLINE(uintx, OldPLABSize, old_plab_size);
       jio_fprintf(defaultStream::error_stream(),
                   "Please use -XX:OldPLABSize in place of "
                   "-XX:ParallelGCOldGenAllocBufferSize in the future\n");
     } else if (match_option(option, "-XX:ParallelGCToSpaceAllocBufferSize=", &tail)) {
-      jlong young_plab_size = 0;
+      julong young_plab_size = 0;
       ArgsRange errcode = parse_memory_size(tail, &young_plab_size, 1);
       if (errcode != arg_in_range) {
         jio_fprintf(defaultStream::error_stream(),

@@ -2315,7 +2320,7 @@ SOLARIS_ONLY(
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
-      FLAG_SET_CMDLINE(uintx, YoungPLABSize, (julong) young_plab_size);
+      FLAG_SET_CMDLINE(uintx, YoungPLABSize, young_plab_size);
       jio_fprintf(defaultStream::error_stream(),
                   "Please use -XX:YoungPLABSize in place of "
                   "-XX:ParallelGCToSpaceAllocBufferSize in the future\n");
src/share/vm/runtime/arguments.hpp

@@ -339,9 +339,9 @@ class Arguments : AllStatic {
   }
   static bool verify_percentage(uintx value, const char* name);
   static void describe_range_error(ArgsRange errcode);
-  static ArgsRange check_memory_size(jlong size, jlong min_size);
-  static ArgsRange parse_memory_size(const char* s, jlong* long_arg,
-                                     jlong min_size);
+  static ArgsRange check_memory_size(julong size, julong min_size);
+  static ArgsRange parse_memory_size(const char* s, julong* long_arg,
+                                     julong min_size);

   // methods to build strings from individual args
   static void build_jvm_args(const char* arg);
src/share/vm/runtime/javaCalls.cpp

@@ -504,7 +504,7 @@ class SignatureChekker : public SignatureIterator {
       intptr_t v = _value[p];
       if (v != 0) {
         size_t t = (size_t) v;
-        bad = (t < (size_t) os::vm_page_size()) || !(*(oop*)v)->is_oop_or_null(true);
+        bad = (t < (size_t) os::vm_page_size()) || !Handle::raw_resolve((oop*)v)->is_oop_or_null(true);
         if (CheckJNICalls && bad) {
           ReportJNIFatalError((JavaThread*)_thread, "Bad JNI oop argument");
         }
src/share/vm/services/management.cpp

@@ -694,10 +694,10 @@ JVM_ENTRY(jlong, jmm_SetPoolThreshold(JNIEnv* env, jobject obj, jmmThresholdType
                -1);
   }

-  if (threshold > max_intx) {
-    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
-               "Invalid threshold value > max value of size_t",
-               -1);
+  if ((size_t)threshold > max_uintx) {
+    stringStream st;
+    st.print("Invalid valid threshold value. Threshold value (" UINT64_FORMAT ") > max value of size_t (" SIZE_FORMAT ")",
+             (size_t)threshold, max_uintx);
+    THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), st.as_string(), -1);
   }

   MemoryPool* pool = get_memory_pool_from_jobject(obj, CHECK_(0L));