openanolis / dragonwell8_hotspot
Commit 6008e6c9, authored Dec 22, 2009 by trims

Merge
Parents: 4de17e95, ad1d0faf

Showing 13 changed files with 701 additions and 223 deletions (+701 −223)
src/share/vm/gc_implementation/g1/collectionSetChooser.cpp   +8   −1
src/share/vm/opto/compile.cpp                                +1   −0
src/share/vm/opto/escape.cpp                               +473 −212
src/share/vm/opto/escape.hpp                                 +6   −2
src/share/vm/opto/graphKit.cpp                               +5   −0
src/share/vm/opto/lcm.cpp                                    +3   −2
src/share/vm/opto/macro.cpp                                 +27   −0
src/share/vm/opto/memnode.cpp                               +65   −1
src/share/vm/opto/memnode.hpp                                +7   −1
src/share/vm/opto/node.hpp                                   +4   −1
src/share/vm/opto/parse3.cpp                                 +3   −3
test/compiler/6895383/Test.java                             +51   −0
test/compiler/6896727/Test.java                             +48   −0
src/share/vm/gc_implementation/g1/collectionSetChooser.cpp

@@ -351,9 +351,16 @@ void CollectionSetChooser::printSortedHeapRegions() {
   gclog_or_tty->print_cr("Printing %d Heap Regions sorted by amount of known garbage",
                          _numMarkedRegions);
+  DEBUG_ONLY(int marked_count = 0;)
   for (int i = 0; i < _markedRegions.length(); i++) {
-    printHeapRegion(_markedRegions.at(i));
+    HeapRegion* r = _markedRegions.at(i);
+    if (r != NULL) {
+      printHeapRegion(r);
+      DEBUG_ONLY(marked_count++;)
+    }
   }
+  assert(marked_count == _numMarkedRegions, "must be");
   gclog_or_tty->print_cr("Done sorted heap region print");
 }
src/share/vm/opto/compile.cpp

@@ -1852,6 +1852,7 @@ void Compile::dump_asm(int *pcs, uint pc_limit) {
           !n->is_Phi() &&       // a few noisely useless nodes
           !n->is_Proj() &&
           !n->is_MachTemp() &&
+          !n->is_SafePointScalarObject() &&
           !n->is_Catch() &&     // Would be nice to print exception table targets
           !n->is_MergeMem() &&  // Not very interesting
           !n->is_top() &&       // Debug info table constants
src/share/vm/opto/escape.cpp

@@ -543,6 +543,7 @@ bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
   int alias_idx = _compile->get_alias_index(tinst);
   igvn->set_type(addp, tinst);
   // record the allocation in the node map
+  assert(ptnode_adr(addp->_idx)->_node != NULL, "should be registered");
   set_map(addp->_idx, get_map(base->_idx));
   // Set addp's Base and Address to 'base'.
@@ -618,9 +619,14 @@ PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, Gro
   const TypePtr *atype = C->get_adr_type(alias_idx);
   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
   C->copy_node_notes_to(result, orig_phi);
-  set_map_phi(orig_phi->_idx, result);
   igvn->set_type(result, result->bottom_type());
   record_for_optimizer(result);
+  debug_only(Node* pn = ptnode_adr(orig_phi->_idx)->_node;)
+  assert(pn == NULL || pn == orig_phi, "wrong node");
+  set_map(orig_phi->_idx, result);
+  ptnode_adr(orig_phi->_idx)->_node = orig_phi;
   new_created = true;
   return result;
 }
@@ -710,6 +716,81 @@ static Node *step_through_mergemem(MergeMemNode *mmem, int alias_idx, const Type
   return mem;
 }

+//
+// Move memory users to their memory slices.
+//
+void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis, PhaseGVN *igvn) {
+  Compile* C = _compile;
+
+  const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
+  assert(tp != NULL, "ptr type");
+  int alias_idx = C->get_alias_index(tp);
+  int general_idx = C->get_general_index(alias_idx);
+
+  // Move users first
+  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+    Node* use = n->fast_out(i);
+    if (use->is_MergeMem()) {
+      MergeMemNode* mmem = use->as_MergeMem();
+      assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
+      if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
+        continue; // Nothing to do
+      }
+      // Replace previous general reference to mem node.
+      uint orig_uniq = C->unique();
+      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
+      assert(orig_uniq == C->unique(), "no new nodes");
+      mmem->set_memory_at(general_idx, m);
+      --imax;
+      --i;
+    } else if (use->is_MemBar()) {
+      assert(!use->is_Initialize(), "initializing stores should not be moved");
+      if (use->req() > MemBarNode::Precedent &&
+          use->in(MemBarNode::Precedent) == n) {
+        // Don't move related membars.
+        record_for_optimizer(use);
+        continue;
+      }
+      tp = use->as_MemBar()->adr_type()->isa_ptr();
+      if (tp != NULL && C->get_alias_index(tp) == alias_idx ||
+          alias_idx == general_idx) {
+        continue; // Nothing to do
+      }
+      // Move to general memory slice.
+      uint orig_uniq = C->unique();
+      Node* m = find_inst_mem(n, general_idx, orig_phis, igvn);
+      assert(orig_uniq == C->unique(), "no new nodes");
+      igvn->hash_delete(use);
+      imax -= use->replace_edge(n, m);
+      igvn->hash_insert(use);
+      record_for_optimizer(use);
+      --i;
+#ifdef ASSERT
+    } else if (use->is_Mem()) {
+      if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
+        // Don't move related cardmark.
+        continue;
+      }
+      // Memory nodes should have new memory input.
+      tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
+      assert(tp != NULL, "ptr type");
+      int idx = C->get_alias_index(tp);
+      assert(get_map(use->_idx) != NULL || idx == alias_idx,
+             "Following memory nodes should have new memory input or be on the same memory slice");
+    } else if (use->is_Phi()) {
+      // Phi nodes should be split and moved already.
+      tp = use->as_Phi()->adr_type()->isa_ptr();
+      assert(tp != NULL, "ptr type");
+      int idx = C->get_alias_index(tp);
+      assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
+    } else {
+      use->dump();
+      assert(false, "should not be here");
+#endif
+    }
+  }
+}
+
 //
 // Search memory chain of "mem" to find a MemNode whose address
 // is the specified alias index.
@@ -775,10 +856,18 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
           C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
         Node *un = result->as_Phi()->unique_input(phase);
         if (un != NULL) {
           orig_phis.append_if_missing(result->as_Phi());
           result = un;
         } else {
           break;
         }
+      } else if (result->is_ClearArray()) {
+        if (!ClearArrayNode::step_through(&result, (uint)tinst->instance_id(), phase)) {
+          // Can not bypass initialization of the instance
+          // we are looking for.
+          break;
+        }
+        // Otherwise skip it (the call updated 'result' value).
       } else if (result->Opcode() == Op_SCMemProj) {
         assert(result->in(0)->is_LoadStore(), "sanity");
         const Type *at = phase->type(result->in(0)->in(MemNode::Address));
@@ -808,7 +897,6 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
   return result;
 }
-
 //
 // Convert the types of unescaped object to instance types where possible,
 // propagate the new type information through the graph, and update memory
@@ -900,12 +988,13 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra
 //
 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist) {
   GrowableArray<Node *>  memnode_worklist;
-  GrowableArray<Node *>  mergemem_worklist;
   GrowableArray<PhiNode *>  orig_phis;
   PhaseGVN  *igvn = _compile->initial_gvn();
   uint     new_index_start = (uint) _compile->num_alias_types();
-  VectorSet visited(Thread::current()->resource_area());
-  VectorSet ptset(Thread::current()->resource_area());
+  Arena*  arena = Thread::current()->resource_area();
+  VectorSet visited(arena);
+  VectorSet ptset(arena);

   //  Phase 1:  Process possible allocations from alloc_worklist.
@@ -981,6 +1070,8 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
         //   - non-escaping
         //   - eligible to be a unique type
         //   - not determined to be ineligible by escape analysis
+        assert(ptnode_adr(alloc->_idx)->_node != NULL &&
+               ptnode_adr(n->_idx)->_node != NULL, "should be registered");
         set_map(alloc->_idx, n);
         set_map(n->_idx, alloc);
         const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
@@ -1025,7 +1116,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
             alloc_worklist.append_if_missing(addp2);
           }
           alloc_worklist.append_if_missing(use);
-        } else if (use->is_Initialize()) {
+        } else if (use->is_MemBar()) {
           memnode_worklist.append_if_missing(use);
         }
@@ -1035,10 +1126,12 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
        PointsTo(ptset, get_addp_base(n), igvn);
        assert(ptset.Size() == 1, "AddP address is unique");
        uint elem = ptset.getelem(); // Allocation node's index
-       if (elem == _phantom_object)
+       if (elem == _phantom_object) {
+         assert(false, "escaped allocation");
          continue; // Assume the value was set outside this method.
+       }
        Node *base = get_map(elem);  // CheckCastPP node
-       if (!split_AddP(n, base, igvn)) continue; // wrong type
+       if (!split_AddP(n, base, igvn)) continue; // wrong type from dead path
        tinst = igvn->type(base)->isa_oopptr();
     } else if (n->is_Phi() ||
                n->is_CheckCastPP() ||
@@ -1053,8 +1146,10 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
       PointsTo(ptset, n, igvn);
       if (ptset.Size() == 1) {
         uint elem = ptset.getelem(); // Allocation node's index
-        if (elem == _phantom_object)
+        if (elem == _phantom_object) {
+          assert(false, "escaped allocation");
           continue; // Assume the value was set outside this method.
+        }
         Node *val = get_map(elem);   // CheckCastPP node
         TypeNode *tn = n->as_Type();
         tinst = igvn->type(val)->isa_oopptr();
@@ -1069,8 +1164,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
           tn_t = tn_type->isa_oopptr();
         }

-        if (tn_t != NULL &&
-            tinst->cast_to_instance_id(TypeOopPtr::InstanceBot)->higher_equal(tn_t)) {
+        if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
           if (tn_type->isa_narrowoop()) {
             tn_type = tinst->make_narrowoop();
           } else {
@@ -1082,33 +1176,25 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
           igvn->hash_insert(tn);
           record_for_optimizer(n);
         } else {
-          continue; // wrong type
+          assert(tn_type == TypePtr::NULL_PTR ||
+                 tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()),
+                 "unexpected type");
+          continue; // Skip dead path with different type
         }
       }
     } else {
+      debug_only(n->dump();)
       assert(false, "EA: unexpected node");
       continue;
     }
-    // push users on appropriate worklist
+    // push allocation's users on appropriate worklist
     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       Node *use = n->fast_out(i);
       if(use->is_Mem() && use->in(MemNode::Address) == n) {
-        memnode_worklist.append_if_missing(use);
-      } else if (use->is_Initialize()) {
+        // Load/store to instance's field
         memnode_worklist.append_if_missing(use);
-      } else if (use->is_MergeMem()) {
-        mergemem_worklist.append_if_missing(use);
-      } else if (use->is_SafePoint() && tinst != NULL) {
-        // Look for MergeMem nodes for calls which reference unique allocation
-        // (through CheckCastPP nodes) even for debug info.
-        Node* m = use->in(TypeFunc::Memory);
-        uint iid = tinst->instance_id();
-        while (m->is_Proj() && m->in(0)->is_SafePoint() &&
-               m->in(0) != use && !m->in(0)->_idx != iid) {
-          m = m->in(0)->in(TypeFunc::Memory);
-        }
-        if (m->is_MergeMem()) {
-          mergemem_worklist.append_if_missing(m);
-        }
+      } else if (use->is_MemBar()) {
+        memnode_worklist.append_if_missing(use);
       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
         Node* addp2 = find_second_addp(use, n);
         if (addp2 != NULL) {
@@ -1121,6 +1207,29 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
                  use->is_DecodeN() ||
                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
         alloc_worklist.append_if_missing(use);
+#ifdef ASSERT
+      } else if (use->is_Mem()) {
+        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
+      } else if (use->is_MergeMem()) {
+        assert(_mergemem_worklist.contains(use->as_MergeMem()),
+               "EA: missing MergeMem node in the worklist");
+      } else if (use->is_SafePoint()) {
+        // Look for MergeMem nodes for calls which reference unique allocation
+        // (through CheckCastPP nodes) even for debug info.
+        Node* m = use->in(TypeFunc::Memory);
+        if (m->is_MergeMem()) {
+          assert(_mergemem_worklist.contains(m->as_MergeMem()),
+                 "EA: missing MergeMem node in the worklist");
+        }
+      } else {
+        uint op = use->Opcode();
+        if (!(op == Op_CmpP || op == Op_Conv2B || op == Op_CastP2X ||
+              op == Op_StoreCM || op == Op_FastLock || op == Op_AryEq ||
+              op == Op_StrComp || op == Op_StrEquals || op == Op_StrIndexOf)) {
+          n->dump();
+          use->dump();
+          assert(false, "EA: missing allocation reference path");
+        }
+#endif
       }
@@ -1138,13 +1247,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
     Node *n = memnode_worklist.pop();
     if (visited.test_set(n->_idx))
       continue;
-    if (n->is_Phi()) {
-      assert(n->as_Phi()->adr_type() != TypePtr::BOTTOM, "narrow memory slice required");
-      // we don't need to do anything, but the users must be pushed if we haven't processed
-      // this Phi before
-    } else if (n->is_Initialize()) {
-      // we don't need to do anything, but the users of the memory projection must be pushed
-      n = n->as_Initialize()->proj_out(TypeFunc::Memory);
+    if (n->is_Phi() || n->is_ClearArray()) {
+      // we don't need to do anything, but the users must be pushed
+    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
+      // we don't need to do anything, but the users must be pushed
+      n = n->as_MemBar()->proj_out(TypeFunc::Memory);
       if (n == NULL)
         continue;
     } else {
@@ -1161,6 +1268,10 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
       return;
     }
     if (mem != n->in(MemNode::Memory)) {
+      // We delay the memory edge update since we need old one in
+      // MergeMem code below when instances memory slices are separated.
+      debug_only(Node* pn = ptnode_adr(n->_idx)->_node;)
+      assert(pn == NULL || pn == n, "wrong node");
       set_map(n->_idx, mem);
       ptnode_adr(n->_idx)->_node = n;
     }
@@ -1181,36 +1292,55 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
     // push user on appropriate worklist
     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       Node *use = n->fast_out(i);
-      if (use->is_Phi()) {
+      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if(use->is_Mem() && use->in(MemNode::Memory) == n) {
+        if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores
+          continue;
        memnode_worklist.append_if_missing(use);
-      } else if (use->is_Initialize()) {
+      } else if (use->is_MemBar()) {
        memnode_worklist.append_if_missing(use);
+#ifdef ASSERT
+      } else if(use->is_Mem()) {
+        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
-        mergemem_worklist.append_if_missing(use);
+        assert(_mergemem_worklist.contains(use->as_MergeMem()),
+               "EA: missing MergeMem node in the worklist");
+      } else {
+        uint op = use->Opcode();
+        if (!(op == Op_StoreCM ||
+              (op == Op_CallLeaf && use->as_CallLeaf()->_name != NULL &&
+               strcmp(use->as_CallLeaf()->_name, "g1_wb_pre") == 0) ||
+              op == Op_AryEq || op == Op_StrComp ||
+              op == Op_StrEquals || op == Op_StrIndexOf)) {
+          n->dump();
+          use->dump();
+          assert(false, "EA: missing memory path");
+        }
+#endif
      }
    }
  }

  //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
-  //            Walk each memory moving the first node encountered of each
+  //            Walk each memory slice moving the first node encountered of each
  //            instance type to the the input corresponding to its alias index.
-  while (mergemem_worklist.length() != 0) {
-    Node *n = mergemem_worklist.pop();
-    assert(n->is_MergeMem(), "MergeMem node required.");
-    if (visited.test_set(n->_idx))
-      continue;
-    MergeMemNode* nmm = n->as_MergeMem();
+  uint length = _mergemem_worklist.length();
+  for (uint next = 0; next < length; ++next) {
+    MergeMemNode* nmm = _mergemem_worklist.at(next);
+    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan inputs which exist at the start, not ones we add during processing.
-    uint nslices = nmm->req();
+    // Note 2: MergeMem may already contains instance memory slices added
+    // during find_inst_mem() call when memory nodes were processed above.
    igvn->hash_delete(nmm);
+    uint nslices = nmm->req();
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = NULL;
      if (mem == NULL || mem->is_top())
        continue;
+      // First, update mergemem by moving memory nodes to corresponding slices
+      // if their type became more precise since this mergemem was created.
      while (mem->is_Mem()) {
        const Type *at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
@@ -1229,7 +1359,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
      }
      nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
      // Find any instance of the current type if we haven't encountered
-      // a value of the instance along the chain.
+      // already a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
@@ -1245,11 +1375,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
    }
    // Find the rest of instances values
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
-      const TypeOopPtr *tinst = igvn->C->get_adr_type(ni)->isa_oopptr();
+      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through general slice recursively.
-        result = nmm->memory_at(igvn->C->get_general_index(ni));
+        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis, igvn);
        if (_compile->failing()) {
          return;
@@ -1259,41 +1389,6 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
    }
    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
-
-    // Propagate new memory slices to following MergeMem nodes.
-    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-      Node *use = n->fast_out(i);
-      if (use->is_Call()) {
-        CallNode* in = use->as_Call();
-        if (in->proj_out(TypeFunc::Memory) != NULL) {
-          Node* m = in->proj_out(TypeFunc::Memory);
-          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
-            Node* mm = m->fast_out(j);
-            if (mm->is_MergeMem()) {
-              mergemem_worklist.append_if_missing(mm);
-            }
-          }
-        }
-        if (use->is_Allocate()) {
-          use = use->as_Allocate()->initialization();
-          if (use == NULL) {
-            continue;
-          }
-        }
-      }
-      if (use->is_Initialize()) {
-        InitializeNode* in = use->as_Initialize();
-        if (in->proj_out(TypeFunc::Memory) != NULL) {
-          Node* m = in->proj_out(TypeFunc::Memory);
-          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
-            Node* mm = m->fast_out(j);
-            if (mm->is_MergeMem()) {
-              mergemem_worklist.append_if_missing(mm);
-            }
-          }
-        }
-      }
-    }
  }

  //  Phase 4:  Update the inputs of non-instance memory Phis and
@@ -1322,19 +1417,48 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
  }

  // Update the memory inputs of MemNodes with the value we computed
-  // in Phase 2.
+  // in Phase 2 and move stores memory users to corresponding memory slices.
+#ifdef ASSERT
+  visited.Clear();
+  Node_Stack old_mems(arena, _compile->unique() >> 2);
+#endif
  for (uint i = 0; i < nodes_size(); i++) {
    Node *nmem = get_map(i);
    if (nmem != NULL) {
      Node *n = ptnode_adr(i)->_node;
-      if (n != NULL && n->is_Mem()) {
+      assert(n != NULL, "sanity");
+      if (n->is_Mem()) {
+#ifdef ASSERT
+        Node* old_mem = n->in(MemNode::Memory);
+        if (!visited.test_set(old_mem->_idx)) {
+          old_mems.push(old_mem, old_mem->outcnt());
+        }
+#endif
+        assert(n->in(MemNode::Memory) != nmem, "sanity");
+        if (!n->is_Load()) {
+          // Move memory users of a store first.
+          move_inst_mem(n, orig_phis, igvn);
+        }
+        // Now update memory input
        igvn->hash_delete(n);
        n->set_req(MemNode::Memory, nmem);
        igvn->hash_insert(n);
        record_for_optimizer(n);
+      } else {
+        assert(n->is_Allocate() || n->is_CheckCastPP() ||
+               n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
      }
    }
  }
+#ifdef ASSERT
+  // Verify that memory was split correctly
+  while (old_mems.is_nonempty()) {
+    Node* old_mem = old_mems.node();
+    uint  old_cnt = old_mems.index();
+    old_mems.pop();
+    assert(old_cnt = old_mem->outcnt(), "old mem could be lost");
+  }
+#endif
 }

bool ConnectionGraph::has_candidates(Compile *C) {
@@ -1381,8 +1505,20 @@ bool ConnectionGraph::compute_escape() {
        ptnode_adr(n->_idx)->node_type() == PointsToNode::JavaObject) {
      has_allocations = true;
    }
-    if (n->is_AddP())
-      cg_worklist.append(n->_idx);
+    if (n->is_AddP()) {
+      // Collect address nodes which directly reference an allocation.
+      // Use them during stage 3 below to build initial connection graph
+      // field edges. Other field edges could be added after StoreP/LoadP
+      // nodes are processed during stage 4 below.
+      Node* base = get_addp_base(n);
+      if (base->is_Proj() && base->in(0)->is_Allocate()) {
+        cg_worklist.append(n->_idx);
+      }
+    } else if (n->is_MergeMem()) {
+      // Collect all MergeMem nodes to add memory slices for
+      // scalar replaceable objects in split_unique_types().
+      _mergemem_worklist.append(n->as_MergeMem());
+    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      worklist_init.push(m);
@@ -1423,12 +1559,13 @@ bool ConnectionGraph::compute_escape() {
    }
  }

-  VectorSet ptset(Thread::current()->resource_area());
+  Arena* arena = Thread::current()->resource_area();
+  VectorSet ptset(arena);
  GrowableArray<uint>  deferred_edges;
-  VectorSet visited(Thread::current()->resource_area());
+  VectorSet visited(arena);

-  // 5. Remove deferred edges from the graph and collect
-  //    information needed for type splitting.
+  // 5. Remove deferred edges from the graph and adjust
+  //    escape state of nonescaping objects.
  cg_length = cg_worklist.length();
  for (uint next = 0; next < cg_length; ++next) {
    int ni = cg_worklist.at(next);
@@ -1438,98 +1575,9 @@ bool ConnectionGraph::compute_escape() {
      remove_deferred(ni, &deferred_edges, &visited);
      Node *n = ptn->_node;
      if (n->is_AddP()) {
-        // Search for objects which are not scalar replaceable.
-        // Mark their escape state as ArgEscape to propagate the state
-        // to referenced objects.
-        // Note: currently there are no difference in compiler optimizations
-        // for ArgEscape objects and NoEscape objects which are not
-        // scalar replaceable.
-
-        int offset = ptn->offset();
-        Node* base = get_addp_base(n);
-        ptset.Clear();
-        PointsTo(ptset, base, igvn);
-        int ptset_size = ptset.Size();
-
-        // Check if a field's initializing value is recorded and add
-        // a corresponding NULL field's value if it is not recorded.
-        // Connection Graph does not record a default initialization by NULL
-        // captured by Initialize node.
-        //
-        // Note: it will disable scalar replacement in some cases:
-        //
-        //    Point p[] = new Point[1];
-        //    p[0] = new Point(); // Will be not scalar replaced
-        //
-        // but it will save us from incorrect optimizations in next cases:
-        //
-        //    Point p[] = new Point[1];
-        //    if ( x ) p[0] = new Point(); // Will be not scalar replaced
-        //
-        // Without a control flow analysis we can't distinguish above cases.
-        //
-        if (offset != Type::OffsetBot && ptset_size == 1) {
-          uint elem = ptset.getelem(); // Allocation node's index
-          // It does not matter if it is not Allocation node since
-          // only non-escaping allocations are scalar replaced.
-          if (ptnode_adr(elem)->_node->is_Allocate() &&
-              ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
-            AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
-            InitializeNode* ini = alloc->initialization();
-            Node* value = NULL;
-            if (ini != NULL) {
-              BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
-              Node* store = ini->find_captured_store(offset, type2aelembytes(ft), igvn);
-              if (store != NULL && store->is_Store())
-                value = store->in(MemNode::ValueIn);
-            }
-            if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
-              // A field's initializing value was not recorded. Add NULL.
-              uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
-              add_pointsto_edge(ni, null_idx);
-            }
-          }
-        }
-
-        // An object is not scalar replaceable if the field which may point
-        // to it has unknown offset (unknown element of an array of objects).
-        //
-        if (offset == Type::OffsetBot) {
-          uint e_cnt = ptn->edge_count();
-          for (uint ei = 0; ei < e_cnt; ei++) {
-            uint npi = ptn->edge_target(ei);
-            set_escape_state(npi, PointsToNode::ArgEscape);
-            ptnode_adr(npi)->_scalar_replaceable = false;
-          }
-        }
-
-        // Currently an object is not scalar replaceable if a LoadStore node
-        // access its field since the field value is unknown after it.
-        //
-        bool has_LoadStore = false;
-        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-          Node *use = n->fast_out(i);
-          if (use->is_LoadStore()) {
-            has_LoadStore = true;
-            break;
-          }
-        }
-        // An object is not scalar replaceable if the address points
-        // to unknown field (unknown element for arrays, offset is OffsetBot).
-        //
-        // Or the address may point to more then one object. This may produce
-        // the false positive result (set scalar_replaceable to false)
-        // since the flow-insensitive escape analysis can't separate
-        // the case when stores overwrite the field's value from the case
-        // when stores happened on different control branches.
-        //
-        if (ptset_size > 1 || ptset_size != 0 &&
-            (has_LoadStore || offset == Type::OffsetBot)) {
-          for( VectorSetI j(&ptset); j.test(); ++j ) {
-            set_escape_state(j.elem, PointsToNode::ArgEscape);
-            ptnode_adr(j.elem)->_scalar_replaceable = false;
-          }
-        }
+        // Search for objects which are not scalar replaceable
+        // and adjust their escape state.
+        verify_escape_state(ni, ptset, igvn);
      }
    }
  }
@@ -1646,6 +1694,150 @@ bool ConnectionGraph::compute_escape() {
  return has_non_escaping_obj;
}

+// Search for objects which are not scalar replaceable.
+void ConnectionGraph::verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase) {
+  PointsToNode* ptn = ptnode_adr(nidx);
+  Node* n = ptn->_node;
+  assert(n->is_AddP(), "Should be called for AddP nodes only");
+  // Search for objects which are not scalar replaceable.
+  // Mark their escape state as ArgEscape to propagate the state
+  // to referenced objects.
+  // Note: currently there are no difference in compiler optimizations
+  // for ArgEscape objects and NoEscape objects which are not
+  // scalar replaceable.
+
+  Compile* C = _compile;
+
+  int offset = ptn->offset();
+  Node* base = get_addp_base(n);
+  ptset.Clear();
+  PointsTo(ptset, base, phase);
+  int ptset_size = ptset.Size();
+
+  // Check if a oop field's initializing value is recorded and add
+  // a corresponding NULL field's value if it is not recorded.
+  // Connection Graph does not record a default initialization by NULL
+  // captured by Initialize node.
+  //
+  // Note: it will disable scalar replacement in some cases:
+  //
+  //    Point p[] = new Point[1];
+  //    p[0] = new Point(); // Will be not scalar replaced
+  //
+  // but it will save us from incorrect optimizations in next cases:
+  //
+  //    Point p[] = new Point[1];
+  //    if ( x ) p[0] = new Point(); // Will be not scalar replaced
+  //
+  // Do a simple control flow analysis to distinguish above cases.
+  //
+  if (offset != Type::OffsetBot && ptset_size == 1) {
+    uint elem = ptset.getelem(); // Allocation node's index
+    // It does not matter if it is not Allocation node since
+    // only non-escaping allocations are scalar replaced.
+    if (ptnode_adr(elem)->_node->is_Allocate() &&
+        ptnode_adr(elem)->escape_state() == PointsToNode::NoEscape) {
+      AllocateNode* alloc = ptnode_adr(elem)->_node->as_Allocate();
+      InitializeNode* ini = alloc->initialization();
+
+      // Check only oop fields.
+      const Type* adr_type = n->as_AddP()->bottom_type();
+      BasicType basic_field_type = T_INT;
+      if (adr_type->isa_instptr()) {
+        ciField* field = C->alias_type(adr_type->isa_instptr())->field();
+        if (field != NULL) {
+          basic_field_type = field->layout_type();
+        } else {
+          // Ignore non field load (for example, klass load)
+        }
+      } else if (adr_type->isa_aryptr()) {
+        const Type* elemtype = adr_type->isa_aryptr()->elem();
+        basic_field_type = elemtype->array_element_basic_type();
+      } else {
+        // Raw pointers are used for initializing stores so skip it.
+        assert(adr_type->isa_rawptr() && base->is_Proj() &&
+               (base->in(0) == alloc), "unexpected pointer type");
+      }
+      if (basic_field_type == T_OBJECT ||
+          basic_field_type == T_NARROWOOP ||
+          basic_field_type == T_ARRAY) {
+        Node* value = NULL;
+        if (ini != NULL) {
+          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_OBJECT;
+          Node* store = ini->find_captured_store(offset, type2aelembytes(ft), phase);
+          if (store != NULL && store->is_Store()) {
+            value = store->in(MemNode::ValueIn);
+          } else if (ptn->edge_count() > 0) { // Are there oop stores?
+            // Check for a store which follows allocation without branches.
+            // For example, a volatile field store is not collected
+            // by Initialize node. TODO: it would be nice to use idom() here.
+            for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+              store = n->fast_out(i);
+              if (store->is_Store() && store->in(0) != NULL) {
+                Node* ctrl = store->in(0);
+                while(!(ctrl == ini || ctrl == alloc || ctrl == NULL ||
+                        ctrl == C->root() || ctrl == C->top() || ctrl->is_Region() ||
+                        ctrl->is_IfTrue() || ctrl->is_IfFalse())) {
+                  ctrl = ctrl->in(0);
+                }
+                if (ctrl == ini || ctrl == alloc) {
+                  value = store->in(MemNode::ValueIn);
+                  break;
+                }
+              }
+            }
+          }
+        }
+        if (value == NULL || value != ptnode_adr(value->_idx)->_node) {
+          // A field's initializing value was not recorded. Add NULL.
+          uint null_idx = UseCompressedOops ? _noop_null : _oop_null;
+          add_pointsto_edge(nidx, null_idx);
+        }
+      }
+    }
+  }
+
+  // An object is not scalar replaceable if the field which may point
+  // to it has unknown offset (unknown element of an array of objects).
+  //
+  if (offset == Type::OffsetBot) {
+    uint e_cnt = ptn->edge_count();
+    for (uint ei = 0; ei < e_cnt; ei++) {
+      uint npi = ptn->edge_target(ei);
+      set_escape_state(npi, PointsToNode::ArgEscape);
+      ptnode_adr(npi)->_scalar_replaceable = false;
+    }
+  }
+
+  // Currently an object is not scalar replaceable if a LoadStore node
+  // access its field since the field value is unknown after it.
+  //
+  bool has_LoadStore = false;
+  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
+    Node *use = n->fast_out(i);
+    if (use->is_LoadStore()) {
+      has_LoadStore = true;
+      break;
+    }
+  }
+  // An object is not scalar replaceable if the address points
+  // to unknown field (unknown element for arrays, offset is OffsetBot).
+  //
+  // Or the address may point to more then one object. This may produce
+  // the false positive result (set scalar_replaceable to false)
+  // since the flow-insensitive escape analysis can't separate
+  // the case when stores overwrite the field's value from the case
+  // when stores happened on different control branches.
+  //
+  if (ptset_size > 1 || ptset_size != 0 &&
+      (has_LoadStore || offset == Type::OffsetBot)) {
+    for( VectorSetI j(&ptset); j.test(); ++j ) {
+      set_escape_state(j.elem, PointsToNode::ArgEscape);
+      ptnode_adr(j.elem)->_scalar_replaceable = false;
+    }
+  }
+}

void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *phase) {
    switch (call->Opcode()) {
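The comment block inside the new verify_escape_state() sketches its two array-initialization cases in pseudo-Java. A compilable version of that sketch is below; the class and method names (DefaultInit, Point, unconditional, conditional) are hypothetical and only illustrate the patterns the comments describe: an initializing store that follows the allocation without branches can be found by the simple control-flow walk above, while a store under a branch cannot, so a NULL points-to edge is added and scalar replacement is disabled.

    // Hypothetical illustration of the cases described in verify_escape_state().
    public class DefaultInit {
        static class Point { int x; }

        static int unconditional() {
            Point[] p = new Point[1];
            p[0] = new Point();           // initializing store right after the allocation
            return p[0].x;
        }

        static int conditional(boolean flag) {
            Point[] p = new Point[1];
            if (flag) {
                p[0] = new Point();       // store under a branch: p[0] may still be null
            }
            return (p[0] == null) ? -1 : p[0].x;
        }

        public static void main(String[] args) {
            System.out.println(unconditional() + conditional(args.length > 0));
        }
    }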
@@ -1657,6 +1849,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
      assert(false, "should be done already");
      break;
#endif
+    case Op_CallLeaf:
    case Op_CallLeafNoFP:
    {
      // Stub calls, objects do not escape but they are not scale replaceable.
@@ -1667,9 +1860,23 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
        const Type* at = d->field_at(i);
        Node *arg = call->in(i)->uncast();
        const Type *aat = phase->type(arg);
-        if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr()) {
+        if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
+            ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting an Ptr");
+#ifdef ASSERT
+          if (!(call->Opcode() == Op_CallLeafNoFP &&
+                call->as_CallLeaf()->_name != NULL &&
+                (strstr(call->as_CallLeaf()->_name, "arraycopy") != 0) ||
+                call->as_CallLeaf()->_name != NULL &&
+                (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
+                 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0))
+          ) {
+            call->dump();
+            assert(false, "EA: unexpected CallLeaf");
+          }
+#endif
          set_escape_state(arg->_idx, PointsToNode::ArgEscape);
          if (arg->is_AddP()) {
            //
@@ -1706,9 +1913,10 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        int k = i - TypeFunc::Parms;
+        Node *arg = call->in(i)->uncast();

-        if (at->isa_oopptr() != NULL) {
-          Node *arg = call->in(i)->uncast();
+        if (at->isa_oopptr() != NULL &&
+            ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {

          bool global_escapes = false;
          bool fields_escapes = false;
@@ -1942,20 +2150,23 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
      record_for_optimizer(n);
      _processed.set(n->_idx);
    } else {
-      // Have to process call's arguments first.
+      // Don't mark as processed since call's arguments have to be processed.
      PointsToNode::NodeType nt = PointsToNode::UnknownType;
+      PointsToNode::EscapeState es = PointsToNode::UnknownEscape;

      // Check if a call returns an object.
      const TypeTuple *r = n->as_Call()->tf()->range();
-      if (n->is_CallStaticJava() && r->cnt() > TypeFunc::Parms &&
+      if (r->cnt() > TypeFunc::Parms &&
+          r->field_at(TypeFunc::Parms)->isa_ptr() &&
          n->as_Call()->proj_out(TypeFunc::Parms) != NULL) {
-        // Note:  use isa_ptr() instead of isa_oopptr() here because
-        //        the _multianewarray functions return a TypeRawPtr.
-        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
-          nt = PointsToNode::JavaObject;
-        }
+        nt = PointsToNode::JavaObject;
+        if (!n->is_CallStaticJava()) {
+          // Since the called mathod is statically unknown assume
+          // the worst case that the returned value globally escapes.
+          es = PointsToNode::GlobalEscape;
+        }
      }
-      add_node(n, nt, PointsToNode::UnknownEscape, false);
+      add_node(n, nt, es, false);
    }
    return;
  }
@@ -2088,18 +2299,27 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
    }
    case Op_Proj:
    {
-      // we are only interested in the result projection from a call
+      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
-        add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
-        process_call_result(n->as_Proj(), phase);
-        if (!_processed.test(n->_idx)) {
-          // The call's result may need to be processed later if the call
-          // returns it's argument and the argument is not processed yet.
-          _delayed_worklist.push(n);
+        const TypeTuple* r = n->in(0)->as_Call()->tf()->range();
+        assert(r->cnt() > TypeFunc::Parms, "sanity");
+        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
+          add_node(n, PointsToNode::LocalVar, PointsToNode::UnknownEscape, false);
+          int ti = n->in(0)->_idx;
+          // The call may not be registered yet (since not all its inputs are registered)
+          // if this is the projection from backbranch edge of Phi.
+          if (ptnode_adr(ti)->node_type() != PointsToNode::UnknownType) {
+            process_call_result(n->as_Proj(), phase);
+          }
+          if (!_processed.test(n->_idx)) {
+            // The call's result may need to be processed later if the call
+            // returns it's argument and the argument is not processed yet.
+            _delayed_worklist.push(n);
+          }
+          break;
        }
-      } else {
-        _processed.set(n->_idx);
      }
+      _processed.set(n->_idx);
      break;
    }
    case Op_Return:
@@ -2160,6 +2380,15 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
      }
      break;
    }
+    case Op_AryEq:
+    case Op_StrComp:
+    case Op_StrEquals:
+    case Op_StrIndexOf:
+    {
+      // char[] arrays passed to string intrinsics are not scalar replaceable.
+      add_node(n, PointsToNode::UnknownType, PointsToNode::UnknownEscape, false);
+      break;
+    }
    case Op_ThreadLocal:
    {
      add_node(n, PointsToNode::JavaObject, PointsToNode::ArgEscape, true);
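Both record_for_escape_analysis() here and build_connection_graph() further down now treat the string-intrinsic nodes (AryEq, StrComp, StrEquals, StrIndexOf) specially: the char[] arrays they touch do not escape, but they must not be scalar replaced because the intrinsic reads them as raw arrays. A minimal Java sketch of code that could reach these intrinsics on the JDKs of that era is below; the class name, method name, and loop counts are hypothetical and only serve as an illustration.

    // Hypothetical illustration: String operations that C2 may match
    // to the StrEquals / StrIndexOf intrinsics.
    public class StrIntrinsic {
        static boolean check(String a) {
            String b = new String(a);     // allocation whose backing char[] EA can see
            return a.equals(b)            // may be matched by the StrEquals intrinsic
                && a.indexOf('x') < 0;    // may be matched by the StrIndexOf intrinsic
        }

        public static void main(String[] args) {
            boolean r = false;
            for (int i = 0; i < 200000; i++) {
                r ^= check("abracadabra" + (i & 7));
            }
            System.out.println(r);
        }
    }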
@@ -2174,6 +2403,7 @@ void ConnectionGraph::record_for_escape_analysis(Node *n, PhaseTransform *phase)
void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
  uint n_idx = n->_idx;
+  assert(ptnode_adr(n_idx)->_node != NULL, "node should be registered");

  // Don't set processed bit for AddP, LoadP, StoreP since
  // they may need more then one pass to process.
@@ -2211,6 +2441,7 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
    case Op_DecodeN:
    {
      int ti = n->in(1)->_idx;
+      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "all nodes should be registered");
      if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
        add_pointsto_edge(n_idx, ti);
      } else {
@@ -2250,7 +2481,6 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
#endif

      Node* adr = n->in(MemNode::Address)->uncast();
-      const Type *adr_type = phase->type(adr);
      Node* adr_base;
      if (adr->is_AddP()) {
        adr_base = get_addp_base(adr);
@@ -2302,13 +2532,19 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
    }
    case Op_Proj:
    {
-      // we are only interested in the result projection from a call
+      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() ) {
-        process_call_result(n->as_Proj(), phase);
-        assert(_processed.test(n_idx), "all call results should be processed");
-      } else {
-        assert(false, "Op_Proj");
+        assert(ptnode_adr(n->in(0)->_idx)->node_type() != PointsToNode::UnknownType,
+               "all nodes should be registered");
+        const TypeTuple* r = n->in(0)->as_Call()->tf()->range();
+        assert(r->cnt() > TypeFunc::Parms, "sanity");
+        if (r->field_at(TypeFunc::Parms)->isa_ptr() != NULL) {
+          process_call_result(n->as_Proj(), phase);
+          assert(_processed.test(n_idx), "all call results should be processed");
+          break;
+        }
      }
+      assert(false, "Op_Proj");
      break;
    }
    case Op_Return:
@@ -2320,6 +2556,7 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
      }
#endif
      int ti = n->in(TypeFunc::Parms)->_idx;
+      assert(ptnode_adr(ti)->node_type() != PointsToNode::UnknownType, "node should be registered");
      if (ptnode_adr(ti)->node_type() == PointsToNode::JavaObject) {
        add_pointsto_edge(n_idx, ti);
      } else {
@@ -2354,14 +2591,38 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
      }
      break;
    }
+    case Op_AryEq:
+    case Op_StrComp:
+    case Op_StrEquals:
+    case Op_StrIndexOf:
+    {
+      // char[] arrays passed to string intrinsic do not escape but
+      // they are not scalar replaceable. Adjust escape state for them.
+      // Start from in(2) edge since in(1) is memory edge.
+      for (uint i = 2; i < n->req(); i++) {
+        Node* adr = n->in(i)->uncast();
+        const Type *at = phase->type(adr);
+        if (!adr->is_top() && at->isa_ptr()) {
+          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
+                 at->isa_ptr() != NULL, "expecting an Ptr");
+          if (adr->is_AddP()) {
+            adr = get_addp_base(adr);
+          }
+          // Mark as ArgEscape everything "adr" could point to.
+          set_escape_state(adr->_idx, PointsToNode::ArgEscape);
+        }
+      }
+      _processed.set(n_idx);
+      break;
+    }
    case Op_ThreadLocal:
    {
      assert(false, "Op_ThreadLocal");
      break;
    }
    default:
-      ;
-      // nothing to do
+      // This method should be called only for EA specific nodes.
+      ShouldNotReachHere();
  }
}
src/share/vm/opto/escape.hpp

@@ -210,6 +210,8 @@ private:
  Unique_Node_List  _delayed_worklist; // Nodes to be processed before
                                       // the call build_connection_graph().

+  GrowableArray<MergeMemNode *>  _mergemem_worklist; // List of all MergeMem nodes
+
  VectorSet                _processed; // Records which nodes have been
                                       // processed.

@@ -289,7 +291,7 @@ private:
  bool split_AddP(Node *addp, Node *base, PhaseGVN *igvn);
  PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created);
  PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn);
-  Node *find_mem(Node *mem, int alias_idx, PhaseGVN *igvn);
+  void  move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis, PhaseGVN *igvn);
  Node *find_inst_mem(Node *mem, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn);

  // Propagate unique types created for unescaped allocated objects

@@ -298,7 +300,6 @@ private:
  // manage entries in _node_map
  void  set_map(int idx, Node *n)        { _node_map.map(idx, n); }
-  void  set_map_phi(int idx, PhiNode *p) { _node_map.map(idx, (Node *) p); }
  Node *get_map(int idx)                 { return _node_map[idx]; }
  PhiNode *get_map_phi(int idx) {
    Node *phi = _node_map[idx];

@@ -315,6 +316,9 @@ private:
  // Set the escape state of a node
  void set_escape_state(uint ni, PointsToNode::EscapeState es);

+  // Search for objects which are not scalar replaceable.
+  void verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase);
+
public:
  ConnectionGraph(Compile *C);
src/share/vm/opto/graphKit.cpp

@@ -1714,6 +1714,11 @@ void GraphKit::replace_call(CallNode* call, Node* result) {
    C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
    C->gvn_replace_by(callprojs.catchall_memproj,   C->top());
    C->gvn_replace_by(callprojs.catchall_ioproj,    C->top());
+
+    // Replace the old exception object with top
+    if (callprojs.exobj != NULL) {
+      C->gvn_replace_by(callprojs.exobj, C->top());
+    }
  } else {
    GraphKit ekit(ejvms);
src/share/vm/opto/lcm.cpp

@@ -616,8 +616,9 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
      assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
    }
  }

-  if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire &&
-      n->req() > TypeFunc::Parms ) {
+  if( n->is_Mach() && n->req() > TypeFunc::Parms &&
+      (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
+       n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
    // MemBarAcquire could be created without Precedent edge.
    // del_req() replaces the specified edge with the last input edge
    // and then removes the last edge. If the specified edge > number of
src/share/vm/opto/macro.cpp

@@ -316,6 +316,21 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
        assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw");
      }
      mem = mem->in(MemNode::Memory);
+    } else if (mem->is_ClearArray()) {
+      if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
+        // Can not bypass initialization of the instance
+        // we are looking.
+        debug_only(intptr_t offset;)
+        assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
+        InitializeNode* init = alloc->as_Allocate()->initialization();
+        // We are looking for stored value, return Initialize node
+        // or memory edge from Allocate node.
+        if (init != NULL)
+          return init;
+        else
+          return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers).
+      }
+      // Otherwise skip it (the call updated 'mem' value).
    } else if (mem->Opcode() == Op_SCMemProj) {
      assert(mem->in(0)->is_LoadStore(), "sanity");
      const TypePtr* atype = mem->in(0)->in(MemNode::Address)->bottom_type()->is_ptr();

@@ -823,6 +838,18 @@ void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) {
          Node *n = use->last_out(k);
          uint oc2 = use->outcnt();
          if (n->is_Store()) {
+#ifdef ASSERT
+            // Verify that there is no dependent MemBarVolatile nodes,
+            // they should be removed during IGVN, see MemBarNode::Ideal().
+            for (DUIterator_Fast pmax, p = n->fast_outs(pmax); p < pmax; p++) {
+              Node* mb = n->fast_out(p);
+              assert(mb->is_Initialize() || !mb->is_MemBar() ||
+                     mb->req() <= MemBarNode::Precedent ||
+                     mb->in(MemBarNode::Precedent) != n,
+                     "MemBarVolatile should be eliminated for non-escaping object");
+            }
+#endif
            _igvn.replace_node(n, n->in(MemNode::Memory));
          } else {
            eliminate_card_mark(n);
src/share/vm/opto/memnode.cpp

@@ -123,6 +123,13 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr,
      } else {
        assert(false, "unexpected projection");
      }
+    } else if (result->is_ClearArray()) {
+      if (!ClearArrayNode::step_through(&result, instance_id, phase)) {
+        // Can not bypass initialization of the instance
+        // we are looking for.
+        break;
+      }
+      // Otherwise skip it (the call updated 'result' value).
    } else if (result->is_MergeMem()) {
      result = step_through_mergemem(phase, result->as_MergeMem(), t_adr, NULL, tty);
    }

@@ -537,6 +544,15 @@ Node* MemNode::find_previous_store(PhaseTransform* phase) {
      } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) {
        mem = mem->in(0)->in(TypeFunc::Memory);
        continue;           // (a) advance through independent MemBar memory
+      } else if (mem->is_ClearArray()) {
+        if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) {
+          // (the call updated 'mem' value)
+          continue;         // (a) advance through independent allocation memory
+        } else {
+          // Can not bypass initialization of the instance
+          // we are looking for.
+          return mem;
+        }
      } else if (mem->is_MergeMem()) {
        int alias_idx = phase->C->get_alias_index(adr_type());
        mem = mem->as_MergeMem()->memory_at(alias_idx);

@@ -2454,6 +2470,31 @@ Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return mem;
}

+//----------------------------step_through----------------------------------
+// Return allocation input memory edge if it is different instance
+// or itself if it is the one we are looking for.
+bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) {
+  Node* n = *np;
+  assert(n->is_ClearArray(), "sanity");
+  intptr_t offset;
+  AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset);
+  // This method is called only before Allocate nodes are expanded during
+  // macro nodes expansion. Before that ClearArray nodes are only generated
+  // in LibraryCallKit::generate_arraycopy() which follows allocations.
+  assert(alloc != NULL, "should have allocation");
+  if (alloc->_idx == instance_id) {
+    // Can not bypass initialization of the instance we are looking for.
+    return false;
+  }
+  // Otherwise skip it.
+  InitializeNode* init = alloc->initialization();
+  if (init != NULL)
+    *np = init->in(TypeFunc::Memory);
+  else
+    *np = alloc->in(TypeFunc::Memory);
+  return true;
+}
+
//----------------------------clear_memory-------------------------------------
// Generate code to initialize object storage to zero.
Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest,

@@ -2627,7 +2668,30 @@ MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
// Return a node which is more "ideal" than the current node.  Strip out
// control copies
Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
-  return remove_dead_region(phase, can_reshape) ? this : NULL;
+  if (remove_dead_region(phase, can_reshape)) return this;
+
+  // Eliminate volatile MemBars for scalar replaced objects.
+  if (can_reshape && req() == (Precedent+1) &&
+      (Opcode() == Op_MemBarAcquire || Opcode() == Op_MemBarVolatile)) {
+    // Volatile field loads and stores.
+    Node* my_mem = in(MemBarNode::Precedent);
+    if (my_mem != NULL && my_mem->is_Mem()) {
+      const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
+      // Check for scalar replaced object reference.
+      if( t_oop != NULL && t_oop->is_known_instance_field() &&
+          t_oop->offset() != Type::OffsetBot &&
+          t_oop->offset() != Type::OffsetTop) {
+        // Replace MemBar projections by its inputs.
+        PhaseIterGVN* igvn = phase->is_IterGVN();
+        igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory));
+        igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control));
+        // Must return either the original node (now dead) or a new node
+        // (Do not return a top here, since that would break the uniqueness of top.)
+        return new (phase->C, 1) ConINode(TypeInt::ZERO);
+      }
+    }
+  }
+  return NULL;
}

//------------------------------Value------------------------------------------
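The new MemBarNode::Ideal() logic above removes MemBarAcquire/MemBarVolatile nodes whose Precedent input is a load or store to a known, non-escaping instance, so volatile fields no longer block scalar replacement. A minimal Java sketch of the kind of source this targets is below; the class and method names are hypothetical, and whether the object is actually scalar replaced depends on the compiler's own escape-analysis decisions.

    // Hypothetical illustration: a non-escaping object with volatile fields.
    public class VolatileScalarization {
        static class Point {
            volatile int x;               // volatile accesses emit acquire/volatile membars
            volatile int y;
            Point(int x, int y) { this.x = x; this.y = y; }
        }

        static int distSq(int a, int b) {
            Point p = new Point(a, b);    // p never escapes distSq
            return p.x * p.x + p.y * p.y;
        }

        public static void main(String[] args) {
            long sum = 0;
            for (int i = 0; i < 1000000; i++) {
                sum += distSq(i, i + 1);  // warm up so the method gets compiled
            }
            System.out.println(sum);
        }
    }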
src/share/vm/opto/memnode.hpp

@@ -717,7 +717,10 @@ public:
//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
-  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base ) : Node(ctrl,arymem,word_cnt,base) {}
+  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
+    : Node(ctrl,arymem,word_cnt,base) {
+    init_class_id(Class_ClearArray);
+  }
  virtual int         Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the

@@ -743,6 +746,9 @@ public:
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
+  // Return allocation input memory edge if it is different instance
+  // or itself if it is the one we are looking for.
+  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};

//------------------------------StrComp-------------------------------------
src/share/vm/opto/node.hpp

@@ -47,6 +47,7 @@ class CallStaticJavaNode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
+class ClearArrayNode;
class CmpNode;
class CodeBuffer;
class ConstraintCastNode;

@@ -599,8 +600,9 @@ public:
    DEFINE_CLASS_ID(BoxLock,  Node, 10)
    DEFINE_CLASS_ID(Add,      Node, 11)
    DEFINE_CLASS_ID(Mul,      Node, 12)
+    DEFINE_CLASS_ID(ClearArray, Node, 13)

-    _max_classes = ClassMask_Mul
+    _max_classes = ClassMask_ClearArray
  };
#undef DEFINE_CLASS_ID

@@ -698,6 +700,7 @@ public:
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(ConstraintCast)
+  DEFINE_CLASS_QUERY(ClearArray)
  DEFINE_CLASS_QUERY(CMove)
  DEFINE_CLASS_QUERY(Cmp)
  DEFINE_CLASS_QUERY(CountedLoop)
src/share/vm/opto/parse3.cpp

@@ -240,19 +240,19 @@ void Parse::do_put_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool
    // membar is dependent on the store, keeping any other membars generated
    // below from floating up past the store.
    int adr_idx = C->get_alias_index(adr_type);
-    insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx);
+    insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx, store);

    // Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed
    // volatile alias indices. Skip this if the membar is redundant.
    if (adr_idx != Compile::AliasIdxBot) {
-      insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot);
+      insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot, store);
    }

    // Finally, place alias-index-specific membars for each volatile index
    // that isn't the adr_idx membar. Typically there's only 1 or 2.
    for (int i = Compile::AliasIdxRaw; i < C->num_alias_types(); i++) {
      if (i != adr_idx && C->alias_type(i)->is_volatile()) {
-        insert_mem_bar_volatile(Op_MemBarVolatile, i);
+        insert_mem_bar_volatile(Op_MemBarVolatile, i, store);
      }
    }
  }
test/compiler/6895383/Test.java
new file mode 100644
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
/**
* @test
* @bug 6895383
* @summary JCK test throws NPE for method compiled with Escape Analysis
*
* @run main/othervm -Xcomp Test
*/
import java.util.*;
import java.util.concurrent.*;

public class Test {
    public static void main(String argv[]) {
        Test test = new Test();
        test.testRemove1_IndexOutOfBounds();
        test.testAddAll1_IndexOutOfBoundsException();
    }

    public void testRemove1_IndexOutOfBounds() {
        CopyOnWriteArrayList c = new CopyOnWriteArrayList();
    }

    public void testAddAll1_IndexOutOfBoundsException() {
        try {
            CopyOnWriteArrayList c = new CopyOnWriteArrayList();
            c.addAll(-1, new LinkedList()); // should throw IndexOutOfBoundsException
        } catch (IndexOutOfBoundsException e) {
        }
    }
}
test/compiler/6896727/Test.java
new file mode 100644
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/
/*
* @test
* @bug 6896727
* @summary nsk/logging/LoggingPermission/LoggingPermission/logperm002 fails with G1, EscapeAnalisys w/o COOPs
* @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:+DoEscapeAnalysis -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC Test
*/
public class Test {

    final static String testString = "abracadabra";

    public static void main(String args[]) {
        String params[][] = {
            {"control", testString}
        };
        for (int i = 0; i < params.length; i++) {
            try {
                System.out.println("Params :" + testString +
                                   " and " + params[i][0] + ", " + params[i][1]);
                if (params[i][1] == null) {
                    System.exit(97);
                }
            } catch (Exception e) {}
        }
    }
}