openanolis / dragonwell8_hotspot

Commit 054034d1
Authored Sep 26, 2014 by amurillo
Merge commit; parents: e8b26c7e, 5a581dd5

Showing 36 changed files with 1,253 additions and 819 deletions (+1253 / -819)
Changed files:

  make/hotspot_version                                              +1    -1
  src/cpu/sparc/vm/sharedRuntime_sparc.cpp                          +63   -33
  src/cpu/sparc/vm/sparc.ad                                         +3    -3
  src/share/vm/c1/c1_Canonicalizer.cpp                              +74   -42
  src/share/vm/c1/c1_LIRGenerator.cpp                               +65   -40
  src/share/vm/classfile/symbolTable.cpp                            +2    -2
  src/share/vm/classfile/symbolTable.hpp                            +6    -6
  src/share/vm/compiler/compileBroker.cpp                           +6    -0
  src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp           +288  -274
  src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp           +25   -183
  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp             +54   -53
  src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp             +1    -7
  src/share/vm/gc_implementation/g1/g1EvacFailure.hpp               +2    -0
  src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp              +0    -4
  src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp              +0    -5
  src/share/vm/gc_implementation/g1/g1RemSet.cpp                    +4    -4
  src/share/vm/gc_implementation/g1/g1RemSet.hpp                    +2    -2
  src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp             +1    -0
  src/share/vm/gc_implementation/g1/g1_globals.hpp                  +0    -4
  src/share/vm/gc_implementation/g1/heapRegion.cpp                  +5    -9
  src/share/vm/gc_implementation/g1/heapRegion.hpp                  +1    -6
  src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp            +22   -83
  src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp            +5    -8
  src/share/vm/memory/freeList.cpp                                  +0    -2
  src/share/vm/oops/method.cpp                                      +4    -6
  src/share/vm/oops/method.hpp                                      +7    -5
  src/share/vm/utilities/hashtable.cpp                              +32   -23
  src/share/vm/utilities/hashtable.hpp                              +26   -10
  test/Makefile                                                     +2    -2
  test/compiler/unsafe/UnsafeRaw.java                               +140  -0
  test/runtime/SharedArchiveFile/ArchiveDoesNotExist.java           +68   -0
  test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java   +7    -2
  test/runtime/SharedArchiveFile/DefaultUseWithClient.java          +72   -0
  test/runtime/SharedArchiveFile/LimitSharedSizes.java              +92   -0
  test/runtime/SharedArchiveFile/SharedBaseAddress.java             +77   -0
  test/runtime/SharedArchiveFile/SpaceUtilizationCheck.java         +96   -0
make/hotspot_version

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
 HS_MAJOR_VER=25
 HS_MINOR_VER=40
-HS_BUILD_NUMBER=11
+HS_BUILD_NUMBER=12
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8

src/cpu/sparc/vm/sharedRuntime_sparc.cpp

@@ -1128,51 +1128,82 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
     // Hoist any int/ptr/long's in the first 6 to int regs.
     // Hoist any flt/dbl's in the first 16 dbl regs.
     int j = 0;                  // Count of actual args, not HALVES
-    for( int i=0; i<total_args_passed; i++, j++ ) {
-      switch( sig_bt[i] ) {
+    VMRegPair param_array_reg;  // location of the argument in the parameter array
+    for (int i = 0; i < total_args_passed; i++, j++) {
+      param_array_reg.set_bad();
+      switch (sig_bt[i]) {
       case T_BOOLEAN:
       case T_BYTE:
       case T_CHAR:
       case T_INT:
       case T_SHORT:
-        regs[i].set1( int_stk_helper( j ) );
+        regs[i].set1(int_stk_helper(j));
         break;
       case T_LONG:
-        assert( sig_bt[i+1] == T_VOID, "expecting half" );
+        assert(sig_bt[i + 1] == T_VOID, "expecting half");
       case T_ADDRESS: // raw pointers, like current thread, for VM calls
       case T_ARRAY:
       case T_OBJECT:
       case T_METADATA:
-        regs[i].set2( int_stk_helper( j ) );
+        regs[i].set2(int_stk_helper(j));
         break;
       case T_FLOAT:
-        if ( j < 16 ) {
-          // V9ism: floats go in ODD registers
-          regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
-        } else {
-          // V9ism: floats go in ODD stack slot
-          regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
+        // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
+        // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
+        //
+        // "When a callee prototype exists, and does not indicate variable arguments,
+        // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
+        // will be promoted to floating-point registers"
+        //
+        // By "promoted" it means that the argument is located in two places, an unused
+        // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
+        // float register.  In most cases, there are 6 or fewer arguments of any type,
+        // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
+        // serve as shadow slots.  Per the spec floating point registers %d6 to %d16
+        // require slots beyond that (up to %sp+BIAS+248).
+        //
+        {
+          // V9ism: floats go in ODD registers and stack slots
+          int float_index = 1 + (j << 1);
+          param_array_reg.set1(VMRegImpl::stack2reg(float_index));
+          if (j < 16) {
+            regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
+          } else {
+            regs[i] = param_array_reg;
+          }
         }
         break;
       case T_DOUBLE:
-        assert( sig_bt[i+1] == T_VOID, "expecting half" );
-        if ( j < 16 ) {
-          // V9ism: doubles go in EVEN/ODD regs
-          regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
-        } else {
-          // V9ism: doubles go in EVEN/ODD stack slots
-          regs[i].set2(VMRegImpl::stack2reg(j<<1));
+        {
+          assert(sig_bt[i + 1] == T_VOID, "expecting half");
+          // V9ism: doubles go in EVEN/ODD regs and stack slots
+          int double_index = (j << 1);
+          param_array_reg.set2(VMRegImpl::stack2reg(double_index));
+          if (j < 16) {
+            regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
+          } else {
+            // V9ism: doubles go in EVEN/ODD stack slots
+            regs[i] = param_array_reg;
+          }
         }
         break;
-      case T_VOID:  regs[i].set_bad(); j--; break; // Do not count HALVES
+      case T_VOID:
+        regs[i].set_bad();
+        j--;
+        break; // Do not count HALVES
       default:
         ShouldNotReachHere();
       }
-      if (regs[i].first()->is_stack()) {
-        int off =  regs[i].first()->reg2stack();
+      // Keep track of the deepest parameter array slot.
+      if (!param_array_reg.first()->is_valid()) {
+        param_array_reg = regs[i];
+      }
+      if (param_array_reg.first()->is_stack()) {
+        int off = param_array_reg.first()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
-      if (regs[i].second()->is_stack()) {
-        int off =  regs[i].second()->reg2stack();
+      if (param_array_reg.second()->is_stack()) {
+        int off = param_array_reg.second()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
     }

@@ -1180,8 +1211,8 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 #else // _LP64
     // V8 convention: first 6 things in O-regs, rest on stack.
     // Alignment is willy-nilly.
-    for( int i=0; i<total_args_passed; i++ ) {
-      switch( sig_bt[i] ) {
+    for (int i = 0; i < total_args_passed; i++) {
+      switch (sig_bt[i]) {
       case T_ADDRESS: // raw pointers, like current thread, for VM calls
       case T_ARRAY:
       case T_BOOLEAN:

@@ -1192,23 +1223,23 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
       case T_OBJECT:
       case T_METADATA:
       case T_SHORT:
-        regs[i].set1( int_stk_helper( i ) );
+        regs[i].set1(int_stk_helper(i));
         break;
       case T_DOUBLE:
       case T_LONG:
-        assert( sig_bt[i+1] == T_VOID, "expecting half" );
-        regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
+        assert(sig_bt[i + 1] == T_VOID, "expecting half");
+        regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
         break;
       case T_VOID: regs[i].set_bad(); break;
       default:
         ShouldNotReachHere();
       }
       if (regs[i].first()->is_stack()) {
-        int off =  regs[i].first()->reg2stack();
+        int off = regs[i].first()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
       if (regs[i].second()->is_stack()) {
-        int off =  regs[i].second()->reg2stack();
+        int off = regs[i].second()->reg2stack();
         if (off > max_stack_slots) max_stack_slots = off;
       }
     }

@@ -1357,11 +1388,10 @@ static void object_move(MacroAssembler* masm,
     const Register rOop = src.first()->as_Register();
     const Register rHandle = L5;
     int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
-    int offset = oop_slot*VMRegImpl::stack_slot_size;
-    Label skip;
+    int offset = oop_slot * VMRegImpl::stack_slot_size;
     __ st_ptr(rOop, SP, offset + STACK_BIAS);
     if (is_receiver) {
-      *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
+      *receiver_offset = offset;
     }
     map->set_oop(VMRegImpl::stack2reg(oop_slot));
     __ add(SP, offset + STACK_BIAS, rHandle);

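For readers unfamiliar with the V9 convention the hunks above encode: argument number j (halves not counted) owns the parameter-array slot pair starting at stack slot 2*j, a float occupies the odd slot of that pair, and only the first 16 floating-point arguments are also promoted into registers. A minimal standalone sketch of that slot arithmetic, with illustrative names (this is not HotSpot code):

    #include <cstdio>

    // Illustrative only: where does C argument number j (0-based, halves not
    // counted) land under the V9-style rules used by c_calling_convention above?
    struct ArgLoc {
      bool in_register;  // promoted to a float register?
      int  slot_or_reg;  // parameter-array stack slot, or float register number
    };

    static ArgLoc float_arg_loc(int j) {
      int float_index = 1 + (j << 1);                // floats use the ODD slot of the pair
      if (j < 16) return ArgLoc{true, float_index};  // promoted to a float register
      return ArgLoc{false, float_index};             // past 16 args: stays in the slot
    }

    static ArgLoc double_arg_loc(int j) {
      int double_index = j << 1;                     // doubles use the EVEN/ODD slot pair
      if (j < 16) return ArgLoc{true, double_index};
      return ArgLoc{false, double_index};
    }

    int main() {
      for (int j : {0, 5, 15, 16, 20}) {
        ArgLoc f = float_arg_loc(j), d = double_arg_loc(j);
        std::printf("arg %2d: float -> %s %d, double -> %s %d\n", j,
                    f.in_register ? "reg" : "slot", f.slot_or_reg,
                    d.in_register ? "reg" : "slot", d.slot_or_reg);
      }
      return 0;
    }

The fixed code keeps the shadow parameter-array slot in param_array_reg even when the value is promoted to a register, so max_stack_slots accounts for the deepest shadow slot rather than only for register-assigned values.
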
src/cpu/sparc/vm/sparc.ad

@@ -1989,7 +1989,7 @@ void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
 // to implement the UseStrictFP mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = false;
 
-// Are floats conerted to double when stored to stack during deoptimization?
+// Are floats converted to double when stored to stack during deoptimization?
 // Sparc does not handle callee-save floats.
 bool Matcher::float_in_double() { return false; }

@@ -3218,7 +3218,7 @@ enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI r
 // are owned by the CALLEE.  Holes should not be nessecary in the
 // incoming area, as the Java calling convention is completely under
 // the control of the AD file.  Doubles can be sorted and packed to
-// avoid holes.  Holes in the outgoing arguments may be nessecary for
+// avoid holes.  Holes in the outgoing arguments may be necessary for
 // varargs C calling conventions.
 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 // even aligned with pad0 as needed.

@@ -3284,7 +3284,7 @@ frame %{
 %}
 
 // Body of function which returns an OptoRegs array locating
-// arguments either in registers or in stack slots for callin
+// arguments either in registers or in stack slots for calling
 // C.
 c_calling_convention %{
   // This is obviously always outgoing

src/share/vm/c1/c1_Canonicalizer.cpp

@@ -327,7 +327,7 @@ void Canonicalizer::do_ShiftOp (ShiftOp* x) {
   if (t2->is_constant()) {
     switch (t2->tag()) {
       case intTag   : if (t2->as_IntConstant()->value() == 0)         set_canonical(x->x()); return;
-      case longTag  : if (t2->as_IntConstant()->value() == 0)         set_canonical(x->x()); return;
+      case longTag  : if (t2->as_LongConstant()->value() == (jlong)0) set_canonical(x->x()); return;
       default       : ShouldNotReachHere();
     }
   }

@@ -808,28 +808,41 @@ void Canonicalizer::do_ExceptionObject(ExceptionObject* x) {}
 static bool match_index_and_scale(Instruction*  instr,
                                   Instruction** index,
-                                  int*          log2_scale,
-                                  Instruction** instr_to_unpin) {
-  *instr_to_unpin = NULL;
-
-  // Skip conversion ops
+                                  int*          log2_scale) {
+  // Skip conversion ops. This works only on 32bit because of the implicit l2i that the
+  // unsafe performs.
+#ifndef _LP64
   Convert* convert = instr->as_Convert();
-  if (convert != NULL) {
+  if (convert != NULL && convert->op() == Bytecodes::_i2l) {
+    assert(convert->value()->type() == intType, "invalid input type");
     instr = convert->value();
   }
+#endif
 
   ShiftOp* shift = instr->as_ShiftOp();
   if (shift != NULL) {
-    if (shift->is_pinned()) {
-      *instr_to_unpin = shift;
+    if (shift->op() == Bytecodes::_lshl) {
+      assert(shift->x()->type() == longType, "invalid input type");
+    } else {
+#ifndef _LP64
+      if (shift->op() == Bytecodes::_ishl) {
+        assert(shift->x()->type() == intType, "invalid input type");
+      } else {
+        return false;
+      }
+#else
+      return false;
+#endif
     }
+
     // Constant shift value?
     Constant* con = shift->y()->as_Constant();
     if (con == NULL) return false;
     // Well-known type and value?
     IntConstant* val = con->type()->as_IntConstant();
-    if (val == NULL) return false;
-    if (shift->x()->type() != intType) return false;
+    assert(val != NULL, "Should be an int constant");
+
     *index = shift->x();
     int tmp_scale = val->value();
     if (tmp_scale >= 0 && tmp_scale < 4) {

@@ -842,31 +855,42 @@ static bool match_index_and_scale(Instruction* instr,
   ArithmeticOp* arith = instr->as_ArithmeticOp();
   if (arith != NULL) {
-    if (arith->is_pinned()) {
-      *instr_to_unpin = arith;
+    // See if either arg is a known constant
+    Constant* con = arith->x()->as_Constant();
+    if (con != NULL) {
+      *index = arith->y();
+    } else {
+      con = arith->y()->as_Constant();
+      if (con == NULL) return false;
+      *index = arith->x();
     }
+    long const_value;
     // Check for integer multiply
-    if (arith->op() == Bytecodes::_imul) {
-      // See if either arg is a known constant
-      Constant* con = arith->x()->as_Constant();
-      if (con != NULL) {
-        *index = arith->y();
+    if (arith->op() == Bytecodes::_lmul) {
+      assert((*index)->type() == longType, "invalid input type");
+      LongConstant* val = con->type()->as_LongConstant();
+      assert(val != NULL, "expecting a long constant");
+      const_value = val->value();
+    } else {
+#ifndef _LP64
+      if (arith->op() == Bytecodes::_imul) {
+        assert((*index)->type() == intType, "invalid input type");
+        IntConstant* val = con->type()->as_IntConstant();
+        assert(val != NULL, "expecting an int constant");
+        const_value = val->value();
       } else {
-        con = arith->y()->as_Constant();
-        if (con == NULL) return false;
-        *index = arith->x();
+        return false;
       }
-      // Well-known type and value?
-      IntConstant* val = con->type()->as_IntConstant();
-      if (val == NULL) return false;
-      switch (val->value()) {
-      case 1: *log2_scale = 0; return true;
-      case 2: *log2_scale = 1; return true;
-      case 4: *log2_scale = 2; return true;
-      case 8: *log2_scale = 3; return true;
-      default: return false;
-      }
+#else
+      return false;
+#endif
+    }
+    switch (const_value) {
+      case 1: *log2_scale = 0; return true;
+      case 2: *log2_scale = 1; return true;
+      case 4: *log2_scale = 2; return true;
+      case 8: *log2_scale = 3; return true;
+      default: return false;
     }
   }

@@ -879,29 +903,37 @@ static bool match(UnsafeRawOp* x,
                   Instruction** base,
                   Instruction** index,
                   int*          log2_scale) {
-  Instruction* instr_to_unpin = NULL;
   ArithmeticOp* root = x->base()->as_ArithmeticOp();
   if (root == NULL) return false;
   // Limit ourselves to addition for now
   if (root->op() != Bytecodes::_ladd) return false;
+
+  bool match_found = false;
   // Try to find shift or scale op
-  if (match_index_and_scale(root->y(), index, log2_scale, &instr_to_unpin)) {
+  if (match_index_and_scale(root->y(), index, log2_scale)) {
     *base = root->x();
-  } else if (match_index_and_scale(root->x(), index, log2_scale, &instr_to_unpin)) {
+    match_found = true;
+  } else if (match_index_and_scale(root->x(), index, log2_scale)) {
     *base = root->y();
-  } else if (root->y()->as_Convert() != NULL) {
+    match_found = true;
+  } else if (NOT_LP64(root->y()->as_Convert() != NULL) LP64_ONLY(false)) {
+    // Skipping i2l works only on 32bit because of the implicit l2i that the unsafe performs.
+    // 64bit needs a real sign-extending conversion.
     Convert* convert = root->y()->as_Convert();
-    if (convert->op() == Bytecodes::_i2l && convert->value()->type() == intType) {
+    if (convert->op() == Bytecodes::_i2l) {
+      assert(convert->value()->type() == intType, "should be an int");
       // pick base and index, setting scale at 1
       *base  = root->x();
       *index = convert->value();
       *log2_scale = 0;
-    } else {
-      return false;
+      match_found = true;
     }
-  } else {
-    // doesn't match any expected sequences
-    return false;
+  }
+
+  // The default solution
+  if (!match_found) {
+    *base = root->x();
+    *index = root->y();
+    *log2_scale = 0;
   }
 
   // If the value is pinned then it will be always be computed so

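The switch over the multiplier constant above is the core of the pattern match: an index * C expression can only become a scaled addressing mode when C is a power of two up to 8. A standalone sketch of just that mapping (illustrative, not the HotSpot source):

    #include <cstdio>

    // index * const_value folds into "index << log2_scale" only for 1, 2, 4, 8.
    static bool scale_to_log2(long const_value, int* log2_scale) {
      switch (const_value) {
        case 1: *log2_scale = 0; return true;
        case 2: *log2_scale = 1; return true;
        case 4: *log2_scale = 2; return true;
        case 8: *log2_scale = 3; return true;
        default: return false;  // any other multiplier: no scaled mode
      }
    }

    int main() {
      for (long c : {1L, 2L, 3L, 8L, 16L}) {
        int s;
        if (scale_to_log2(c, &s)) std::printf("index * %ld  ==>  index << %d\n", c, s);
        else                      std::printf("index * %ld  ==>  no scaled addressing mode\n", c);
      }
      return 0;
    }
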
src/share/vm/c1/c1_LIRGenerator.cpp

@@ -2042,6 +2042,8 @@ void LIRGenerator::do_RoundFP(RoundFP* x) {
   }
 }
 
+// Here UnsafeGetRaw may have x->base() and x->index() be int or long
+// on both 64 and 32 bits. Expecting x->base() to be always long on 64bit.
 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
   LIRItem base(x->base(), this);
   LIRItem idx(this);

@@ -2056,50 +2058,73 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
   int   log2_scale = 0;
   if (x->has_index()) {
-    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
     log2_scale = x->log2_scale();
   }
 
   assert(!x->has_index() || idx.value() == x->index(), "should match");
 
   LIR_Opr base_op = base.result();
+  LIR_Opr index_op = idx.result();
 #ifndef _LP64
   if (x->base()->type()->tag() == longTag) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   } else {
     assert(x->base()->type()->tag() == intTag, "must be");
   }
+  if (x->has_index()) {
+    if (x->index()->type()->tag() == longTag) {
+      LIR_Opr long_index_op = index_op;
+      if (x->index()->type()->is_constant()) {
+        long_index_op = new_register(T_LONG);
+        __ move(index_op, long_index_op);
+      }
+      index_op = new_register(T_INT);
+      __ convert(Bytecodes::_l2i, long_index_op, index_op);
+    } else {
+      assert(x->index()->type()->tag() == intTag, "must be");
+    }
+  }
+  // At this point base and index should be all ints.
+  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
+  assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
+#else
+  if (x->has_index()) {
+    if (x->index()->type()->tag() == intTag) {
+      if (!x->index()->type()->is_constant()) {
+        index_op = new_register(T_LONG);
+        __ convert(Bytecodes::_i2l, idx.result(), index_op);
+      }
+    } else {
+      assert(x->index()->type()->tag() == longTag, "must be");
+      if (x->index()->type()->is_constant()) {
+        index_op = new_register(T_LONG);
+        __ move(idx.result(), index_op);
+      }
+    }
+  }
+  // At this point base is a long non-constant
+  // Index is a long register or a int constant.
+  // We allow the constant to stay an int because that would allow us a more compact encoding by
+  // embedding an immediate offset in the address expression. If we have a long constant, we have to
+  // move it into a register first.
+  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
+  assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
+                            (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
 #endif
 
   BasicType dst_type = x->basic_type();
-  LIR_Opr index_op = idx.result();
 
   LIR_Address* addr;
   if (index_op->is_constant()) {
     assert(log2_scale == 0, "must not have a scale");
+    assert(index_op->type() == T_INT, "only int constants supported");
     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
   } else {
 #ifdef X86
-#ifdef _LP64
-    if (!index_op->is_illegal() && index_op->type() == T_INT) {
-      LIR_Opr tmp = new_pointer_register();
-      __ convert(Bytecodes::_i2l, index_op, tmp);
-      index_op = tmp;
-    }
-#endif
     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
 #elif defined(ARM)
     addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
 #else
     if (index_op->is_illegal() || log2_scale == 0) {
-#ifdef _LP64
-      if (!index_op->is_illegal() && index_op->type() == T_INT) {
-        LIR_Opr tmp = new_pointer_register();
-        __ convert(Bytecodes::_i2l, index_op, tmp);
-        index_op = tmp;
-      }
-#endif
       addr = new LIR_Address(base_op, index_op, dst_type);
     } else {
       LIR_Opr tmp = new_pointer_register();

@@ -2126,7 +2151,6 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
   BasicType type = x->basic_type();
 
   if (x->has_index()) {
-    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
     log2_scale = x->log2_scale();
   }

@@ -2149,38 +2173,39 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
   set_no_result(x);
 
   LIR_Opr base_op = base.result();
+  LIR_Opr index_op = idx.result();
+
 #ifndef _LP64
   if (x->base()->type()->tag() == longTag) {
     base_op = new_register(T_INT);
     __ convert(Bytecodes::_l2i, base.result(), base_op);
   } else {
     assert(x->base()->type()->tag() == intTag, "must be");
   }
+  if (x->has_index()) {
+    if (x->index()->type()->tag() == longTag) {
+      index_op = new_register(T_INT);
+      __ convert(Bytecodes::_l2i, idx.result(), index_op);
+    }
+  }
+  // At this point base and index should be all ints and not constants
+  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
+  assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
+#else
+  if (x->has_index()) {
+    if (x->index()->type()->tag() == intTag) {
+      index_op = new_register(T_LONG);
+      __ convert(Bytecodes::_i2l, idx.result(), index_op);
+    }
+  }
+  // At this point base and index are long and non-constant
+  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
+  assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
 #endif
 
-  LIR_Opr index_op = idx.result();
   if (log2_scale != 0) {
     // temporary fix (platform dependent code without shift on Intel would be better)
-    index_op = new_pointer_register();
-#ifdef _LP64
-    if(idx.result()->type() == T_INT) {
-      __ convert(Bytecodes::_i2l, idx.result(), index_op);
-    } else {
-#endif
-      __ move(idx.result(), index_op);
-#ifdef _LP64
-    }
-#endif
+    // TODO: ARM also allows embedded shift in the address
     __ shift_left(index_op, log2_scale, index_op);
   }
-#ifdef _LP64
-  else if(!index_op->is_illegal() && index_op->type() == T_INT) {
-    LIR_Opr tmp = new_pointer_register();
-    __ convert(Bytecodes::_i2l, index_op, tmp);
-    index_op = tmp;
-  }
-#endif
 
   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
   __ move(value.result(), addr);

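The rewritten hunks amount to a width-normalization pass over the unsafe access's base and index before the address expression is formed. A hedged summary of the rules as a standalone decision table (illustrative names, not HotSpot code):

    #include <cstdio>

    enum class Width { Int, Long };

    // What do_UnsafeGetRaw/do_UnsafePutRaw above do with the index operand.
    static const char* normalize_index(bool lp64, Width w, bool is_constant) {
      if (!lp64) {  // 32-bit VM: addresses are ints, so narrow longs
        return (w == Width::Long) ? "convert l2i into a fresh T_INT register"
                                  : "use as-is (already int)";
      }
      if (w == Width::Int) {  // 64-bit VM: widen int variables, keep int constants
        return is_constant ? "keep as int immediate (compact address encoding)"
                           : "convert i2l into a fresh T_LONG register";
      }
      return is_constant ? "move into a T_LONG register first"
                         : "use as-is (already a long register)";
    }

    int main() {
      std::printf("64-bit, int variable:  %s\n", normalize_index(true,  Width::Int,  false));
      std::printf("64-bit, int constant:  %s\n", normalize_index(true,  Width::Int,  true));
      std::printf("64-bit, long constant: %s\n", normalize_index(true,  Width::Long, true));
      std::printf("32-bit, long variable: %s\n", normalize_index(false, Width::Long, false));
      return 0;
    }
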
src/share/vm/classfile/symbolTable.cpp

@@ -205,7 +205,7 @@ Symbol* SymbolTable::lookup(int index, const char* name,
     }
   }
   // If the bucket size is too deep check if this hash code is insufficient.
-  if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) {
+  if (count >= rehash_count && !needs_rehashing()) {
     _needs_rehashing = check_rehash_table(count);
   }
   return NULL;

@@ -656,7 +656,7 @@ oop StringTable::lookup(int index, jchar* name,
     }
   }
   // If the bucket size is too deep check if this hash code is insufficient.
-  if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) {
+  if (count >= rehash_count && !needs_rehashing()) {
    _needs_rehashing = check_rehash_table(count);
   }
   return NULL;

src/share/vm/classfile/symbolTable.hpp

@@ -74,7 +74,7 @@ class TempNewSymbol : public StackObj {
   operator Symbol*()                             { return _temp; }
 };
 
-class SymbolTable : public Hashtable<Symbol*, mtSymbol> {
+class SymbolTable : public RehashableHashtable<Symbol*, mtSymbol> {
   friend class VMStructs;
   friend class ClassFileParser;

@@ -110,10 +110,10 @@ private:
   Symbol* lookup(int index, const char* name, int len, unsigned int hash);
 
   SymbolTable()
-    : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof(HashtableEntry<Symbol*, mtSymbol>)) {}
+    : RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof(HashtableEntry<Symbol*, mtSymbol>)) {}
 
   SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
-    : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof(HashtableEntry<Symbol*, mtSymbol>), t,
+    : RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof(HashtableEntry<Symbol*, mtSymbol>), t,
                 number_of_entries) {}
 
   // Arena for permanent symbols (null class loader) that are never unloaded

@@ -252,7 +252,7 @@ public:
   static int parallel_claimed_index()        { return _parallel_claimed_idx; }
 };
 
-class StringTable : public Hashtable<oop, mtSymbol> {
+class StringTable : public RehashableHashtable<oop, mtSymbol> {
   friend class VMStructs;
 
 private:

@@ -278,11 +278,11 @@ private:
   // in the range [start_idx, end_idx).
   static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed);
 
-  StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize,
+  StringTable() : RehashableHashtable<oop, mtSymbol>((int)StringTableSize,
                               sizeof(HashtableEntry<oop, mtSymbol>)) {}
 
   StringTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
-    : Hashtable<oop, mtSymbol>((int)StringTableSize, sizeof(HashtableEntry<oop, mtSymbol>), t,
+    : RehashableHashtable<oop, mtSymbol>((int)StringTableSize, sizeof(HashtableEntry<oop, mtSymbol>), t,
                      number_of_entries) {}
 public:
   // The string table

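The mechanism that the new RehashableHashtable base class factors out of SymbolTable and StringTable is the trigger shown in the symbolTable.cpp hunks: a lookup that walks an unusually deep bucket flags the table, and the owner rebuilds it with a new hash seed at a safe point. A minimal sketch of that idea with simplified, assumed types (the real class also manages the alternate hash seed):

    #include <functional>
    #include <list>
    #include <string>
    #include <vector>

    class RehashableTable {
      static const size_t rehash_count = 100;  // same threshold role as in HotSpot
      std::vector<std::list<std::string> > _buckets;
      bool _needs_rehashing;

    public:
      explicit RehashableTable(size_t n) : _buckets(n), _needs_rehashing(false) {}

      bool lookup(const std::string& key) {
        size_t idx = std::hash<std::string>()(key) % _buckets.size();
        size_t count = 0;
        for (const std::string& s : _buckets[idx]) {
          if (s == key) return true;
          ++count;
        }
        // Bucket unusually deep: this hash function works badly for the current
        // data, so request a rebuild instead of degrading every future lookup.
        if (count >= rehash_count && !_needs_rehashing) {
          _needs_rehashing = true;
        }
        return false;
      }

      bool needs_rehashing() const { return _needs_rehashing; }
    };
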
src/share/vm/compiler/compileBroker.cpp

@@ -1175,6 +1175,12 @@ void CompileBroker::compile_method_base(methodHandle method,
     return;
   }
 
+  if (TieredCompilation) {
+    // Tiered policy requires MethodCounters to exist before adding a method to
+    // the queue. Create if we don't have them yet.
+    method->get_method_counters(thread);
+  }
+
   // Outputs from the following MutexLocker block:
   CompileTask* task     = NULL;
   bool         blocking = false;

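The invariant this hunk establishes, sketched with assumed types (not the HotSpot classes): the tiered policy reads a method's MethodCounters on the compile-queue consumer side, so the producer must materialize them before the method becomes visible in the queue rather than on first use afterwards.

    #include <memory>

    struct MethodCounters { int invocations = 0; };

    struct Method {
      std::unique_ptr<MethodCounters> counters;
      MethodCounters* get_method_counters() {
        if (!counters) counters.reset(new MethodCounters());  // create lazily
        return counters.get();
      }
    };

    void enqueue_for_compilation(Method& m, bool tiered) {
      if (tiered) {
        m.get_method_counters();  // ensure counters exist before the method is queued
      }
      // ... add m to the compile queue; the consumer may now rely on counters ...
    }
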
src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp

@@ -22,372 +22,386 @@
  *
  */
 
 #include "precompiled.hpp"
+#include "code/codeCache.hpp"
 #include "code/nmethod.hpp"
 #include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "memory/heap.hpp"
 #include "memory/iterator.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/hashtable.inline.hpp"
+#include "utilities/stack.inline.hpp"
+
+PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
-G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL), _free(NULL) {
-  _top = bottom();
-}
-
-void G1CodeRootChunk::reset() {
-  _next = _prev = NULL;
-  _free = NULL;
-  _top = bottom();
-}
-
-void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) {
-  NmethodOrLink* cur = bottom();
-  while (cur != _top) {
-    if (is_nmethod(cur)) {
-      cl->do_code_blob(cur->_nmethod);
-    }
-    cur++;
-  }
-}
-
-bool G1CodeRootChunk::remove_lock_free(nmethod* method) {
-  for (NmethodOrLink* cur = bottom(); cur != _top; cur++) {
-    if (cur->_nmethod == method) {
-      bool result = Atomic::cmpxchg_ptr(NULL, &cur->_nmethod, method) == method;
-
-      if (!result) {
-        // Someone else cleared out this entry.
-        return false;
-      }
-
-      // The method was cleared. Time to link it into the free list.
-      NmethodOrLink* prev_free;
-      do {
-        prev_free = (NmethodOrLink*)_free;
-        cur->_link = prev_free;
-      } while (Atomic::cmpxchg_ptr(cur, &_free, prev_free) != prev_free);
-
-      return true;
-    }
-  }
-
-  return false;
-}
-
-G1CodeRootChunkManager::G1CodeRootChunkManager() : _free_list(), _num_chunks_handed_out(0) {
-  _free_list.initialize();
-  _free_list.set_size(G1CodeRootChunk::word_size());
-}
-
-size_t G1CodeRootChunkManager::fl_mem_size() {
-  return _free_list.count() * _free_list.size();
-}
-
-void G1CodeRootChunkManager::free_all_chunks(FreeList<G1CodeRootChunk>* list) {
-  _num_chunks_handed_out -= list->count();
-  _free_list.prepend(list);
-}
-
-void G1CodeRootChunkManager::free_chunk(G1CodeRootChunk* chunk) {
-  _free_list.return_chunk_at_head(chunk);
-  _num_chunks_handed_out--;
-}
-
-void G1CodeRootChunkManager::purge_chunks(size_t keep_ratio) {
-  size_t keep = _num_chunks_handed_out * keep_ratio / 100;
-  if (keep >= (size_t)_free_list.count()) {
-    return;
-  }
-
-  FreeList<G1CodeRootChunk> temp;
-  temp.initialize();
-  temp.set_size(G1CodeRootChunk::word_size());
-
-  _free_list.getFirstNChunksFromList((size_t)_free_list.count() - keep, &temp);
-
-  G1CodeRootChunk* cur = temp.get_chunk_at_head();
-  while (cur != NULL) {
-    delete cur;
-    cur = temp.get_chunk_at_head();
-  }
-}
-
-size_t G1CodeRootChunkManager::static_mem_size() {
-  return sizeof(G1CodeRootChunkManager);
-}
-
-G1CodeRootChunk* G1CodeRootChunkManager::new_chunk() {
-  G1CodeRootChunk* result = _free_list.get_chunk_at_head();
-  if (result == NULL) {
-    result = new G1CodeRootChunk();
-  }
-  _num_chunks_handed_out++;
-  result->reset();
-  return result;
-}
-
-#ifndef PRODUCT
-
-size_t G1CodeRootChunkManager::num_chunks_handed_out() const {
-  return _num_chunks_handed_out;
-}
-
-size_t G1CodeRootChunkManager::num_free_chunks() const {
-  return (size_t)_free_list.count();
-}
-
-#endif
-
-G1CodeRootChunkManager G1CodeRootSet::_default_chunk_manager;
-
-void G1CodeRootSet::purge_chunks(size_t keep_ratio) {
-  _default_chunk_manager.purge_chunks(keep_ratio);
-}
-
-size_t G1CodeRootSet::free_chunks_static_mem_size() {
-  return _default_chunk_manager.static_mem_size();
-}
-
-size_t G1CodeRootSet::free_chunks_mem_size() {
-  return _default_chunk_manager.fl_mem_size();
-}
-
-G1CodeRootSet::G1CodeRootSet(G1CodeRootChunkManager* manager) : _manager(manager), _list(), _length(0) {
-  if (_manager == NULL) {
-    _manager = &_default_chunk_manager;
-  }
-  _list.initialize();
-  _list.set_size(G1CodeRootChunk::word_size());
-}
-
-G1CodeRootSet::~G1CodeRootSet() {
-  clear();
-}
-
-void G1CodeRootSet::add(nmethod* method) {
-  if (!contains(method)) {
-    // Find the first chunk that isn't full.
-    G1CodeRootChunk* cur = _list.head();
-    while (cur != NULL) {
-      if (!cur->is_full()) {
-        break;
-      }
-      cur = cur->next();
-    }
-
-    // All chunks are full, get a new chunk.
-    if (cur == NULL) {
-      cur = new_chunk();
-      _list.return_chunk_at_head(cur);
-    }
-
-    // Add the nmethod.
-    bool result = cur->add(method);
-
-    guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to newly allocated chunk.", method));
-
-    _length++;
-  }
-}
-
-void G1CodeRootSet::remove_lock_free(nmethod* method) {
-  G1CodeRootChunk* found = find(method);
-  if (found != NULL) {
-    bool result = found->remove_lock_free(method);
-    if (result) {
-      Atomic::dec_ptr((volatile intptr_t*)&_length);
-    }
-  }
-  assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method));
-}
-
-nmethod* G1CodeRootSet::pop() {
-  while (true) {
-    G1CodeRootChunk* cur = _list.head();
-    if (cur == NULL) {
-      assert(_length == 0, "when there are no chunks, there should be no elements");
-      return NULL;
-    }
-    nmethod* result = cur->pop();
-    if (result != NULL) {
-      _length--;
-      return result;
-    } else {
-      free(_list.get_chunk_at_head());
-    }
-  }
-}
-
-G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) {
-  G1CodeRootChunk* cur = _list.head();
-  while (cur != NULL) {
-    if (cur->contains(method)) {
-      return cur;
-    }
-    cur = (G1CodeRootChunk*)cur->next();
-  }
-  return NULL;
-}
-
-void G1CodeRootSet::free(G1CodeRootChunk* chunk) {
-  free_chunk(chunk);
-}
-
-bool G1CodeRootSet::contains(nmethod* method) {
-  return find(method) != NULL;
-}
-
-void G1CodeRootSet::clear() {
-  free_all_chunks(&_list);
-  _length = 0;
-}
-
-void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
-  G1CodeRootChunk* cur = _list.head();
-  while (cur != NULL) {
-    cur->nmethods_do(blk);
-    cur = (G1CodeRootChunk*)cur->next();
-  }
-}
-
-size_t G1CodeRootSet::static_mem_size() {
-  return sizeof(G1CodeRootSet);
-}
-
-size_t G1CodeRootSet::mem_size() {
-  return G1CodeRootSet::static_mem_size() + _list.count() * _list.size();
-}
+class CodeRootSetTable : public Hashtable<nmethod*, mtGC> {
+  friend class G1CodeRootSetTest;
+  typedef HashtableEntry<nmethod*, mtGC> Entry;
+
+  static CodeRootSetTable* volatile _purge_list;
+
+  CodeRootSetTable* _purge_next;
+
+  unsigned int compute_hash(nmethod* nm) {
+    uintptr_t hash = (uintptr_t)nm;
+    return hash ^ (hash >> 7); // code heap blocks are 128byte aligned
+  }
+
+  void remove_entry(Entry* e, Entry* previous);
+  Entry* new_entry(nmethod* nm);
+
+ public:
+  CodeRootSetTable(int size) : Hashtable<nmethod*, mtGC>(size, sizeof(Entry)), _purge_next(NULL) {}
+  ~CodeRootSetTable();
+
+  // Needs to be protected locks
+  bool add(nmethod* nm);
+  bool remove(nmethod* nm);
+
+  // Can be called without locking
+  bool contains(nmethod* nm);
+
+  int entry_size() const { return BasicHashtable<mtGC>::entry_size(); }
+
+  void copy_to(CodeRootSetTable* new_table);
+  void nmethods_do(CodeBlobClosure* blk);
+
+  template<typename CB>
+  int remove_if(CB& should_remove);
+
+  static void purge_list_append(CodeRootSetTable* tbl);
+  static void purge();
+
+  static size_t static_mem_size() {
+    return sizeof(_purge_list);
+  }
+};
+
+CodeRootSetTable* volatile CodeRootSetTable::_purge_list = NULL;
+
+CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) {
+  unsigned int hash = compute_hash(nm);
+  Entry* entry = (Entry*) new_entry_free_list();
+  if (entry == NULL) {
+    entry = (Entry*) NEW_C_HEAP_ARRAY2(char, entry_size(), mtGC, CURRENT_PC);
+  }
+  entry->set_next(NULL);
+  entry->set_hash(hash);
+  entry->set_literal(nm);
+  return entry;
+}
+
+void CodeRootSetTable::remove_entry(Entry* e, Entry* previous) {
+  int index = hash_to_index(e->hash());
+  assert((e == bucket(index)) == (previous == NULL), "if e is the first entry then previous should be null");
+
+  if (previous == NULL) {
+    set_entry(index, e->next());
+  } else {
+    previous->set_next(e->next());
+  }
+  free_entry(e);
+}
+
+CodeRootSetTable::~CodeRootSetTable() {
+  for (int index = 0; index < table_size(); ++index) {
+    for (Entry* e = bucket(index); e != NULL; ) {
+      Entry* to_remove = e;
+      // read next before freeing.
+      e = e->next();
+      unlink_entry(to_remove);
+      FREE_C_HEAP_ARRAY(char, to_remove, mtGC);
+    }
+  }
+  assert(number_of_entries() == 0, "should have removed all entries");
+  free_buckets();
+  for (BasicHashtableEntry<mtGC>* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) {
+    FREE_C_HEAP_ARRAY(char, e, mtGC);
+  }
+}
+
+bool CodeRootSetTable::add(nmethod* nm) {
+  if (!contains(nm)) {
+    Entry* e = new_entry(nm);
+    int index = hash_to_index(e->hash());
+    add_entry(index, e);
+    return true;
+  }
+  return false;
+}
+
+bool CodeRootSetTable::contains(nmethod* nm) {
+  int index = hash_to_index(compute_hash(nm));
+  for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+    if (e->literal() == nm) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool CodeRootSetTable::remove(nmethod* nm) {
+  int index = hash_to_index(compute_hash(nm));
+  Entry* previous = NULL;
+  for (Entry* e = bucket(index); e != NULL; previous = e, e = e->next()) {
+    if (e->literal() == nm) {
+      remove_entry(e, previous);
+      return true;
+    }
+  }
+  return false;
+}
+
+void CodeRootSetTable::copy_to(CodeRootSetTable* new_table) {
+  for (int index = 0; index < table_size(); ++index) {
+    for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+      new_table->add(e->literal());
+    }
+  }
+  new_table->copy_freelist(this);
+}
+
+void CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) {
+  for (int index = 0; index < table_size(); ++index) {
+    for (Entry* e = bucket(index); e != NULL; e = e->next()) {
+      blk->do_code_blob(e->literal());
+    }
+  }
+}
+
+template<typename CB>
+int CodeRootSetTable::remove_if(CB& should_remove) {
+  int num_removed = 0;
+  for (int index = 0; index < table_size(); ++index) {
+    Entry* previous = NULL;
+    Entry* e = bucket(index);
+    while (e != NULL) {
+      Entry* next = e->next();
+      if (should_remove(e->literal())) {
+        remove_entry(e, previous);
+        ++num_removed;
+      } else {
+        previous = e;
+      }
+      e = next;
+    }
+  }
+  return num_removed;
+}
+
+G1CodeRootSet::~G1CodeRootSet() {
+  delete _table;
+}
+
+CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
+  return (CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
+}
+
+void G1CodeRootSet::allocate_small_table() {
+  _table = new CodeRootSetTable(SmallSize);
+}
+
+void CodeRootSetTable::purge_list_append(CodeRootSetTable* table) {
+  for (;;) {
+    table->_purge_next = _purge_list;
+    CodeRootSetTable* old = (CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
+    if (old == table->_purge_next) {
+      break;
+    }
+  }
+}
+
+void CodeRootSetTable::purge() {
+  CodeRootSetTable* table = _purge_list;
+  _purge_list = NULL;
+  while (table != NULL) {
+    CodeRootSetTable* to_purge = table;
+    table = table->_purge_next;
+    delete to_purge;
+  }
+}
+
+void G1CodeRootSet::move_to_large() {
+  CodeRootSetTable* temp = new CodeRootSetTable(LargeSize);
+
+  _table->copy_to(temp);
+
+  CodeRootSetTable::purge_list_append(_table);
+
+  OrderAccess::release_store_ptr(&_table, temp);
+}
+
+void G1CodeRootSet::purge() {
+  CodeRootSetTable::purge();
+}
+
+size_t G1CodeRootSet::static_mem_size() {
+  return CodeRootSetTable::static_mem_size();
+}
+
+void G1CodeRootSet::add(nmethod* method) {
+  bool added = false;
+  if (is_empty()) {
+    allocate_small_table();
+  }
+  added = _table->add(method);
+  if (_length == Threshold) {
+    move_to_large();
+  }
+  if (added) {
+    ++_length;
+  }
+}
+
+bool G1CodeRootSet::remove(nmethod* method) {
+  bool removed = false;
+  if (_table != NULL) {
+    removed = _table->remove(method);
+  }
+  if (removed) {
+    _length--;
+    if (_length == 0) {
+      clear();
+    }
+  }
+  return removed;
+}
+
+bool G1CodeRootSet::contains(nmethod* method) {
+  CodeRootSetTable* table = load_acquire_table();
+  if (table != NULL) {
+    return table->contains(method);
+  }
+  return false;
+}
+
+void G1CodeRootSet::clear() {
+  delete _table;
+  _table = NULL;
+  _length = 0;
+}
+
+size_t G1CodeRootSet::mem_size() {
+  return sizeof(*this) + (_table != NULL ? sizeof(CodeRootSetTable) + _table->entry_size() * _length : 0);
+}
+
+void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
+  if (_table != NULL) {
+    _table->nmethods_do(blk);
+  }
+}
+
+class CleanCallback : public StackObj {
+  class PointsIntoHRDetectionClosure : public OopClosure {
+    HeapRegion* _hr;
+   public:
+    bool _points_into;
+    PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {}
+
+    void do_oop(narrowOop* o) {
+      do_oop_work(o);
+    }
+
+    void do_oop(oop* o) {
+      do_oop_work(o);
+    }
+
+    template <typename T>
+    void do_oop_work(T* p) {
+      if (_hr->is_in(oopDesc::load_decode_heap_oop(p))) {
+        _points_into = true;
+      }
+    }
+  };
+
+  PointsIntoHRDetectionClosure _detector;
+  CodeBlobToOopClosure _blobs;
+
+ public:
+  CleanCallback(HeapRegion* hr) : _detector(hr), _blobs(&_detector, !CodeBlobToOopClosure::FixRelocations) {}
+
+  bool operator() (nmethod* nm) {
+    _detector._points_into = false;
+    _blobs.do_code_blob(nm);
+    return !_detector._points_into;
+  }
+};
+
+void G1CodeRootSet::clean(HeapRegion* owner) {
+  CleanCallback should_clean(owner);
+  if (_table != NULL) {
+    int removed = _table->remove_if(should_clean);
+    assert((size_t)removed <= _length, "impossible");
+    _length -= removed;
+  }
+  if (_length == 0) {
+    clear();
+  }
+}
 
 #ifndef PRODUCT
 
-void G1CodeRootSet::test() {
-  G1CodeRootChunkManager mgr;
-
-  assert(mgr.num_chunks_handed_out() == 0, "Must not have handed out chunks yet");
-
-  assert(G1CodeRootChunkManager::static_mem_size() > sizeof(void*),
-         err_msg("The chunk manager's static memory usage seems too small, is only "SIZE_FORMAT" bytes.", G1CodeRootChunkManager::static_mem_size()));
-
-  // The number of chunks that we allocate for purge testing.
-  size_t const num_chunks = 10;
-
-  {
-    G1CodeRootSet set1(&mgr);
-    assert(set1.is_empty(), "Code root set must be initially empty but is not.");
-
-    assert(G1CodeRootSet::static_mem_size() > sizeof(void*),
-           err_msg("The code root set's static memory usage seems too small, is only "SIZE_FORMAT" bytes", G1CodeRootSet::static_mem_size()));
-
-    set1.add((nmethod*)1);
-    assert(mgr.num_chunks_handed_out() == 1,
-           err_msg("Must have allocated and handed out one chunk, but handed out "SIZE_FORMAT" chunks", mgr.num_chunks_handed_out()));
-    assert(set1.length() == 1,
-           err_msg("Added exactly one element, but set contains "SIZE_FORMAT" elements", set1.length()));
-
-    // G1CodeRootChunk::word_size() is larger than G1CodeRootChunk::num_entries which
-    // we cannot access.
-    for (uint i = 0; i < G1CodeRootChunk::word_size() + 1; i++) {
-      set1.add((nmethod*)1);
-    }
-    assert(mgr.num_chunks_handed_out() == 1,
-           err_msg("Duplicate detection must have prevented allocation of further chunks but allocated "SIZE_FORMAT, mgr.num_chunks_handed_out()));
-    assert(set1.length() == 1,
-           err_msg("Duplicate detection should not have increased the set size but is "SIZE_FORMAT, set1.length()));
-
-    size_t num_total_after_add = G1CodeRootChunk::word_size() + 1;
-    for (size_t i = 0; i < num_total_after_add - 1; i++) {
-      set1.add((nmethod*)(uintptr_t)(2 + i));
-    }
-    assert(mgr.num_chunks_handed_out() > 1,
-           "After adding more code roots, more than one additional chunk should have been handed out");
-    assert(set1.length() == num_total_after_add,
-           err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they need to be in the set, but there are only "SIZE_FORMAT, num_total_after_add, set1.length()));
-
-    size_t num_popped = 0;
-    while (set1.pop() != NULL) {
-      num_popped++;
-    }
-    assert(num_popped == num_total_after_add,
-           err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" were added", num_popped, num_total_after_add));
-    assert(mgr.num_chunks_handed_out() == 0,
-           err_msg("After popping all elements, all chunks must have been returned but there are still "SIZE_FORMAT" additional", mgr.num_chunks_handed_out()));
-
-    mgr.purge_chunks(0);
-    assert(mgr.num_free_chunks() == 0,
-           err_msg("After purging everything, the free list must be empty but still contains "SIZE_FORMAT" chunks", mgr.num_free_chunks()));
-
-    // Add some more handed out chunks.
-    size_t i = 0;
-    while (mgr.num_chunks_handed_out() < num_chunks) {
-      set1.add((nmethod*)i);
-      i++;
-    }
-
-    {
-      // Generate chunks on the free list.
-      G1CodeRootSet set2(&mgr);
-      size_t i = 0;
-      while (mgr.num_chunks_handed_out() < (num_chunks * 2)) {
-        set2.add((nmethod*)i);
-        i++;
-      }
-      // Exit of the scope of the set2 object will call the destructor that generates
-      // num_chunks elements on the free list.
-    }
-
-    assert(mgr.num_chunks_handed_out() == num_chunks,
-           err_msg("Deletion of the second set must have resulted in giving back those, but there are still "SIZE_FORMAT" additional handed out, expecting "SIZE_FORMAT, mgr.num_chunks_handed_out(), num_chunks));
-    assert(mgr.num_free_chunks() == num_chunks,
-           err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list but there are only "SIZE_FORMAT, num_chunks, mgr.num_free_chunks()));
-
-    size_t const test_percentage = 50;
-    mgr.purge_chunks(test_percentage);
-    assert(mgr.num_chunks_handed_out() == num_chunks,
-           err_msg("Purging must not hand out chunks but there are "SIZE_FORMAT, mgr.num_chunks_handed_out()));
-    assert(mgr.num_free_chunks() == (size_t)(mgr.num_chunks_handed_out() * test_percentage / 100),
-           err_msg("Must have purged "SIZE_FORMAT" percent of "SIZE_FORMAT" chunks"
-                   "but there are "SIZE_FORMAT, test_percentage, num_chunks, mgr.num_free_chunks()));
-
-    // Purge the remainder of the chunks on the free list.
-    mgr.purge_chunks(0);
-    assert(mgr.num_free_chunks() == 0, "Free List must be empty");
-    assert(mgr.num_chunks_handed_out() == num_chunks,
-           err_msg("Expected to be "SIZE_FORMAT" chunks handed out from the first set but there are "SIZE_FORMAT, num_chunks, mgr.num_chunks_handed_out()));
-
-    // Exit of the scope of the set1 object will call the destructor that generates
-    // num_chunks additional elements on the free list.
-  }
-
-  assert(mgr.num_chunks_handed_out() == 0,
-         err_msg("Deletion of the only set must have resulted in no chunks handed out, but there is still "SIZE_FORMAT" handed out", mgr.num_chunks_handed_out()));
-  assert(mgr.num_free_chunks() == num_chunks,
-         err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list but there are only "SIZE_FORMAT, num_chunks, mgr.num_free_chunks()));
-
-  // Restore initial state.
-  mgr.purge_chunks(0);
-  assert(mgr.num_free_chunks() == 0, "Free List must be empty");
-  assert(mgr.num_chunks_handed_out() == 0, "No additional elements must have been handed out yet");
-}
+class G1CodeRootSetTest {
+ public:
+  static void test() {
+    {
+      G1CodeRootSet set1;
+      assert(set1.is_empty(), "Code root set must be initially empty but is not.");
+
+      assert(G1CodeRootSet::static_mem_size() == sizeof(void*),
+             err_msg("The code root set's static memory usage is incorrect, "SIZE_FORMAT" bytes", G1CodeRootSet::static_mem_size()));
+
+      set1.add((nmethod*)1);
+      assert(set1.length() == 1,
+             err_msg("Added exactly one element, but set contains "SIZE_FORMAT" elements", set1.length()));
+
+      const size_t num_to_add = (size_t)G1CodeRootSet::Threshold + 1;
+
+      for (size_t i = 1; i <= num_to_add; i++) {
+        set1.add((nmethod*)1);
+      }
+      assert(set1.length() == 1,
+             err_msg("Duplicate detection should not have increased the set size but is "SIZE_FORMAT, set1.length()));
+
+      for (size_t i = 2; i <= num_to_add; i++) {
+        set1.add((nmethod*)(uintptr_t)(i));
+      }
+      assert(set1.length() == num_to_add,
+             err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they need to be in the set, but there are only "SIZE_FORMAT, num_to_add, set1.length()));
+
+      assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
+
+      size_t num_popped = 0;
+      for (size_t i = 1; i <= num_to_add; i++) {
+        bool removed = set1.remove((nmethod*)i);
+        if (removed) {
+          num_popped += 1;
+        } else {
+          break;
+        }
+      }
+      assert(num_popped == num_to_add,
+             err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" were added", num_popped, num_to_add));
+      assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable");
+
+      G1CodeRootSet::purge();
+
+      assert(CodeRootSetTable::_purge_list == NULL, "should have purged old small tables");
+    }
+  }
+};
 
 void TestCodeCacheRemSet_test() {
-  G1CodeRootSet::test();
+  G1CodeRootSetTest::test();
 }
 
 #endif
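
A note on the hash function introduced above, hash ^ (hash >> 7): nmethods live in the code heap at 128-byte-aligned addresses, so the low 7 bits of the pointer are always zero and a plain modulo would map every nmethod into a handful of buckets. Folding higher bits down repopulates the low bits. A toy demonstration (not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    static unsigned int compute_hash(const void* nm) {
      uintptr_t hash = (uintptr_t)nm;
      return (unsigned int)(hash ^ (hash >> 7));  // mix out the alignment zeros
    }

    int main() {
      const unsigned int buckets = 32;
      for (uintptr_t addr = 0x10000; addr < 0x10000 + 8 * 128; addr += 128) {
        // Raw bucket is 0 for every 128-byte-aligned address; mixed bucket varies.
        std::printf("addr %#lx -> raw bucket %lu, mixed bucket %u\n",
                    (unsigned long)addr,
                    (unsigned long)(addr % buckets),
                    compute_hash((const void*)addr) % buckets);
      }
      return 0;
    }
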
src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp

@@ -26,222 +26,64 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
 
 #include "memory/allocation.hpp"
-#include "memory/freeList.hpp"
-#include "runtime/globals.hpp"
 
 class CodeBlobClosure;
-
-// The elements of the G1CodeRootChunk is either:
-// 1) nmethod pointers
-// 2) nodes in an internally chained free list
-typedef union {
-  nmethod* _nmethod;
-  void*    _link;
-} NmethodOrLink;
-
-class G1CodeRootChunk : public CHeapObj<mtGC> {
- private:
-  static const int NUM_ENTRIES = 32;
- public:
-  G1CodeRootChunk*     _next;
-  G1CodeRootChunk*     _prev;
-
-  NmethodOrLink*          _top;
-  // First free position within the chunk.
-  volatile NmethodOrLink* _free;
-
-  NmethodOrLink _data[NUM_ENTRIES];
-
-  NmethodOrLink* bottom() const {
-    return (NmethodOrLink*) &(_data[0]);
-  }
-
-  NmethodOrLink* end() const {
-    return (NmethodOrLink*) &(_data[NUM_ENTRIES]);
-  }
-
-  bool is_link(NmethodOrLink* nmethod_or_link) {
-    return nmethod_or_link->_link == NULL ||
-        (bottom() <= nmethod_or_link->_link
-        && nmethod_or_link->_link < end());
-  }
-
-  bool is_nmethod(NmethodOrLink* nmethod_or_link) {
-    return !is_link(nmethod_or_link);
-  }
-
- public:
-  G1CodeRootChunk();
-  ~G1CodeRootChunk() {}
-
-  static size_t word_size() { return (size_t)(align_size_up_(sizeof(G1CodeRootChunk), HeapWordSize) / HeapWordSize); }
-
-  // FreeList "interface" methods
-
-  G1CodeRootChunk* next() const         { return _next; }
-  G1CodeRootChunk* prev() const         { return _prev; }
-  void set_next(G1CodeRootChunk* v)     { _next = v; assert(v != this, "Boom");}
-  void set_prev(G1CodeRootChunk* v)     { _prev = v; assert(v != this, "Boom");}
-  void clear_next()       { set_next(NULL); }
-  void clear_prev()       { set_prev(NULL); }
-
-  size_t size() const { return word_size(); }
-
-  void link_next(G1CodeRootChunk* ptr)  { set_next(ptr); }
-  void link_prev(G1CodeRootChunk* ptr)  { set_prev(ptr); }
-  void link_after(G1CodeRootChunk* ptr) {
-    link_next(ptr);
-    if (ptr != NULL) ptr->link_prev((G1CodeRootChunk*)this);
-  }
-
-  bool is_free()                 { return true; }
-
-  // New G1CodeRootChunk routines
-
-  void reset();
-
-  bool is_empty() const {
-    return _top == bottom();
-  }
-
-  bool is_full() const {
-    return _top == end() && _free == NULL;
-  }
-
-  bool contains(nmethod* method) {
-    NmethodOrLink* cur = bottom();
-    while (cur != _top) {
-      if (cur->_nmethod == method) return true;
-      cur++;
-    }
-    return false;
-  }
-
-  bool add(nmethod* method) {
-    if (is_full()) {
-      return false;
-    }
-
-    if (_free != NULL) {
-      // Take from internally chained free list
-      NmethodOrLink* first_free = (NmethodOrLink*)_free;
-      _free = (NmethodOrLink*)_free->_link;
-      first_free->_nmethod = method;
-    } else {
-      // Take from top.
-      _top->_nmethod = method;
-      _top++;
-    }
-
-    return true;
-  }
-
-  bool remove_lock_free(nmethod* method);
-
-  void nmethods_do(CodeBlobClosure* blk);
-
-  nmethod* pop() {
-    if (_free != NULL) {
-      // Kill the free list.
-      _free = NULL;
-    }
-
-    while (!is_empty()) {
-      _top--;
-      if (is_nmethod(_top)) {
-        return _top->_nmethod;
-      }
-    }
-
-    return NULL;
-  }
-};
-
-// Manages free chunks.
-class G1CodeRootChunkManager VALUE_OBJ_CLASS_SPEC {
- private:
-  // Global free chunk list management
-  FreeList<G1CodeRootChunk> _free_list;
-  // Total number of chunks handed out
-  size_t _num_chunks_handed_out;
-
- public:
-  G1CodeRootChunkManager();
-
-  G1CodeRootChunk* new_chunk();
-  void free_chunk(G1CodeRootChunk* chunk);
-  // Free all elements of the given list.
-  void free_all_chunks(FreeList<G1CodeRootChunk>* list);
-
-  void initialize();
-  void purge_chunks(size_t keep_ratio);
-
-  static size_t static_mem_size();
-  size_t fl_mem_size();
-
-#ifndef PRODUCT
-  size_t num_chunks_handed_out() const;
-  size_t num_free_chunks() const;
-#endif
-};
+class CodeRootSetTable;
+class HeapRegion;
+class nmethod;
 
 // Implements storage for a set of code roots.
 // All methods that modify the set are not thread-safe except if otherwise noted.
 class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
+  friend class G1CodeRootSetTest;
  private:
-  // Global default free chunk manager instance.
-  static G1CodeRootChunkManager _default_chunk_manager;
 
-  G1CodeRootChunk* new_chunk() { return _manager->new_chunk(); }
-  void free_chunk(G1CodeRootChunk* chunk) { _manager->free_chunk(chunk); }
-  // Free all elements of the given list.
-  void free_all_chunks(FreeList<G1CodeRootChunk>* list) {
-    _manager->free_all_chunks(list);
-  }
+  const static size_t SmallSize = 32;
+  const static size_t Threshold = 24;
+  const static size_t LargeSize = 512;
 
-  // Return the chunk that contains the given nmethod, NULL otherwise.
-  // Scans the list of chunks backwards, as this method is used to add new
-  // entries, which are typically added in bulk for a single nmethod.
-  G1CodeRootChunk* find(nmethod* method);
-  void free(G1CodeRootChunk* chunk);
+  CodeRootSetTable* _table;
+  CodeRootSetTable* load_acquire_table();
 
   size_t _length;
-  FreeList<G1CodeRootChunk> _list;
-  G1CodeRootChunkManager* _manager;
+
+  void move_to_large();
+  void allocate_small_table();
 
  public:
-  // If an instance is initialized with a chunk manager of NULL, use the global
-  // default one.
-  G1CodeRootSet(G1CodeRootChunkManager* manager = NULL);
+  G1CodeRootSet() : _table(NULL), _length(0) {}
   ~G1CodeRootSet();
 
-  static void purge_chunks(size_t keep_ratio);
+  static void purge();
 
-  static size_t free_chunks_static_mem_size();
-  static size_t free_chunks_mem_size();
+  static size_t static_mem_size();
 
-  // Search for the code blob from the recently allocated ones to find duplicates more quickly, as this
-  // method is likely to be repeatedly called with the same nmethod.
   void add(nmethod* method);
 
-  void remove_lock_free(nmethod* method);
-  nmethod* pop();
+  bool remove(nmethod* method);
 
+  // Safe to call without synchronization, but may return false negatives.
   bool contains(nmethod* method);
 
   void clear();
 
   void nmethods_do(CodeBlobClosure* blk) const;
 
-  bool is_empty() { return length() == 0; }
+  // Remove all nmethods which no longer contain pointers into our "owner" region
+  void clean(HeapRegion* owner);
+
+  bool is_empty() {
+    bool empty = length() == 0;
+    assert(empty == (_table == NULL), "is empty only if table is deallocated");
+    return empty;
+  }
 
   // Length in elements
   size_t length() const { return _length; }
 
-  // Static data memory size in bytes of this set.
-  static size_t static_mem_size();
   // Memory size in bytes taken by this set.
   size_t mem_size();
 
   static void test() PRODUCT_RETURN;
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
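
The SmallSize/Threshold/LargeSize constants and load_acquire_table above implement a grow-and-publish scheme: queries may run concurrently with a writer that grows the table, so the old table cannot be freed immediately. A simplified sketch of the scheme with assumed types (HotSpot uses OrderAccess and Atomic rather than std::atomic, and empties the purge list at a GC safepoint):

    #include <atomic>

    struct Table {
      Table* purge_next = nullptr;
      // ... buckets would live here ...
    };

    static std::atomic<Table*> g_table{nullptr};
    static std::atomic<Table*> g_purge_list{nullptr};

    void move_to_large() {
      Table* old_table = g_table.load(std::memory_order_relaxed);
      Table* larger = new Table();  // entries of old_table would be copied here
      if (old_table != nullptr) {
        // Keep the old table alive for concurrent readers: push it on the purge list.
        Table* head = g_purge_list.load(std::memory_order_relaxed);
        do {
          old_table->purge_next = head;
        } while (!g_purge_list.compare_exchange_weak(head, old_table));
      }
      // Publish with release order so a reader that load-acquires g_table
      // sees a fully constructed table.
      g_table.store(larger, std::memory_order_release);
    }

    void purge() {  // call only when no reader can hold a stale table pointer
      Table* t = g_purge_list.exchange(nullptr);
      while (t != nullptr) {
        Table* next = t->purge_next;
        delete t;
        t = next;
      }
    }
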
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
浏览文件 @
054034d1
...
...
@@ -4580,6 +4580,56 @@ class G1KlassScanClosure : public KlassClosure {
}
};
class
G1CodeBlobClosure
:
public
CodeBlobClosure
{
class
HeapRegionGatheringOopClosure
:
public
OopClosure
{
G1CollectedHeap
*
_g1h
;
OopClosure
*
_work
;
nmethod
*
_nm
;
template
<
typename
T
>
void
do_oop_work
(
T
*
p
)
{
_work
->
do_oop
(
p
);
T
oop_or_narrowoop
=
oopDesc
::
load_heap_oop
(
p
);
if
(
!
oopDesc
::
is_null
(
oop_or_narrowoop
))
{
oop
o
=
oopDesc
::
decode_heap_oop_not_null
(
oop_or_narrowoop
);
HeapRegion
*
hr
=
_g1h
->
heap_region_containing_raw
(
o
);
assert
(
!
_g1h
->
obj_in_cs
(
o
)
||
hr
->
rem_set
()
->
strong_code_roots_list_contains
(
_nm
),
"if o still in CS then evacuation failed and nm must already be in the remset"
);
hr
->
add_strong_code_root
(
_nm
);
}
}
public:
HeapRegionGatheringOopClosure
(
OopClosure
*
oc
)
:
_g1h
(
G1CollectedHeap
::
heap
()),
_work
(
oc
),
_nm
(
NULL
)
{}
void
do_oop
(
oop
*
o
)
{
do_oop_work
(
o
);
}
void
do_oop
(
narrowOop
*
o
)
{
do_oop_work
(
o
);
}
void
set_nm
(
nmethod
*
nm
)
{
_nm
=
nm
;
}
};
HeapRegionGatheringOopClosure
_oc
;
public:
G1CodeBlobClosure
(
OopClosure
*
oc
)
:
_oc
(
oc
)
{}
void
do_code_blob
(
CodeBlob
*
cb
)
{
nmethod
*
nm
=
cb
->
as_nmethod_or_null
();
if
(
nm
!=
NULL
)
{
if
(
!
nm
->
test_set_oops_do_mark
())
{
_oc
.
set_nm
(
nm
);
nm
->
oops_do
(
&
_oc
);
nm
->
fix_oop_relocations
();
}
}
}
};
class
G1ParTask
:
public
AbstractGangTask
{
protected:
G1CollectedHeap
*
_g1h
;
...
@@ -4648,22 +4698,6 @@ public:
   }
 };

-class G1CodeBlobClosure : public CodeBlobClosure {
-  OopClosure* _f;
-
- public:
-  G1CodeBlobClosure(OopClosure* f) : _f(f) {}
-
-  void do_code_blob(CodeBlob* blob) {
-    nmethod* that = blob->as_nmethod_or_null();
-    if (that != NULL) {
-      if (!that->test_set_oops_do_mark()) {
-        that->oops_do(_f);
-        that->fix_oop_relocations();
-      }
-    }
-  }
-};
-
   void work(uint worker_id) {
     if (worker_id >= _n_workers) return;  // no work needed this round
...
@@ -4854,7 +4888,7 @@ g1_process_roots(OopClosure* scan_non_heap_roots,
   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);

   // Now scan the complement of the collection set.
-  MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations);
+  G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);

   g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
...
@@ -5901,12 +5935,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
   hot_card_cache->reset_hot_cache();
   hot_card_cache->set_use_cache(true);

-  // Migrate the strong code roots attached to each region in
-  // the collection set. Ideally we would like to do this
-  // after we have finished the scanning/evacuation of the
-  // strong code roots for a particular heap region.
-  migrate_strong_code_roots();
-
   purge_code_root_memory();

   if (g1_policy()->during_initial_mark_pause()) {
...
@@ -6902,13 +6930,8 @@ class RegisterNMethodOopClosure: public OopClosure {
                   " starting at " HR_FORMAT,
                   _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));

-      // HeapRegion::add_strong_code_root() avoids adding duplicate
-      // entries but having duplicates is OK since we "mark" nmethods
-      // as visited when we scan the strong code root lists during the GC.
-      hr->add_strong_code_root(_nm);
-      assert(hr->rem_set()->strong_code_roots_list_contains(_nm),
-             err_msg("failed to add code root " PTR_FORMAT " to remembered set of region " HR_FORMAT,
-                     _nm, HR_FORMAT_PARAMS(hr)));
+      // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
+      hr->add_strong_code_root_locked(_nm);
     }
   }
...
@@ -6935,9 +6958,6 @@ class UnregisterNMethodOopClosure: public OopClosure {
                   _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
       hr->remove_strong_code_root(_nm);
-      assert(!hr->rem_set()->strong_code_roots_list_contains(_nm),
-             err_msg("failed to remove code root " PTR_FORMAT " of region " HR_FORMAT,
-                     _nm, HR_FORMAT_PARAMS(hr)));
     }
   }
...
@@ -6965,28 +6985,9 @@ void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
   nm->oops_do(&reg_cl, true);
 }

-class MigrateCodeRootsHeapRegionClosure : public HeapRegionClosure {
- public:
-  bool doHeapRegion(HeapRegion *hr) {
-    assert(!hr->isHumongous(),
-           err_msg("humongous region " HR_FORMAT " should not have been added to collection set",
-                   HR_FORMAT_PARAMS(hr)));
-    hr->migrate_strong_code_roots();
-    return false;
-  }
-};
-
-void G1CollectedHeap::migrate_strong_code_roots() {
-  MigrateCodeRootsHeapRegionClosure cl;
-  double migrate_start = os::elapsedTime();
-  collection_set_iterate(&cl);
-  double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
-  g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
-}
-
 void G1CollectedHeap::purge_code_root_memory() {
   double purge_start = os::elapsedTime();
-  G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
+  G1CodeRootSet::purge();
   double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
   g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
 }
...
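Both purge_code_root_memory() above and the removed migrate_strong_code_roots() follow the same measurement pattern: sample a monotonic clock around the phase and report the elapsed time in milliseconds to the phase-times bookkeeping. A sketch of that pattern, with std::chrono standing in for os::elapsedTime():

#include <chrono>

double time_phase_ms(void (*phase)()) {
  std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
  phase();  // e.g. the body of a purge or fixup phase
  std::chrono::duration<double, std::milli> elapsed =
      std::chrono::steady_clock::now() - start;
  return elapsed.count();  // the caller records this, as phase_times() does
}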
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
 /*
  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
...
@@ -1633,12 +1633,6 @@ public:
   // Unregister the given nmethod from the G1 heap
   virtual void unregister_nmethod(nmethod* nm);

-  // Migrate the nmethods in the code root lists of the regions
-  // in the collection set to regions in to-space. In the event
-  // of an evacuation failure, nmethods that reference objects
-  // that were not successfully evacuated are not migrated.
-  void migrate_strong_code_roots();
-
   // Free up superfluous code root memory.
   void purge_code_root_memory();
...
src/share/vm/gc_implementation/g1/g1EvacFailure.hpp
...
@@ -217,6 +217,8 @@ public:
       _update_rset_cl->set_region(hr);
       hr->object_iterate(&rspc);
+
+      hr->rem_set()->clean_strong_code_roots(hr);

       hr->note_self_forwarding_removal_end(during_initial_mark,
                                            during_conc_mark,
                                            rspc.marked_bytes());
...
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
...
@@ -274,9 +274,6 @@ double G1GCPhaseTimes::accounted_time_ms() {
     // Now subtract the time taken to fix up roots in generated code
     misc_time_ms += _cur_collection_code_root_fixup_time_ms;

-    // Strong code root migration time
-    misc_time_ms += _cur_strong_code_root_migration_time_ms;
-
     // Strong code root purge time
     misc_time_ms += _cur_strong_code_root_purge_time_ms;
...
@@ -327,7 +324,6 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
     _last_obj_copy_times_ms.print(1, "Object Copy (ms)");
   }
   print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
-  print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms);
   print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
   if (G1StringDedup::is_enabled()) {
     print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
...
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
...
@@ -129,7 +129,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
   double _cur_collection_par_time_ms;
   double _cur_collection_code_root_fixup_time_ms;
-  double _cur_strong_code_root_migration_time_ms;
   double _cur_strong_code_root_purge_time_ms;
   double _cur_evac_fail_recalc_used;
...
@@ -233,10 +232,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
     _cur_collection_code_root_fixup_time_ms = ms;
   }

-  void record_strong_code_root_migration_time(double ms) {
-    _cur_strong_code_root_migration_time_ms = ms;
-  }
-
   void record_strong_code_root_purge_time(double ms) {
     _cur_strong_code_root_purge_time_ms = ms;
   }
...
src/share/vm/gc_implementation/g1/g1RemSet.cpp
...
@@ -109,7 +109,7 @@ class ScanRSClosure : public HeapRegionClosure {
   G1CollectedHeap* _g1h;

   OopsInHeapRegionClosure* _oc;
-  CodeBlobToOopClosure* _code_root_cl;
+  CodeBlobClosure* _code_root_cl;

   G1BlockOffsetSharedArray* _bot_shared;
   G1SATBCardTableModRefBS* _ct_bs;
...
@@ -121,7 +121,7 @@ class ScanRSClosure : public HeapRegionClosure {
 public:
   ScanRSClosure(OopsInHeapRegionClosure* oc,
-                CodeBlobToOopClosure* code_root_cl,
+                CodeBlobClosure* code_root_cl,
                 uint worker_i) :
     _oc(oc),
     _code_root_cl(code_root_cl),
...
@@ -241,7 +241,7 @@ public:
 };

 void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
-                      CodeBlobToOopClosure* code_root_cl,
+                      CodeBlobClosure* code_root_cl,
                       uint worker_i) {
   double rs_time_start = os::elapsedTime();
   HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
...
@@ -320,7 +320,7 @@ void G1RemSet::cleanupHRRS() {
 }

 void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
-                                           CodeBlobToOopClosure* code_root_cl,
+                                           CodeBlobClosure* code_root_cl,
                                            uint worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
...
src/share/vm/gc_implementation/g1/g1RemSet.hpp
...
@@ -96,7 +96,7 @@ public:
   // the "i" passed to the calling thread's work(i) function.
   // In the sequential case this param will be ignored.
   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                   CodeBlobToOopClosure* code_root_cl,
+                                   CodeBlobClosure* code_root_cl,
                                    uint worker_i);

   // Prepare for and cleanup after an oops_into_collection_set_do
...
@@ -108,7 +108,7 @@ public:
   void cleanup_after_oops_into_collection_set_do();

   void scanRS(OopsInHeapRegionClosure* oc,
-              CodeBlobToOopClosure* code_root_cl,
+              CodeBlobClosure* code_root_cl,
               uint worker_i);

   void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
...
src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp
...
@@ -253,6 +253,7 @@ public:
     size_t occupied_cards = hrrs->occupied();
     size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
     if (code_root_mem_sz > max_code_root_mem_sz()) {
+      _max_code_root_mem_sz = code_root_mem_sz;
       _max_code_root_mem_sz_region = r;
     }
     size_t code_root_elems = hrrs->strong_code_roots_list_length();
...
src/share/vm/gc_implementation/g1/g1_globals.hpp
...
@@ -285,10 +285,6 @@
   product(uintx, G1MixedGCCountTarget, 8,                                   \
           "The target number of mixed GCs after a marking cycle.")          \
                                                                             \
-  experimental(uintx, G1CodeRootsChunkCacheKeepPercent, 10,                 \
-          "The amount of code root chunks that should be kept at most "    \
-          "as percentage of already allocated.")                           \
-                                                                            \
   experimental(bool, G1ReclaimDeadHumongousObjectsAtYoungGC, true,         \
           "Try to reclaim dead large objects at every young GC.")          \
                                                                             \
...
src/share/vm/gc_implementation/g1/heapRegion.cpp
...
@@ -549,19 +549,15 @@ void HeapRegion::add_strong_code_root(nmethod* nm) {
   hrrs->add_strong_code_root(nm);
 }

-void HeapRegion::remove_strong_code_root(nmethod* nm) {
+void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
   HeapRegionRemSet* hrrs = rem_set();
-  hrrs->remove_strong_code_root(nm);
+  hrrs->add_strong_code_root_locked(nm);
 }

-void HeapRegion::migrate_strong_code_roots() {
-  assert(in_collection_set(), "only collection set regions");
-  assert(!isHumongous(),
-         err_msg("humongous region " HR_FORMAT " should not have been added to collection set",
-                 HR_FORMAT_PARAMS(this)));
+void HeapRegion::remove_strong_code_root(nmethod* nm) {
   HeapRegionRemSet* hrrs = rem_set();
-  hrrs->migrate_strong_code_roots();
+  hrrs->remove_strong_code_root(nm);
 }

 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
...
src/share/vm/gc_implementation/g1/heapRegion.hpp
...
@@ -772,14 +772,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // Routines for managing a list of code roots (attached to the
   // this region's RSet) that point into this heap region.
   void add_strong_code_root(nmethod* nm);
+  void add_strong_code_root_locked(nmethod* nm);
   void remove_strong_code_root(nmethod* nm);

-  // During a collection, migrate the successfully evacuated
-  // strong code roots that referenced into this region to the
-  // new regions that they now point into. Unsuccessfully
-  // evacuated code roots are not migrated.
-  void migrate_strong_code_roots();
-
   // Applies blk->do_code_blob() to each of the entries in
   // the strong code roots list for this region
   void strong_code_roots_do(CodeBlobClosure* blk) const;
...
src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
...
@@ -923,8 +923,24 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
 }

 // Code roots support
+//
+// The code root set is protected by two separate locking schemes
+// When at safepoint the per-hrrs lock must be held during modifications
+// except when doing a full gc.
+// When not at safepoint the CodeCache_lock must be held during modifications.
+// When concurrent readers access the contains() function
+// (during the evacuation phase) no removals are allowed.

 void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
   assert(nm != NULL, "sanity");
+
+  // Optimistic unlocked contains-check
+  if (!_code_roots.contains(nm)) {
+    MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
+    add_strong_code_root_locked(nm);
+  }
+}
+
+void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
+  assert(nm != NULL, "sanity");
   _code_roots.add(nm);
 }
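add_strong_code_root() above is the fast path of a check-then-lock idiom: probe without the lock first, and only take the per-remset lock on a miss. This is sound only because the new code root set's contains() is documented as safe to call without synchronization (a false negative merely sends the caller to the locked slow path). A standalone sketch of the idiom; note that std::unordered_set itself gives no such guarantee for racing readers, so it stands in here for a table designed for lock-free reads:

#include <mutex>
#include <unordered_set>

class CodeRootListSketch {
  std::unordered_set<const void*> _set;  // stand-in for the code root table
  std::mutex _m;

 public:
  void add(const void* nm) {
    // Optimistic unlocked contains-check: the common case is re-adding an
    // nmethod that is already present, which stays lock-free.
    if (!contains(nm)) {
      std::lock_guard<std::mutex> guard(_m);
      _set.insert(nm);  // insert() is idempotent, so duplicates are impossible
    }
  }

  bool contains(const void* nm) {
    // Mirrors the G1 comment "safe to call without synchronization, but may
    // return false negatives". A real implementation needs a table built for
    // concurrent lock-free readers; std::unordered_set is only a placeholder.
    return _set.find(nm) != _set.end();
  }
};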
...
@@ -933,98 +949,21 @@ void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
   assert(nm != NULL, "sanity");
-  assert_locked_or_safepoint(CodeCache_lock);

-  _code_roots.remove_lock_free(nm);
+  MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
+  _code_roots.remove(nm);

   // Check that there were no duplicates
   guarantee(!_code_roots.contains(nm), "duplicate entry found");
 }

-class NMethodMigrationOopClosure : public OopClosure {
-  G1CollectedHeap* _g1h;
-  HeapRegion* _from;
-  nmethod* _nm;
-
-  uint _num_self_forwarded;
-
-  template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      if (_from->is_in(obj)) {
-        // Reference still points into the source region.
-        // Since roots are immediately evacuated this means that
-        // we must have self forwarded the object
-        assert(obj->is_forwarded(),
-               err_msg("code roots should be immediately evacuated. "
-                       "Ref: " PTR_FORMAT ", Obj: " PTR_FORMAT ", Region: " HR_FORMAT,
-                       p, (void*) obj, HR_FORMAT_PARAMS(_from)));
-        assert(obj->forwardee() == obj,
-               err_msg("not self forwarded? obj = " PTR_FORMAT, (void*)obj));
-
-        // The object has been self forwarded.
-        // Note, if we're during an initial mark pause, there is
-        // no need to explicitly mark object. It will be marked
-        // during the regular evacuation failure handling code.
-        _num_self_forwarded++;
-      } else {
-        // The reference points into a promotion or to-space region
-        HeapRegion* to = _g1h->heap_region_containing(obj);
-        to->rem_set()->add_strong_code_root(_nm);
-      }
-    }
-  }
-
-public:
-  NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
-    _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
-
-  void do_oop(narrowOop* p) { do_oop_work(p); }
-  void do_oop(oop* p)       { do_oop_work(p); }
-
-  uint retain() { return _num_self_forwarded > 0; }
-};
-
-void HeapRegionRemSet::migrate_strong_code_roots() {
-  assert(hr()->in_collection_set(), "only collection set regions");
-  assert(!hr()->isHumongous(),
-         err_msg("humongous region " HR_FORMAT " should not have been added to the collection set",
-                 HR_FORMAT_PARAMS(hr())));
-
-  ResourceMark rm;
-
-  // List of code blobs to retain for this region
-  GrowableArray<nmethod*> to_be_retained(10);
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  while (!_code_roots.is_empty()) {
-    nmethod *nm = _code_roots.pop();
-    if (nm != NULL) {
-      NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
-      nm->oops_do(&oop_cl);
-      if (oop_cl.retain()) {
-        to_be_retained.push(nm);
-      }
-    }
-  }
-
-  // Now push any code roots we need to retain
-  assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
-         "Retained nmethod list must be empty or evacuation of this region failed");
-  while (to_be_retained.is_nonempty()) {
-    nmethod* nm = to_be_retained.pop();
-    assert(nm != NULL, "sanity");
-    add_strong_code_root(nm);
-  }
-}
-
 void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
   _code_roots.nmethods_do(blk);
 }

+void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
+  _code_roots.clean(hr);
+}
+
 size_t HeapRegionRemSet::strong_code_roots_mem_size() {
   return _code_roots.mem_size();
 }
...
src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
...
@@ -353,13 +353,13 @@ public:
   // Returns the memory occupancy of all static data structures associated
   // with remembered sets.
   static size_t static_mem_size() {
-    return OtherRegionsTable::static_mem_size() + G1CodeRootSet::free_chunks_static_mem_size();
+    return OtherRegionsTable::static_mem_size() + G1CodeRootSet::static_mem_size();
   }

   // Returns the memory occupancy of all free_list data structures associated
   // with remembered sets.
   static size_t fl_mem_size() {
-    return OtherRegionsTable::fl_mem_size() + G1CodeRootSet::free_chunks_mem_size();
+    return OtherRegionsTable::fl_mem_size();
   }

   bool contains_reference(OopOrNarrowOopStar from) const {
...
@@ -369,18 +369,15 @@ public:
   // Routines for managing the list of code roots that point into
   // the heap region that owns this RSet.
   void add_strong_code_root(nmethod* nm);
+  void add_strong_code_root_locked(nmethod* nm);
   void remove_strong_code_root(nmethod* nm);

-  // During a collection, migrate the successfully evacuated strong
-  // code roots that referenced into the region that owns this RSet
-  // to the RSets of the new regions that they now point into.
-  // Unsuccessfully evacuated code roots are not migrated.
-  void migrate_strong_code_roots();
-
   // Applies blk->do_code_blob() to each of the entries in
   // the strong code roots list
   void strong_code_roots_do(CodeBlobClosure* blk) const;

+  void clean_strong_code_roots(HeapRegion* hr);
+
   // Returns the number of elements in the strong code roots list
   size_t strong_code_roots_list_length() const {
     return _code_roots.length();
...
src/share/vm/memory/freeList.cpp
...
@@ -34,7 +34,6 @@
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
 #endif // INCLUDE_ALL_GCS

 // Free list. A FreeList is used to access a linked list of chunks
...
@@ -333,5 +332,4 @@ template class FreeList<Metablock>;
 template class FreeList<Metachunk>;
 #if INCLUDE_ALL_GCS
 template class FreeList<FreeChunk>;
-template class FreeList<G1CodeRootChunk>;
 #endif // INCLUDE_ALL_GCS
src/share/vm/oops/method.cpp
...
@@ -93,7 +93,7 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) {
   set_hidden(false);
   set_dont_inline(false);
   set_method_data(NULL);
-  set_method_counters(NULL);
+  clear_method_counters();
   set_vtable_index(Method::garbage_vtable_index);

   // Fix and bury in Method*
...
@@ -117,7 +117,7 @@ void Method::deallocate_contents(ClassLoaderData* loader_data) {
   MetadataFactory::free_metadata(loader_data, method_data());
   set_method_data(NULL);
   MetadataFactory::free_metadata(loader_data, method_counters());
-  set_method_counters(NULL);
+  clear_method_counters();

   // The nmethod will be gone when we get here.
   if (code() != NULL) _code = NULL;
 }
...
@@ -388,9 +388,7 @@ MethodCounters* Method::build_method_counters(Method* m, TRAPS) {
   methodHandle mh(m);
   ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
   MethodCounters* counters = MethodCounters::allocate(loader_data, CHECK_NULL);
-  if (mh->method_counters() == NULL) {
-    mh->set_method_counters(counters);
-  } else {
+  if (!mh->init_method_counters(counters)) {
     MetadataFactory::free_metadata(loader_data, counters);
   }
   return mh->method_counters();
...
@@ -852,7 +850,7 @@ void Method::unlink_method() {
   assert(!DumpSharedSpaces || _method_data == NULL, "unexpected method data?");

   set_method_data(NULL);
-  set_method_counters(NULL);
+  clear_method_counters();
 }

 // Called when the method_holder is getting linked. Setup entrypoints so the method
...
src/share/vm/oops/method.hpp
...
@@ -365,11 +365,13 @@ class Method : public Metadata {
     return _method_counters;
   }

-  void set_method_counters(MethodCounters* counters) {
-    // The store into method must be released. On platforms without
-    // total store order (TSO) the reference may become visible before
-    // the initialization of data otherwise.
-    OrderAccess::release_store_ptr((volatile void*)&_method_counters, counters);
+  void clear_method_counters() {
+    _method_counters = NULL;
+  }
+
+  bool init_method_counters(MethodCounters* counters) {
+    // Try to install a pointer to MethodCounters, return true on success.
+    return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL;
   }

 #ifdef TIERED
...
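init_method_counters() exists because build_method_counters() can race: several threads may each allocate a MethodCounters, but only one pointer may be installed. A standalone sketch of the same install-once idiom, with std::atomic standing in for Atomic::cmpxchg_ptr; the type and function names here are illustrative, not HotSpot's:

#include <atomic>

struct Counters { long invocations; };

std::atomic<Counters*> g_counters(nullptr);

Counters* get_or_create_counters() {
  Counters* current = g_counters.load(std::memory_order_acquire);
  if (current != nullptr) {
    return current;
  }
  Counters* candidate = new Counters();
  candidate->invocations = 0;
  Counters* expected = nullptr;
  // Publish with a release CAS so the initialized fields are visible to
  // readers on weakly ordered platforms (the point of the removed
  // OrderAccess::release_store_ptr comment above).
  if (g_counters.compare_exchange_strong(expected, candidate,
                                         std::memory_order_release,
                                         std::memory_order_acquire)) {
    return candidate;
  }
  delete candidate;   // another thread won the race
  return expected;    // on CAS failure, 'expected' holds the winner's pointer
}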
src/share/vm/utilities/hashtable.cpp
...
@@ -36,21 +36,22 @@
 #include "utilities/numberSeq.hpp"

-// This is a generic hashtable, designed to be used for the symbol
-// and string tables.
-//
-// It is implemented as an open hash table with a fixed number of buckets.
-//
-// %note:
-//  - HashtableEntrys are allocated in blocks to reduce the space overhead.
+// This hashtable is implemented as an open hash table with a fixed number of buckets.

-template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
-  BasicHashtableEntry<F>* entry;
-
-  if (_free_list) {
+template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
+  BasicHashtableEntry<F>* entry = NULL;
+  if (_free_list != NULL) {
     entry = _free_list;
     _free_list = _free_list->next();
-  } else {
+  }
+  return entry;
+}
+
+// HashtableEntrys are allocated in blocks to reduce the space overhead.
+template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
+  BasicHashtableEntry<F>* entry = new_entry_free_list();
+
+  if (entry == NULL) {
     if (_first_free_entry + _entry_size >= _end_block) {
       int block_size = MIN2(512, MAX2((int)_table_size/2, (int)_number_of_entries));
       int len = _entry_size * block_size;
...
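The refactoring above splits entry allocation into two steps: try to recycle an entry from the free list (new_entry_free_list()), otherwise bump-allocate out of a block sized relative to the table. A standalone sketch of that scheme with illustrative types and a fixed block size:

#include <cstdlib>

struct Entry {
  Entry* next;
  unsigned hash;
};

class EntryAllocator {
  Entry* _free_list;
  char*  _first_free;   // bump pointer inside the current block
  char*  _end_block;
  static const int kBlockEntries = 512;  // illustrative block size

  // Step 1: recycle a previously freed entry if one is available.
  Entry* new_entry_free_list() {
    Entry* e = _free_list;
    if (e != NULL) {
      _free_list = e->next;
    }
    return e;
  }

 public:
  EntryAllocator() : _free_list(NULL), _first_free(NULL), _end_block(NULL) {}

  Entry* new_entry(unsigned hash) {
    Entry* e = new_entry_free_list();
    if (e == NULL) {
      // Step 2: bump-allocate; grab a fresh block when the current one is
      // exhausted. Blocks are intentionally never freed in this sketch,
      // mirroring block-allocated entries that live as long as the table.
      if (_first_free == NULL || _first_free + sizeof(Entry) > _end_block) {
        _first_free = static_cast<char*>(std::malloc(kBlockEntries * sizeof(Entry)));
        _end_block = _first_free + kBlockEntries * sizeof(Entry);
      }
      e = reinterpret_cast<Entry*>(_first_free);
      _first_free += sizeof(Entry);
    }
    e->next = NULL;
    e->hash = hash;
    return e;
  }

  void free_entry(Entry* e) {
    e->next = _free_list;  // push back onto the free list for reuse
    _free_list = e;
  }
};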
@@ -83,9 +84,9 @@ template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(
 // This is somewhat an arbitrary heuristic but if one bucket gets to
 // rehash_count which is currently 100, there's probably something wrong.

-template <MEMFLAGS F> bool BasicHashtable<F>::check_rehash_table(int count) {
-  assert(table_size() != 0, "underflow");
-  if (count > (((double)number_of_entries()/(double)table_size())*rehash_multiple)) {
+template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
+  assert(this->table_size() != 0, "underflow");
+  if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
     // Set a flag for the next safepoint, which should be at some guaranteed
     // safepoint interval.
     return true;
...
@@ -93,13 +94,13 @@ template <MEMFLAGS F> bool BasicHashtable<F>::check_rehash_table(int count) {
   return false;
 }

-template <class T, MEMFLAGS F> juint Hashtable<T, F>::_seed = 0;
+template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;

 // Create a new table and using alternate hash code, populate the new table
 // with the existing elements. This can be used to change the hash code
 // and could in the future change the size of the table.

-template <class T, MEMFLAGS F> void Hashtable<T, F>::move_to(Hashtable<T, F>* new_table) {
+template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {

   // Initialize the global seed for hashing.
   _seed = AltHashing::compute_seed();
...
@@ -109,7 +110,7 @@ template <class T, MEMFLAGS F> void Hashtable<T, F>::move_to(Hashtable<T, F>* ne
   // Iterate through the table and create a new entry for the new table
   for (int i = 0; i < new_table->table_size(); ++i) {
-    for (HashtableEntry<T, F>* p = bucket(i); p != NULL; ) {
+    for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
       HashtableEntry<T, F>* next = p->next();
       T string = p->literal();
       // Use alternate hashing algorithm on the symbol in the first table
...
@@ -238,11 +239,11 @@ template <class T, MEMFLAGS F> void Hashtable<T, F>::reverse(void* boundary) {
   }
 }

-template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(Symbol *symbol) {
+template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
   return symbol->size() * HeapWordSize;
 }

-template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(oop oop) {
+template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) {
   // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
   // and the String.value array is shared by several Strings. However, starting from JDK8,
   // the String.value array is not shared anymore.
...
@@ -255,12 +256,12 @@ template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(oop oop) {
 // Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
 // add a new function Hashtable<T, F>::literal_size(MyNewType lit)

-template <class T, MEMFLAGS F> void Hashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
+template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
   NumberSeq summary;
   int literal_bytes = 0;
   for (int i = 0; i < this->table_size(); ++i) {
     int count = 0;
-    for (HashtableEntry<T, F>* e = bucket(i);
+    for (HashtableEntry<T, F>* e = this->bucket(i);
          e != NULL; e = e->next()) {
       count++;
       literal_bytes += literal_size(e->literal());
...
@@ -270,7 +271,7 @@ template <class T, MEMFLAGS F> void Hashtable<T, F>::dump_table(outputStream* st
   double num_buckets = summary.num();
   double num_entries = summary.sum();

-  int bucket_bytes = (int)num_buckets * sizeof(bucket(0));
+  int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
   int entry_bytes  = (int)num_entries * sizeof(HashtableEntry<T, F>);
   int total_bytes = literal_bytes + bucket_bytes + entry_bytes;
...
@@ -352,12 +353,20 @@ template <MEMFLAGS F> void BasicHashtable<F>::verify_lookup_length(double load)
 #endif

 // Explicitly instantiate these types
+#if INCLUDE_ALL_GCS
+template class Hashtable<nmethod*, mtGC>;
+template class HashtableEntry<nmethod*, mtGC>;
+template class BasicHashtable<mtGC>;
+#endif
 template class Hashtable<ConstantPool*, mtClass>;
+template class RehashableHashtable<Symbol*, mtSymbol>;
+template class RehashableHashtable<oopDesc*, mtSymbol>;
 template class Hashtable<Symbol*, mtSymbol>;
 template class Hashtable<Klass*, mtClass>;
 template class Hashtable<oop, mtClass>;
 #if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
 template class Hashtable<oop, mtSymbol>;
+template class RehashableHashtable<oop, mtSymbol>;
 #endif // SOLARIS || CHECK_UNHANDLED_OOPS
 template class Hashtable<oopDesc*, mtSymbol>;
 template class Hashtable<Symbol*, mtClass>;
...
src/share/vm/utilities/hashtable.hpp
...
@@ -178,11 +178,6 @@ protected:
   void verify_lookup_length(double load);
 #endif

-  enum {
-    rehash_count = 100,
-    rehash_multiple = 60
-  };
-
   void initialize(int table_size, int entry_size, int number_of_entries);

   // Accessor
...
@@ -194,12 +189,12 @@ protected:
   // The following method is not MT-safe and must be done under lock.
   BasicHashtableEntry<F>** bucket_addr(int i) { return _buckets[i].entry_addr(); }

+  // Attempt to get an entry from the free list
+  BasicHashtableEntry<F>* new_entry_free_list();
+
   // Table entry management
   BasicHashtableEntry<F>* new_entry(unsigned int hashValue);

-  // Check that the table is unbalanced
-  bool check_rehash_table(int count);
-
   // Used when moving the entry to another table
   // Clean up links, but do not add to free_list
   void unlink_entry(BasicHashtableEntry<F>* entry) {
...
@@ -277,8 +272,30 @@ protected:
     return (HashtableEntry<T, F>**)BasicHashtable<F>::bucket_addr(i);
   }
+};
+
+template <class T, MEMFLAGS F> class RehashableHashtable : public Hashtable<T, F> {
+ protected:
+  enum {
+    rehash_count = 100,
+    rehash_multiple = 60
+  };
+
+  // Check that the table is unbalanced
+  bool check_rehash_table(int count);
+
+ public:
+  RehashableHashtable(int table_size, int entry_size)
+    : Hashtable<T, F>(table_size, entry_size) { }
+
+  RehashableHashtable(int table_size, int entry_size,
+                      HashtableBucket<F>* buckets, int number_of_entries)
+    : Hashtable<T, F>(table_size, entry_size, buckets, number_of_entries) { }
+
   // Function to move these elements into the new table.
-  void move_to(Hashtable<T, F>* new_table);
+  void move_to(RehashableHashtable<T, F>* new_table);
   static bool use_alternate_hashcode()  { return _seed != 0; }
   static juint seed()                   { return _seed; }
...
@@ -292,7 +309,6 @@ protected:
   static int literal_size(ConstantPool *cp) { Unimplemented(); return 0; }
   static int literal_size(Klass *k)         { Unimplemented(); return 0; }

 public:
   void dump_table(outputStream* st, const char *table_name);

 private:
...
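RehashableHashtable exists so that tables vulnerable to hash collision attacks (the symbol and string tables) can be rebuilt under a freshly seeded hash code, which is what move_to() does. A sketch of the idea; the seeded mixing function below is a placeholder, not HotSpot's AltHashing:

#include <cstdint>
#include <random>
#include <string>
#include <vector>

typedef std::vector<std::vector<std::string> > Table;

static uint32_t seeded_hash(uint32_t seed, const std::string& s) {
  uint32_t h = seed;
  for (size_t i = 0; i < s.size(); i++) {
    h = h * 31 + (unsigned char)s[i];  // placeholder mixing function
  }
  return h;
}

void move_to(const Table& old_table, Table& new_table, uint32_t& seed) {
  std::random_device rd;
  seed = rd();  // fresh global seed, as in _seed = AltHashing::compute_seed()
  for (size_t i = 0; i < old_table.size(); i++) {
    for (size_t j = 0; j < old_table[i].size(); j++) {
      const std::string& literal = old_table[i][j];
      // Re-bucket under the seeded hash so crafted collisions disperse.
      size_t idx = seeded_hash(seed, literal) % new_table.size();
      new_table[idx].push_back(literal);
    }
  }
}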
test/Makefile
...
@@ -180,8 +180,8 @@ ifdef TESTDIRS
   JTREG_TESTDIRS = $(TESTDIRS)
 endif

-# Default JTREG to run (win32 script works for everybody)
-JTREG = $(JT_HOME)/win32/bin/jtreg
+# Default JTREG to run
+JTREG = $(JT_HOME)/bin/jtreg

 # Option to tell jtreg to not run tests marked with "ignore"
 ifeq ($(PLATFORM), windows)
...
test/compiler/unsafe/UnsafeRaw.java
new file mode 100644
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8058744
* @summary Invalid pattern-matching of address computations in raw unsafe
* @library /testlibrary
* @run main/othervm -Xbatch UnsafeRaw
*/
import com.oracle.java.testlibrary.Utils;
import java.util.Random;

public class UnsafeRaw {
  public static class Tests {
    public static int int_index(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
      return unsafe.getInt(base + (index << 2));
    }
    public static int long_index(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
      return unsafe.getInt(base + (index << 2));
    }
    public static int int_index_back_ashift(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
      return unsafe.getInt(base + (index >> 2));
    }
    public static int int_index_back_lshift(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
      return unsafe.getInt(base + (index >>> 2));
    }
    public static int long_index_back_ashift(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
      return unsafe.getInt(base + (index >> 2));
    }
    public static int long_index_back_lshift(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
      return unsafe.getInt(base + (index >>> 2));
    }
    public static int int_const_12345678_index(sun.misc.Unsafe unsafe, long base) throws Exception {
      int idx4 = 0x12345678;
      return unsafe.getInt(base + idx4);
    }
    public static int long_const_1234567890abcdef_index(sun.misc.Unsafe unsafe, long base) throws Exception {
      long idx5 = 0x1234567890abcdefL;
      return unsafe.getInt(base + idx5);
    }
    public static int int_index_mul(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
      return unsafe.getInt(base + (index * 4));
    }
    public static int long_index_mul(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
      return unsafe.getInt(base + (index * 4));
    }
    public static int int_index_mul_scale_16(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
      return unsafe.getInt(base + (index * 16));
    }
    public static int long_index_mul_scale_16(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
      return unsafe.getInt(base + (index * 16));
    }
  }

  public static void main(String[] args) throws Exception {
    sun.misc.Unsafe unsafe = Utils.getUnsafe();
    final int array_size = 128;
    final int element_size = 4;
    final int magic = 0x12345678;

    Random rnd = new Random();

    long array = unsafe.allocateMemory(array_size * element_size); // 128 ints
    long addr = array + array_size * element_size / 2; // something in the middle to work with
    unsafe.putInt(addr, magic);
    for (int j = 0; j < 100000; j++) {
      if (Tests.int_index(unsafe, addr, 0) != magic) throw new Exception();
      if (Tests.long_index(unsafe, addr, 0) != magic) throw new Exception();
      if (Tests.int_index_mul(unsafe, addr, 0) != magic) throw new Exception();
      if (Tests.long_index_mul(unsafe, addr, 0) != magic) throw new Exception();
      {
        long idx1 = rnd.nextLong();
        long addr1 = addr - (idx1 << 2);
        if (Tests.long_index(unsafe, addr1, idx1) != magic) throw new Exception();
      }
      {
        long idx2 = rnd.nextLong();
        long addr2 = addr - (idx2 >> 2);
        if (Tests.long_index_back_ashift(unsafe, addr2, idx2) != magic) throw new Exception();
      }
      {
        long idx3 = rnd.nextLong();
        long addr3 = addr - (idx3 >>> 2);
        if (Tests.long_index_back_lshift(unsafe, addr3, idx3) != magic) throw new Exception();
      }
      {
        long idx4 = 0x12345678;
        long addr4 = addr - idx4;
        if (Tests.int_const_12345678_index(unsafe, addr4) != magic) throw new Exception();
      }
      {
        long idx5 = 0x1234567890abcdefL;
        long addr5 = addr - idx5;
        if (Tests.long_const_1234567890abcdef_index(unsafe, addr5) != magic) throw new Exception();
      }
      {
        int idx6 = rnd.nextInt();
        long addr6 = addr - (idx6 >> 2);
        if (Tests.int_index_back_ashift(unsafe, addr6, idx6) != magic) throw new Exception();
      }
      {
        int idx7 = rnd.nextInt();
        long addr7 = addr - (idx7 >>> 2);
        if (Tests.int_index_back_lshift(unsafe, addr7, idx7) != magic) throw new Exception();
      }
      {
        int idx8 = rnd.nextInt();
        long addr8 = addr - (idx8 * 16);
        if (Tests.int_index_mul_scale_16(unsafe, addr8, idx8) != magic) throw new Exception();
      }
      {
        long idx9 = rnd.nextLong();
        long addr9 = addr - (idx9 * 16);
        if (Tests.long_index_mul_scale_16(unsafe, addr9, idx9) != magic) throw new Exception();
      }
    }
  }
}
test/runtime/SharedArchiveFile/ArchiveDoesNotExist.java
new file mode 100644
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test ArchiveDoesNotExist
* @summary Test how VM handles "file does not exist" situation while
* attempting to use CDS archive. JVM should exit gracefully
* when sharing mode is ON, and continue w/o sharing if sharing
* mode is AUTO.
* @library /testlibrary
* @run main ArchiveDoesNotExist
*/
import com.oracle.java.testlibrary.*;
import java.io.File;

public class ArchiveDoesNotExist {
    public static void main(String[] args) throws Exception {
        String fileName = "test.jsa";

        File cdsFile = new File(fileName);
        if (cdsFile.exists())
            throw new RuntimeException("Test error: cds file already exists");

        // Sharing: on
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=./" + fileName,
            "-Xshare:on",
            "-version");

        OutputAnalyzer output = new OutputAnalyzer(pb.start());
        output.shouldContain("Specified shared archive not found");
        output.shouldHaveExitValue(1);

        // Sharing: auto
        pb = ProcessTools.createJavaProcessBuilder(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=./" + fileName,
            "-Xshare:auto",
            "-version");

        output = new OutputAnalyzer(pb.start());
        output.shouldMatch("(java|openjdk) version");
        output.shouldNotContain("sharing");
        output.shouldHaveExitValue(0);
    }
}
test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -29,6 +29,7 @@
  * is different from object alignment for creating a CDS file
  * should fail when loading.
  * @library /testlibrary
+ * @bug 8025642
  */

 import com.oracle.java.testlibrary.*;
...
@@ -82,7 +83,11 @@ public class CdsDifferentObjectAlignment {
             createAlignment,
             loadAlignment);

-        output.shouldContain(expectedErrorMsg);
+        try {
+            output.shouldContain(expectedErrorMsg);
+        } catch (RuntimeException e) {
+            output.shouldContain("Unable to use shared archive");
+        }
         output.shouldHaveExitValue(1);
     }
 }
test/runtime/SharedArchiveFile/DefaultUseWithClient.java
new file mode 100644
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test DefaultUseWithClient
* @summary Test default behavior of sharing with -client
* @library /testlibrary
* @run main DefaultUseWithClient
* @bug 8032224
*/
import com.oracle.java.testlibrary.*;
import java.io.File;

public class DefaultUseWithClient {
    public static void main(String[] args) throws Exception {
        String fileName = "test.jsa";

        // On 32-bit windows CDS should be on by default in "-client" config
        // Skip this test on any other platform
        boolean is32BitWindows = (Platform.isWindows() && Platform.is32bit());
        if (!is32BitWindows) {
            System.out.println("Test only applicable on 32-bit Windows. Skipping");
            return;
        }

        // create the archive
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=./" + fileName,
            "-Xshare:dump");
        OutputAnalyzer output = new OutputAnalyzer(pb.start());
        output.shouldHaveExitValue(0);

        pb = ProcessTools.createJavaProcessBuilder(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=./" + fileName,
            "-client",
            "-XX:+PrintSharedSpaces",
            "-version");

        output = new OutputAnalyzer(pb.start());
        try {
            output.shouldContain("sharing");
        } catch (RuntimeException e) {
            // if sharing failed due to ASLR or similar reasons,
            // check whether sharing was attempted at all (UseSharedSpaces)
            output.shouldContain("UseSharedSpaces:");
        }
        output.shouldHaveExitValue(0);
    }
}
test/runtime/SharedArchiveFile/LimitSharedSizes.java
new file mode 100644
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/* @test LimitSharedSizes
* @summary Test handling of limits on shared space size
* @library /testlibrary
* @run main LimitSharedSizes
*/
import com.oracle.java.testlibrary.*;

public class LimitSharedSizes {
    private static class SharedSizeTestData {
        public String optionName;
        public String optionValue;
        public String expectedErrorMsg;

        public SharedSizeTestData(String name, String value, String msg) {
            optionName = name;
            optionValue = value;
            expectedErrorMsg = msg;
        }
    }

    private static final SharedSizeTestData[] testTable = {
        // values in this part of the test table should cause failure
        // (shared space sizes are deliberately too small)
        new SharedSizeTestData("-XX:SharedReadOnlySize", "4M", "read only"),
        new SharedSizeTestData("-XX:SharedReadWriteSize", "4M", "read write"),

        // Known issue, JDK-8038422 (assert() on Windows)
        // new SharedSizeTestData("-XX:SharedMiscDataSize", "500k", "miscellaneous data"),

        // This will cause a VM crash; commenting out for now; see bug JDK-8038268
        // @ignore JDK-8038268
        // new SharedSizeTestData("-XX:SharedMiscCodeSize", "20k", "miscellaneous code"),

        // these values are larger than default ones, but should
        // be acceptable and not cause failure
        new SharedSizeTestData("-XX:SharedReadOnlySize", "20M", null),
        new SharedSizeTestData("-XX:SharedReadWriteSize", "20M", null),
        new SharedSizeTestData("-XX:SharedMiscDataSize", "20M", null),
        new SharedSizeTestData("-XX:SharedMiscCodeSize", "20M", null)
    };

    public static void main(String[] args) throws Exception {
        String fileName = "test.jsa";

        for (SharedSizeTestData td : testTable) {
            String option = td.optionName + "=" + td.optionValue;
            System.out.println("testing option <" + option + ">");

            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                "-XX:+UnlockDiagnosticVMOptions",
                "-XX:SharedArchiveFile=./" + fileName,
                option,
                "-Xshare:dump");

            OutputAnalyzer output = new OutputAnalyzer(pb.start());

            if (td.expectedErrorMsg != null) {
                output.shouldContain("The shared " + td.expectedErrorMsg
                    + " space is not large enough");
                output.shouldHaveExitValue(2);
            } else {
                output.shouldNotContain("space is not large enough");
                output.shouldHaveExitValue(0);
            }
        }
    }
}
test/runtime/SharedArchiveFile/SharedBaseAddress.java
new file mode 100644
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test SharedBaseAddress
* @summary Test variety of values for SharedBaseAddress, making sure
* VM handles normal values as well as edge values w/o a crash.
* @library /testlibrary
* @run main SharedBaseAddress
*/
import com.oracle.java.testlibrary.*;

public class SharedBaseAddress {

    // shared base address test table
    private static final String[] testTable = {
        "1g", "8g", "64g", "512g", "4t",
        "32t", "128t", "0",
        "1", "64k", "64M"
    };

    public static void main(String[] args) throws Exception {
        // Known issue on Solaris-Sparc
        // @ignore JDK-8044600
        if (Platform.isSolaris() && Platform.isSparc())
            return;

        for (String testEntry : testTable) {
            System.out.println("sharedBaseAddress = " + testEntry);

            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                "-XX:+UnlockDiagnosticVMOptions",
                "-XX:SharedArchiveFile=test.jsa",
                "-XX:SharedBaseAddress=" + testEntry,
                "-Xshare:dump");

            OutputAnalyzer output = new OutputAnalyzer(pb.start());

            output.shouldContain("Loading classes to share");

            try {
                pb = ProcessTools.createJavaProcessBuilder(
                    "-XX:+UnlockDiagnosticVMOptions",
                    "-XX:SharedArchiveFile=test.jsa",
                    "-Xshare:on",
                    "-version");
                output = new OutputAnalyzer(pb.start());
                output.shouldContain("sharing");
                output.shouldHaveExitValue(0);
            } catch (RuntimeException e) {
                output.shouldContain("Unable to use shared archive");
                output.shouldHaveExitValue(1);
            }
        }
    }
}
test/runtime/SharedArchiveFile/SpaceUtilizationCheck.java
new file mode 100644
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test SpaceUtilizationCheck
* @summary Check if the space utilization for shared spaces is adequate
* @library /testlibrary
* @run main SpaceUtilizationCheck
*/
import com.oracle.java.testlibrary.*;

import java.util.regex.Pattern;
import java.util.regex.Matcher;
import java.util.ArrayList;
import java.lang.Integer;

public class SpaceUtilizationCheck {
    // Minimum allowed utilization value (percent)
    // The goal is to have this number to be 50% for RO and RW regions
    // Once that feature is implemented, increase the MIN_UTILIZATION to 50
    private static final int MIN_UTILIZATION = 30;

    // Only RO and RW regions are considered for this check, since they
    // currently account for the bulk of the shared space
    private static final int NUMBER_OF_CHECKED_SHARED_REGIONS = 2;

    public static void main(String[] args) throws Exception {
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=./test.jsa",
            "-Xshare:dump");

        OutputAnalyzer output = new OutputAnalyzer(pb.start());
        String stdout = output.getStdout();
        ArrayList<String> utilization = findUtilization(stdout);

        if (utilization.size() != NUMBER_OF_CHECKED_SHARED_REGIONS)
            throw new RuntimeException("The output format of sharing summary has changed");

        for (String str : utilization) {
            int value = Integer.parseInt(str);
            if (value < MIN_UTILIZATION) {
                System.out.println(stdout);
                throw new RuntimeException("Utilization for one of the regions " +
                    "is below a threshold of " + MIN_UTILIZATION + "%");
            }
        }
    }

    public static ArrayList<String> findUtilization(String input) {
        ArrayList<String> regions = filterRegionsOfInterest(input.split("\n"));
        return filterByPattern(filterByPattern(regions, "bytes \\[.*% used\\]"), "\\d+");
    }

    private static ArrayList<String> filterByPattern(Iterable<String> input, String pattern) {
        ArrayList<String> result = new ArrayList<String>();
        for (String str : input) {
            Matcher matcher = Pattern.compile(pattern).matcher(str);
            if (matcher.find()) {
                result.add(matcher.group());
            }
        }
        return result;
    }

    private static ArrayList<String> filterRegionsOfInterest(String[] inputLines) {
        ArrayList<String> result = new ArrayList<String>();
        for (String str : inputLines) {
            if (str.contains("ro space:") || str.contains("rw space:")) {
                result.add(str);
            }
        }
        return result;
    }
}