openanolis / dragonwell8_hotspot

Commit 25cac386
Authored on Mar 30, 2012 by amurillo
Merge commit; parents: 54c0dc28, 76e1c94b

Showing 29 changed files with 465 additions and 257 deletions (+465, -257).
Changed files:

agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java                +18  -2
make/hotspot_version                                                            +1   -1
make/jprt.properties                                                            +2   -0
src/share/vm/classfile/classFileParser.cpp                                      +66  -21
src/share/vm/classfile/classFileParser.hpp                                      +5   -1
src/share/vm/gc_implementation/g1/collectionSetChooser.cpp                      +1   -1
src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp                      +2   -2
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp                           +9   -7
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp                         +1   -1
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp                         +3   -4
src/share/vm/gc_implementation/g1/g1_globals.hpp                                +6   -7
src/share/vm/gc_implementation/g1/vm_operations_g1.cpp                          +53  -8
src/share/vm/gc_implementation/g1/vm_operations_g1.hpp                          +10  -5
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp   +1   -0
src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp                      +29  -21
src/share/vm/gc_interface/collectedHeap.cpp                                     +5   -9
src/share/vm/gc_interface/collectedHeap.hpp                                     +9   -16
src/share/vm/gc_interface/collectedHeap.inline.hpp                              +15  -20
src/share/vm/memory/dump.cpp                                                    +8   -10
src/share/vm/oops/instanceKlass.cpp                                             +45  -22
src/share/vm/oops/instanceKlass.hpp                                             +104 -7
src/share/vm/oops/instanceKlassKlass.cpp                                        +0   -1
src/share/vm/oops/klass.cpp                                                     +3   -4
src/share/vm/oops/klass.hpp                                                     +3   -3
src/share/vm/prims/jvm.cpp                                                      +12  -19
src/share/vm/prims/jvmtiClassFileReconstituter.cpp                              +9   -15
src/share/vm/prims/jvmtiRedefineClasses.cpp                                     +27  -38
src/share/vm/runtime/reflection.cpp                                             +4   -7
test/Makefile                                                                   +14  -5
agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java

@@ -359,6 +359,12 @@ public class InstanceKlass extends Klass {
     public static final int innerClassNextOffset = 4;
   };

+  public static interface EnclosingMethodAttributeOffset {
+    public static final int enclosing_method_class_index_offset = 0;
+    public static final int enclosing_method_method_index_offset = 1;
+    public static final int enclosing_method_attribute_size = 2;
+  };
+
   // refer to compute_modifier_flags in VM code.
   public long computeModifierFlags() {
     long access = getAccessFlags();

@@ -367,9 +373,14 @@ public class InstanceKlass extends Klass {
     int length = (innerClassList == null)? 0 : (int) innerClassList.getLength();
     if (length > 0) {
       if (Assert.ASSERTS_ENABLED) {
-        Assert.that(length % InnerClassAttributeOffset.innerClassNextOffset == 0, "just checking");
+        Assert.that(length % InnerClassAttributeOffset.innerClassNextOffset == 0 ||
+                    length % InnerClassAttributeOffset.innerClassNextOffset == EnclosingMethodAttributeOffset.enclosing_method_attribute_size,
+                    "just checking");
       }
       for (int i = 0; i < length; i += InnerClassAttributeOffset.innerClassNextOffset) {
+        if (i == length - EnclosingMethodAttributeOffset.enclosing_method_attribute_size) {
+          break;
+        }
         int ioff = innerClassList.getShortAt(i + InnerClassAttributeOffset.innerClassInnerClassInfoOffset);
         // 'ioff' can be zero.

@@ -419,9 +430,14 @@ public class InstanceKlass extends Klass {
     int length = (innerClassList == null)? 0 : (int) innerClassList.getLength();
     if (length > 0) {
       if (Assert.ASSERTS_ENABLED) {
-        Assert.that(length % InnerClassAttributeOffset.innerClassNextOffset == 0, "just checking");
+        Assert.that(length % InnerClassAttributeOffset.innerClassNextOffset == 0 ||
+                    length % InnerClassAttributeOffset.innerClassNextOffset == EnclosingMethodAttributeOffset.enclosing_method_attribute_size,
+                    "just checking");
       }
       for (int i = 0; i < length; i += InnerClassAttributeOffset.innerClassNextOffset) {
+        if (i == length - EnclosingMethodAttributeOffset.enclosing_method_attribute_size) {
+          break;
+        }
         int ioff = innerClassList.getShortAt(i + InnerClassAttributeOffset.innerClassInnerClassInfoOffset);
         // 'ioff' can be zero.
make/hotspot_version

@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011
 HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=05
+HS_BUILD_NUMBER=06

 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
make/jprt.properties

@@ -446,6 +446,7 @@ jprt.test.targets.embedded= \
 jprt.test.targets.jdk8=${jprt.test.targets.standard}
 jprt.test.targets.jdk7=${jprt.test.targets.standard}
+jprt.test.targets.jdk7u4=${jprt.test.targets.jdk7}
 jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}

 # The default test/Makefile targets that should be run

@@ -505,5 +506,6 @@ jprt.make.rule.test.targets.embedded = \
 jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
+jprt.make.rule.test.targets.jdk7u4=${jprt.make.rule.test.targets.jdk7}
 jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
src/share/vm/classfile/classFileParser.cpp

@@ -2315,13 +2315,32 @@ void ClassFileParser::parse_classfile_source_debug_extension_attribute(constantP
 #define RECOGNIZED_INNER_CLASS_MODIFIERS (JVM_RECOGNIZED_CLASS_MODIFIERS | JVM_ACC_PRIVATE | JVM_ACC_PROTECTED | JVM_ACC_STATIC)

 // Return number of classes in the inner classes attribute table
-u2 ClassFileParser::parse_classfile_inner_classes_attribute(constantPoolHandle cp,
-                                                            instanceKlassHandle k, TRAPS) {
+u2 ClassFileParser::parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,
+                                                            bool parsed_enclosingmethod_attribute,
+                                                            u2 enclosing_method_class_index,
+                                                            u2 enclosing_method_method_index,
+                                                            constantPoolHandle cp,
+                                                            instanceKlassHandle k, TRAPS) {
   ClassFileStream* cfs = stream();
-  cfs->guarantee_more(2, CHECK_0);  // length
-  u2 length = cfs->get_u2_fast();
+  u1* current_mark = cfs->current();
+  u2 length = 0;
+  if (inner_classes_attribute_start != NULL) {
+    cfs->set_current(inner_classes_attribute_start);
+    cfs->guarantee_more(2, CHECK_0);  // length
+    length = cfs->get_u2_fast();
+  }

-  // 4-tuples of shorts [inner_class_info_index, outer_class_info_index, inner_name_index, inner_class_access_flags]
-  typeArrayOop ic = oopFactory::new_permanent_shortArray(length * 4, CHECK_0);
+  // 4-tuples of shorts of inner classes data and 2 shorts of enclosing
+  // method data:
+  //   [inner_class_info_index,
+  //    outer_class_info_index,
+  //    inner_name_index,
+  //    inner_class_access_flags,
+  //    ...
+  //    enclosing_method_class_index,
+  //    enclosing_method_method_index]
+  int size = length * 4 + (parsed_enclosingmethod_attribute ? 2 : 0);
+  typeArrayOop ic = oopFactory::new_permanent_shortArray(size, CHECK_0);
   typeArrayHandle inner_classes(THREAD, ic);
   int index = 0;
   int cp_size = cp->length();

@@ -2372,8 +2391,8 @@ u2 ClassFileParser::parse_classfile_inner_classes_attribute(constantPoolHandle c
   // 4347400: make sure there's no duplicate entry in the classes array
   if (_need_verify && _major_version >= JAVA_1_5_VERSION) {
-    for(int i = 0; i < inner_classes->length(); i += 4) {
-      for(int j = i + 4; j < inner_classes->length(); j += 4) {
+    for(int i = 0; i < length * 4; i += 4) {
+      for(int j = i + 4; j < length * 4; j += 4) {
         guarantee_property((inner_classes->ushort_at(i)   != inner_classes->ushort_at(j) ||
                             inner_classes->ushort_at(i+1) != inner_classes->ushort_at(j+1) ||
                             inner_classes->ushort_at(i+2) != inner_classes->ushort_at(j+2) ||

@@ -2384,8 +2403,19 @@ u2 ClassFileParser::parse_classfile_inner_classes_attribute(constantPoolHandle c
     }
   }

+  // Set EnclosingMethod class and method indexes.
+  if (parsed_enclosingmethod_attribute) {
+    inner_classes->short_at_put(index++, enclosing_method_class_index);
+    inner_classes->short_at_put(index++, enclosing_method_method_index);
+  }
+  assert(index == size, "wrong size");
+
   // Update instanceKlass with inner class info.
   k->set_inner_classes(inner_classes());
+
+  // Restore buffer's current position.
+  cfs->set_current(current_mark);
+
   return length;
 }

@@ -2490,6 +2520,10 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance
   int runtime_visible_annotations_length = 0;
   u1* runtime_invisible_annotations = NULL;
   int runtime_invisible_annotations_length = 0;
+  u1* inner_classes_attribute_start = NULL;
+  u4  inner_classes_attribute_length = 0;
+  u2  enclosing_method_class_index = 0;
+  u2  enclosing_method_method_index = 0;
   // Iterate over attributes
   while (attributes_count--) {
     cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length

@@ -2522,11 +2556,9 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance
       } else {
         parsed_innerclasses_attribute = true;
       }
-      u2 num_of_classes = parse_classfile_inner_classes_attribute(cp, k, CHECK);
-      if (_need_verify && _major_version >= JAVA_1_5_VERSION) {
-        guarantee_property(attribute_length == sizeof(num_of_classes) + 4 * sizeof(u2) * num_of_classes,
-                           "Wrong InnerClasses attribute length in class file %s", CHECK);
-      }
+      inner_classes_attribute_start = cfs->get_u1_buffer();
+      inner_classes_attribute_length = attribute_length;
+      cfs->skip_u1(inner_classes_attribute_length, CHECK);
     } else if (tag == vmSymbols::tag_synthetic()) {
       // Check for Synthetic tag
       // Shouldn't we check that the synthetic flags wasn't already set? - not required in spec

@@ -2568,22 +2600,21 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance
         parsed_enclosingmethod_attribute = true;
       }
       cfs->guarantee_more(4, CHECK);  // class_index, method_index
-      u2 class_index  = cfs->get_u2_fast();
-      u2 method_index = cfs->get_u2_fast();
-      if (class_index == 0) {
+      enclosing_method_class_index  = cfs->get_u2_fast();
+      enclosing_method_method_index = cfs->get_u2_fast();
+      if (enclosing_method_class_index == 0) {
         classfile_parse_error("Invalid class index in EnclosingMethod attribute in class file %s", CHECK);
       }
       // Validate the constant pool indices and types
-      if (!cp->is_within_bounds(class_index) ||
-          !is_klass_reference(cp, class_index)) {
+      if (!cp->is_within_bounds(enclosing_method_class_index) ||
+          !is_klass_reference(cp, enclosing_method_class_index)) {
         classfile_parse_error("Invalid or out-of-bounds class index in EnclosingMethod attribute in class file %s", CHECK);
       }
-      if (method_index != 0 &&
-          (!cp->is_within_bounds(method_index) ||
-           !cp->tag_at(method_index).is_name_and_type())) {
+      if (enclosing_method_method_index != 0 &&
+          (!cp->is_within_bounds(enclosing_method_method_index) ||
+           !cp->tag_at(enclosing_method_method_index).is_name_and_type())) {
         classfile_parse_error("Invalid or out-of-bounds method index in EnclosingMethod attribute in class file %s", CHECK);
       }
-      k->set_enclosing_method_indices(class_index, method_index);
     } else if (tag == vmSymbols::tag_bootstrap_methods() &&
                _major_version >= Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
       if (parsed_bootstrap_methods_attribute)

@@ -2606,6 +2637,20 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance
                                       CHECK);
   k->set_class_annotations(annotations());

+  if (parsed_innerclasses_attribute || parsed_enclosingmethod_attribute) {
+    u2 num_of_classes = parse_classfile_inner_classes_attribute(
+                            inner_classes_attribute_start,
+                            parsed_innerclasses_attribute,
+                            enclosing_method_class_index,
+                            enclosing_method_method_index,
+                            cp, k, CHECK);
+    if (parsed_innerclasses_attribute && _need_verify && _major_version >= JAVA_1_5_VERSION) {
+      guarantee_property(
+        inner_classes_attribute_length == sizeof(num_of_classes) + 4 * sizeof(u2) * num_of_classes,
+        "Wrong InnerClasses attribute length in class file %s", CHECK);
+    }
+  }
+
   if (_max_bootstrap_specifier_index >= 0) {
     guarantee_property(parsed_bootstrap_methods_attribute,
                        "Missing BootstrapMethods attribute in class file %s", CHECK);
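The reworked parser sizes the shared array as length * 4 shorts for the InnerClasses tuples, plus two trailing shorts when an EnclosingMethod attribute was parsed. A minimal standalone sketch of that packing rule, with plain STL types standing in for the VM's permanent short array (the struct and function names here are invented for illustration, not HotSpot APIs):

#include <cstdint>
#include <vector>

struct InnerClassEntry {
  uint16_t inner_class_info_index;
  uint16_t outer_class_info_index;
  uint16_t inner_name_index;
  uint16_t inner_class_access_flags;
};

// Pack InnerClasses tuples and optional EnclosingMethod data the way the
// parser sizes its array: length * 4 shorts, plus 2 if EnclosingMethod exists.
std::vector<uint16_t> pack_inner_classes(const std::vector<InnerClassEntry>& classes,
                                         bool has_enclosing_method,
                                         uint16_t enclosing_class_index,
                                         uint16_t enclosing_method_index) {
  std::vector<uint16_t> packed;
  packed.reserve(classes.size() * 4 + (has_enclosing_method ? 2 : 0));
  for (const InnerClassEntry& e : classes) {
    packed.push_back(e.inner_class_info_index);
    packed.push_back(e.outer_class_info_index);
    packed.push_back(e.inner_name_index);
    packed.push_back(e.inner_class_access_flags);
  }
  if (has_enclosing_method) {            // trailing [class_index, method_index]
    packed.push_back(enclosing_class_index);
    packed.push_back(enclosing_method_index);
  }
  return packed;
}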
src/share/vm/classfile/classFileParser.hpp

@@ -130,7 +130,11 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
   void parse_classfile_sourcefile_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
   void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp, instanceKlassHandle k, int length, TRAPS);
-  u2   parse_classfile_inner_classes_attribute(constantPoolHandle cp,
+  u2   parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,
+                                               bool parsed_enclosingmethod_attribute,
+                                               u2 enclosing_method_class_index,
+                                               u2 enclosing_method_method_index,
+                                               constantPoolHandle cp,
                                                instanceKlassHandle k, TRAPS);
   void parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
   void parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
src/share/vm/gc_implementation/g1/collectionSetChooser.cpp

@@ -285,7 +285,7 @@ size_t CollectionSetChooser::calcMinOldCSetLength() {
   // that the result is the same during all mixed GCs that follow a cycle.

   const size_t region_num = (size_t) _length;
-  const size_t gc_num = (size_t) G1MaxMixedGCNum;
+  const size_t gc_num = (size_t) G1MixedGCCountTarget;
   size_t result = region_num / gc_num;
   // emulate ceiling
   if (result * gc_num < region_num) {
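The "emulate ceiling" step above spreads the candidate old regions over at most G1MixedGCCountTarget collections by rounding the division up. A small standalone illustration of the same rounding (the names here are illustrative only):

#include <cstddef>

// Ceiling division: smallest per-GC quota such that quota * gc_num >= region_num.
size_t min_old_cset_length(size_t region_num, size_t gc_num) {
  size_t result = region_num / gc_num;
  if (result * gc_num < region_num) {   // emulate ceiling
    result += 1;
  }
  return result;                        // e.g. 10 regions over 4 GCs -> 3 per GC
}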
src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp

@@ -155,7 +155,7 @@ void ConcurrentMarkThread::run() {
         CMCheckpointRootsFinalClosure final_cl(_cm);
         sprintf(verbose_str, "GC remark");
-        VM_CGC_Operation op(&final_cl, verbose_str);
+        VM_CGC_Operation op(&final_cl, verbose_str, true /* needs_pll */);
         VMThread::execute(&op);
       }
       if (cm()->restart_for_overflow() &&

@@ -189,7 +189,7 @@ void ConcurrentMarkThread::run() {
         CMCleanUp cl_cl(_cm);
         sprintf(verbose_str, "GC cleanup");
-        VM_CGC_Operation op(&cl_cl, verbose_str);
+        VM_CGC_Operation op(&cl_cl, verbose_str, false /* needs_pll */);
         VMThread::execute(&op);
       } else {
         // We don't want to update the marking status if a GC pause
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -993,7 +993,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
     // iteration (after taking the Heap_lock).
     result = _mutator_alloc_region.attempt_allocation(word_size,
                                                       false /* bot_updates */);
-    if (result != NULL ) {
+    if (result != NULL) {
       return result;
     }

@@ -2437,20 +2437,22 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
                                    true,  /* should_initiate_conc_mark */
                                    g1_policy()->max_pause_time_ms(),
                                    cause);

       VMThread::execute(&op);
       if (!op.pause_succeeded()) {
-        // Another GC got scheduled and prevented us from scheduling
-        // the initial-mark GC. It's unlikely that the GC that
-        // pre-empted us was also an initial-mark GC. So, we'll retry
-        // the initial-mark GC.
         if (full_gc_count_before == total_full_collections()) {
-          retry_gc = true;
+          retry_gc = op.should_retry_gc();
         } else {
           // A Full GC happened while we were trying to schedule the
           // initial-mark GC. No point in starting a new cycle given
           // that the whole heap was collected anyway.
         }
+
+        if (retry_gc) {
+          if (GC_locker::is_active_and_needs_gc()) {
+            GC_locker::stall_until_clear();
+          }
+        }
       }
     } else {
       if (cause == GCCause::_gc_locker
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

@@ -2608,7 +2608,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
   size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
   size_t capacity_bytes = _g1->capacity();
   double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
-  double threshold = (double) G1OldReclaimableThresholdPercent;
+  double threshold = (double) G1HeapWastePercent;
   if (perc < threshold) {
     ergo_verbose4(ErgoMixedGCs,
                   false_action_str,
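With the renamed flag, the policy keeps doing mixed GCs only while the reclaimable space is still larger than the share of the heap G1 is allowed to waste. A hedged standalone sketch of that comparison, using plain integers rather than the G1 policy types (names are illustrative):

#include <cstddef>

// Returns true when the remaining reclaimable space still exceeds the
// fraction of the heap G1 is willing to waste (G1HeapWastePercent).
bool next_gc_should_be_mixed(size_t reclaimable_bytes,
                             size_t capacity_bytes,
                             double heap_waste_percent) {
  double perc = (double)reclaimable_bytes * 100.0 / (double)capacity_bytes;
  return perc >= heap_waste_percent;    // below the threshold: stop mixed GCs
}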
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

@@ -940,10 +940,9 @@ public:
     return _bytes_copied_during_gc;
   }

-  // Determine whether the next GC should be mixed. Called to determine
-  // whether to start mixed GCs or whether to carry on doing mixed
-  // GCs. The two action strings are used in the ergo output when the
-  // method returns true or false.
+  // Determine whether there are candidate regions so that the
+  // next GC should be mixed. The two action strings are used
+  // in the ergo output when the method returns true or false.
   bool next_gc_should_be_mixed(const char* true_action_str,
                                const char* false_action_str);
src/share/vm/gc_implementation/g1/g1_globals.hpp

@@ -299,17 +299,16 @@
           "Percentage (0-100) of the heap size to use as maximum "          \
           "young gen size.")                                                \
                                                                             \
-  develop(uintx, G1OldCSetRegionLiveThresholdPercent, 95,                   \
+  develop(uintx, G1OldCSetRegionLiveThresholdPercent, 90,                   \
          "Threshold for regions to be added to the collection set. "        \
          "Regions with more live bytes that this will not be collected.")   \
                                                                             \
-  develop(uintx, G1OldReclaimableThresholdPercent, 1,                       \
-          "Threshold for the remaining old reclaimable bytes, expressed "   \
-          "as a percentage of the heap size. If the old reclaimable bytes " \
-          "are under this we will not collect them with more mixed GCs.")   \
+  product(uintx, G1HeapWastePercent, 5,                                     \
+          "Amount of space, expressed as a percentage of the heap size, "   \
+          "that G1 is willing not to collect to avoid expensive GCs.")      \
                                                                             \
-  develop(uintx, G1MaxMixedGCNum, 4,                                        \
-          "The maximum desired number of mixed GCs after a marking cycle.") \
+  product(uintx, G1MixedGCCountTarget, 4,                                   \
+          "The target number of mixed GCs after a marking cycle.")          \
                                                                             \
   develop(uintx, G1OldCSetRegionThresholdPercent, 10,                       \
           "An upper bound for the number of old CSet regions expressed "    \
src/share/vm/gc_implementation/g1/vm_operations_g1.cpp

@@ -34,7 +34,8 @@
 VM_G1CollectForAllocation::VM_G1CollectForAllocation(
                                                   unsigned int gc_count_before,
                                                   size_t word_size)
-  : VM_G1OperationWithAllocRequest(gc_count_before, word_size) {
+  : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
+                                   GCCause::_allocation_failure) {
   guarantee(word_size > 0, "an allocation should always be requested");
 }

@@ -57,9 +58,10 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
                                       bool           should_initiate_conc_mark,
                                       double         target_pause_time_ms,
                                       GCCause::Cause gc_cause)
-  : VM_G1OperationWithAllocRequest(gc_count_before, word_size),
+  : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
     _should_initiate_conc_mark(should_initiate_conc_mark),
     _target_pause_time_ms(target_pause_time_ms),
+    _should_retry_gc(false),
     _full_collections_completed_before(0) {
   guarantee(target_pause_time_ms > 0.0,
             err_msg("target_pause_time_ms = %1.6lf should be positive",

@@ -70,6 +72,22 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
   _gc_cause = gc_cause;
 }

+bool VM_G1IncCollectionPause::doit_prologue() {
+  bool res = VM_GC_Operation::doit_prologue();
+  if (!res) {
+    if (_should_initiate_conc_mark) {
+      // The prologue can fail for a couple of reasons. The first is that another GC
+      // got scheduled and prevented the scheduling of the initial mark GC. The
+      // second is that the GC locker may be active and the heap can't be expanded.
+      // In both cases we want to retry the GC so that the initial mark pause is
+      // actually scheduled. In the second case, however, we should stall until
+      // the GC locker is no longer active and then retry the initial mark GC.
+      _should_retry_gc = true;
+    }
+  }
+  return res;
+}
+
 void VM_G1IncCollectionPause::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark ||

@@ -106,11 +124,25 @@ void VM_G1IncCollectionPause::doit() {
     // next GC pause to be an initial mark; it returns false if a
     // marking cycle is already in progress.
     //
-    // If a marking cycle is already in progress just return and skip
-    // the pause - the requesting thread should block in doit_epilogue
-    // until the marking cycle is complete.
+    // If a marking cycle is already in progress just return and skip the
+    // pause below - if the reason for requesting this initial mark pause
+    // was due to a System.gc() then the requesting thread should block in
+    // doit_epilogue() until the marking cycle is complete.
+    //
+    // If this initial mark pause was requested as part of a humongous
+    // allocation then we know that the marking cycle must just have
+    // been started by another thread (possibly also allocating a humongous
+    // object) as there was no active marking cycle when the requesting
+    // thread checked before calling collect() in
+    // attempt_allocation_humongous(). Retrying the GC, in this case,
+    // will cause the requesting thread to spin inside collect() until the
+    // just started marking cycle is complete - which may be a while. So
+    // we do NOT retry the GC.
     if (!res) {
-      assert(_word_size == 0, "ExplicitGCInvokesConcurrent shouldn't be allocating");
+      assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
+      if (_gc_cause != GCCause::_g1_humongous_allocation) {
+        _should_retry_gc = true;
+      }
       return;
     }
   }

@@ -123,6 +155,13 @@ void VM_G1IncCollectionPause::doit() {
                                       true /* expect_null_cur_alloc_region */);
   } else {
     assert(_result == NULL, "invariant");
+    if (!_pause_succeeded) {
+      // Another possible reason for the pause to not be successful
+      // is that, again, the GC locker is active (and has become active
+      // since the prologue was executed). In this case we should retry
+      // the pause after waiting for the GC locker to become inactive.
+      _should_retry_gc = true;
+    }
   }
 }

@@ -168,6 +207,7 @@ void VM_G1IncCollectionPause::doit_epilogue() {
 }

 void VM_CGC_Operation::acquire_pending_list_lock() {
+  assert(_needs_pll, "don't call this otherwise");
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
   ConcurrentMarkThread::slt()->

@@ -175,6 +215,7 @@ void VM_CGC_Operation::acquire_pending_list_lock() {
 }

 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
+  assert(_needs_pll, "don't call this otherwise");
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
   ConcurrentMarkThread::slt()->

@@ -198,7 +239,9 @@ void VM_CGC_Operation::doit() {
 bool VM_CGC_Operation::doit_prologue() {
   // Note the relative order of the locks must match that in
   // VM_GC_Operation::doit_prologue() or deadlocks can occur
-  acquire_pending_list_lock();
+  if (_needs_pll) {
+    acquire_pending_list_lock();
+  }

   Heap_lock->lock();
   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;

@@ -210,5 +253,7 @@ void VM_CGC_Operation::doit_epilogue() {
   // VM_GC_Operation::doit_epilogue()
   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
   Heap_lock->unlock();
-  release_and_notify_pending_list_lock();
+  if (_needs_pll) {
+    release_and_notify_pending_list_lock();
+  }
 }
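Taken together with the g1CollectedHeap.cpp change above, the retry flow is: if the pause prologue fails (another GC won the race, or the GC locker blocked it) or the pause itself does not succeed, the operation reports should_retry_gc(), and the caller stalls on the GC locker before resubmitting. A rough standalone sketch of that loop; the two std::function parameters stand in for VMThread::execute() and GC_locker::stall_until_clear() and are placeholders, not HotSpot APIs:

#include <functional>

// Keep submitting the pause operation until it no longer asks for a retry.
// execute_pause_and_should_retry() returns true when the pause should be
// retried; stall_until_gc_locker_clear() waits out the GC locker first.
void collect_with_retry(std::function<bool()> execute_pause_and_should_retry,
                        std::function<void()> stall_until_gc_locker_clear) {
  bool retry_gc;
  do {
    retry_gc = execute_pause_and_should_retry();
    if (retry_gc) {
      stall_until_gc_locker_clear();    // second failure mode: GC locker active
    }
  } while (retry_gc);
}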
src/share/vm/gc_implementation/g1/vm_operations_g1.hpp

 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -43,8 +43,9 @@ protected:
 public:
   VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
-                                 size_t       word_size)
-    : VM_GC_Operation(gc_count_before, GCCause::_allocation_failure),
+                                 size_t       word_size,
+                                 GCCause::Cause gc_cause)
+    : VM_GC_Operation(gc_count_before, gc_cause),
       _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
   HeapWord* result() { return _result; }
   bool pause_succeeded() { return _pause_succeeded; }

@@ -77,6 +78,7 @@ public:
 class VM_G1IncCollectionPause: public VM_G1OperationWithAllocRequest {
 private:
   bool         _should_initiate_conc_mark;
+  bool         _should_retry_gc;
   double       _target_pause_time_ms;
   unsigned int _full_collections_completed_before;
 public:

@@ -86,11 +88,13 @@ public:
                           double         target_pause_time_ms,
                           GCCause::Cause gc_cause);
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
+  virtual bool doit_prologue();
   virtual void doit();
   virtual void doit_epilogue();
   virtual const char* name() const {
     return "garbage-first incremental collection pause";
   }
+  bool should_retry_gc() const { return _should_retry_gc; }
 };

 // Concurrent GC stop-the-world operations such as remark and cleanup;

@@ -98,6 +102,7 @@ public:
 class VM_CGC_Operation: public VM_Operation {
   VoidClosure* _cl;
   const char* _printGCMessage;
+  bool _needs_pll;
 protected:
   // java.lang.ref.Reference support

@@ -105,8 +110,8 @@ protected:
   void release_and_notify_pending_list_lock();

 public:
-  VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg)
-    : _cl(cl), _printGCMessage(printGCMsg) { }
+  VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pll)
+    : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll) { }

   virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
   virtual void doit();
   virtual bool doit_prologue();
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp

@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP

+#include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp

 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -91,29 +91,37 @@ void MutableNUMASpace::ensure_parsability() {
     MutableSpace *s = ls->space();
     if (s->top() < top()) { // For all spaces preceding the one containing top()
       if (s->free_in_words() > 0) {
-        size_t area_touched_words = pointer_delta(s->end(), s->top());
-        CollectedHeap::fill_with_object(s->top(), area_touched_words);
+        intptr_t cur_top = (intptr_t)s->top();
+        size_t words_left_to_fill = pointer_delta(s->end(), s->top());
+        while (words_left_to_fill > 0) {
+          size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
+          assert(words_to_fill >= CollectedHeap::min_fill_size(),
+            err_msg("Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
+            words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size()));
+          CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
+          if (!os::numa_has_static_binding()) {
+            size_t touched_words = words_to_fill;
 #ifndef ASSERT
-        if (!ZapUnusedHeapArea) {
-          area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
-            area_touched_words);
-        }
+            if (!ZapUnusedHeapArea) {
+              touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
+                touched_words);
+            }
 #endif
-        if (!os::numa_has_static_binding()) {
-          MemRegion invalid;
-          HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
-          HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), os::vm_page_size());
-          if (crossing_start != crossing_end) {
-            // If object header crossed a small page boundary we mark the area
-            // as invalid rounding it to a page_size().
-            HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
-            HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()), s->end());
-            invalid = MemRegion(start, end);
+            MemRegion invalid;
+            HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
+            HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
+            if (crossing_start != crossing_end) {
+              // If object header crossed a small page boundary we mark the area
+              // as invalid rounding it to a page_size().
+              HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
+              HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
+              invalid = MemRegion(start, end);
+            }
+            ls->add_invalid_region(invalid);
           }
-          ls->add_invalid_region(invalid);
+          cur_top = cur_top + (words_to_fill * HeapWordSize);
+          words_left_to_fill -= words_to_fill;
         }
       }
     } else {
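The rewritten loop fills the unused tail of each NUMA space in chunks no larger than the maximum filler array instead of one oversized fill. A standalone sketch of just the chunking arithmetic (word counts only; the actual fill call is reduced to a comment, and the names are illustrative):

#include <algorithm>
#include <cassert>
#include <cstddef>

// Split a free range of `words_left_to_fill` words into filler chunks that
// never exceed `filler_max_words`, mirroring the while-loop in the hunk above.
size_t count_filler_chunks(size_t words_left_to_fill,
                           size_t filler_max_words,
                           size_t min_fill_words) {
  size_t chunks = 0;
  while (words_left_to_fill > 0) {
    size_t words_to_fill = std::min(words_left_to_fill, filler_max_words);
    assert(words_to_fill >= min_fill_words && "remaining size too small to fill");
    // ...fill_with_object(cur_top, words_to_fill) would go here...
    words_left_to_fill -= words_to_fill;
    ++chunks;
  }
  return chunks;
}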
src/share/vm/gc_interface/collectedHeap.cpp

@@ -85,7 +85,7 @@ CollectedHeap::CollectedHeap() : _n_par_threads(0)
   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   const size_t elements_per_word = HeapWordSize / sizeof(jint);
   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
-                                             max_len * elements_per_word);
+                                             max_len / elements_per_word);

   _barrier_set = NULL;
   _is_gc_active = false;

@@ -303,10 +303,6 @@ size_t CollectedHeap::filler_array_min_size() {
   return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
 }

-size_t CollectedHeap::filler_array_max_size() {
-  return _filler_array_max_size;
-}
-
 #ifdef ASSERT
 void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
 {

@@ -333,10 +329,11 @@ CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
   const size_t payload_size = words - filler_array_hdr_size();
   const size_t len = payload_size * HeapWordSize / sizeof(jint);
+  assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));

   // Set the length first for concurrent GC.
   ((arrayOop)start)->set_length((int)len);
-  post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
+  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
   DEBUG_ONLY(zap_filler_array(start, words, zap);)
 }

@@ -349,8 +346,7 @@ CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
     fill_with_array(start, words, zap);
   } else if (words > 0) {
     assert(words == min_fill_size(), "unaligned size");
-    post_allocation_setup_common(SystemDictionary::Object_klass(), start,
-                                 words);
+    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
   }
 }

@@ -480,7 +476,7 @@ oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle r
     assert(ScavengeRootsInCode > 0, "must be");
     obj = common_mem_allocate_init(size, CHECK_NULL);
   }
-  post_allocation_setup_common(klass, obj, size);
+  post_allocation_setup_common(klass, obj);
   assert(Universe::is_bootstrapping() ||
          !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
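The one-character fix in the constructor changes a multiplication into a division: max_len counts jint elements, and elements_per_word jints fit into one heap word, so dividing converts the element count into words, while the old expression appears to have overstated the payload. A small standalone sketch of the unit conversion (plain C++ types; HeapWordSize is approximated here by the pointer size, which is an assumption of this sketch):

#include <cstddef>
#include <cstdint>

// Convert a jint element count into heap words, as the corrected expression
// does: elements_per_word jints fit in one word, so divide rather than multiply.
size_t filler_array_payload_words(size_t max_len_elements) {
  const size_t elements_per_word = sizeof(void*) / sizeof(int32_t); // ~ HeapWordSize / sizeof(jint)
  return max_len_elements / elements_per_word;   // e.g. 8 ints -> 4 words on 64-bit
}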
src/share/vm/gc_interface/collectedHeap.hpp

@@ -128,7 +128,6 @@ class CollectedHeap : public CHeapObj {
   // Reinitialize tlabs before resuming mutators.
   virtual void resize_all_tlabs();

- protected:
   // Allocate from the current thread's TLAB, with broken-out slow path.
   inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
   static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);

@@ -150,18 +149,14 @@ class CollectedHeap : public CHeapObj {
   inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);

   // Helper functions for (VM) allocation.
-  inline static void post_allocation_setup_common(KlassHandle klass,
-                                                  HeapWord* obj, size_t size);
+  inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj);
   inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
-                                                            HeapWord* objPtr,
-                                                            size_t size);
-  inline static void post_allocation_setup_obj(KlassHandle klass,
-                                               HeapWord* obj, size_t size);
+                                                            HeapWord* objPtr);
+  inline static void post_allocation_setup_obj(KlassHandle klass, HeapWord* obj);
   inline static void post_allocation_setup_array(KlassHandle klass,
-                                                 HeapWord* obj, size_t size,
-                                                 int length);
+                                                 HeapWord* obj, int length);

   // Clears an allocated object.
   inline static void init_obj(HeapWord* obj, size_t size);

@@ -169,7 +164,6 @@ class CollectedHeap : public CHeapObj {
   // Filler object utilities.
   static inline size_t filler_array_hdr_size();
   static inline size_t filler_array_min_size();
-  static inline size_t filler_array_max_size();

   DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
   DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

@@ -197,6 +191,10 @@ class CollectedHeap : public CHeapObj {
     G1CollectedHeap
   };

+  static inline size_t filler_array_max_size() {
+    return _filler_array_max_size;
+  }
+
   virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }

   /**

@@ -366,9 +364,7 @@ class CollectedHeap : public CHeapObj {
   inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass,
                                                             int size,
                                                             TRAPS);
-  inline static void post_allocation_install_obj_klass(KlassHandle klass,
-                                                        oop obj,
-                                                        int size);
+  inline static void post_allocation_install_obj_klass(KlassHandle klass, oop obj);
   inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);

   // Raw memory allocation facilities

@@ -662,9 +658,6 @@ class CollectedHeap : public CHeapObj {
     }
   }

-  // Allocate GCHeapLog during VM startup
-  static void initialize_heap_log();
-
   // Heap verification
   virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;
src/share/vm/gc_interface/collectedHeap.inline.hpp

 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -50,15 +50,13 @@
 // Inline allocation implementations.

 void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
-                                                 HeapWord* obj,
-                                                 size_t size) {
-  post_allocation_setup_no_klass_install(klass, obj, size);
-  post_allocation_install_obj_klass(klass, oop(obj), (int) size);
+                                                 HeapWord* obj) {
+  post_allocation_setup_no_klass_install(klass, obj);
+  post_allocation_install_obj_klass(klass, oop(obj));
 }

 void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
-                                                           HeapWord* objPtr,
-                                                           size_t size) {
+                                                           HeapWord* objPtr) {
   oop obj = (oop)objPtr;

   assert(obj != NULL, "NULL object pointer");

@@ -71,8 +69,7 @@ void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
 }

 void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
-                                                   oop obj,
-                                                   int size) {
+                                                   oop obj) {
   // These asserts are kind of complicated because of klassKlass
   // and the beginning of the world.
   assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass");

@@ -101,9 +98,8 @@ inline void post_allocation_notify(KlassHandle klass, oop obj) {
 }

 void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
-                                              HeapWord* obj,
-                                              size_t size) {
-  post_allocation_setup_common(klass, obj, size);
+                                              HeapWord* obj) {
+  post_allocation_setup_common(klass, obj);
   assert(Universe::is_bootstrapping() ||
          !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
   // notify jvmti and dtrace

@@ -112,14 +108,13 @@ void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
 void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
                                                 HeapWord* obj,
-                                                size_t size,
                                                 int length) {
   // Set array length before setting the _klass field
   // in post_allocation_setup_common() because the klass field
   // indicates that the object is parsable by concurrent GC.
   assert(length >= 0, "length should be non-negative");
   ((arrayOop)obj)->set_length(length);
-  post_allocation_setup_common(klass, obj, size);
+  post_allocation_setup_common(klass, obj);
   assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array");
   // notify jvmti and dtrace (must be after length is set for dtrace)
   post_allocation_notify(klass, (oop)obj);

@@ -256,7 +251,7 @@ oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
   HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
-  post_allocation_setup_obj(klass, obj, size);
+  post_allocation_setup_obj(klass, obj);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
 }

@@ -269,7 +264,7 @@ oop CollectedHeap::array_allocate(KlassHandle klass,
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
   HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
-  post_allocation_setup_array(klass, obj, size, length);
+  post_allocation_setup_array(klass, obj, length);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
 }

@@ -283,7 +278,7 @@ oop CollectedHeap::array_allocate_nozero(KlassHandle klass,
   assert(size >= 0, "int won't convert to size_t");
   HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL);
   ((oop)obj)->set_klass_gap(0);
-  post_allocation_setup_array(klass, obj, size, length);
+  post_allocation_setup_array(klass, obj, length);
 #ifndef PRODUCT
   const size_t hs = oopDesc::header_size()+1;
   Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs);

@@ -293,7 +288,7 @@ oop CollectedHeap::array_allocate_nozero(KlassHandle klass,
 oop CollectedHeap::permanent_obj_allocate(KlassHandle klass, int size, TRAPS) {
   oop obj = permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL);
-  post_allocation_install_obj_klass(klass, obj, size);
+  post_allocation_install_obj_klass(klass, obj);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj, size));
   return obj;

@@ -306,7 +301,7 @@ oop CollectedHeap::permanent_obj_allocate_no_klass_install(KlassHandle klass,
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
   HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
-  post_allocation_setup_no_klass_install(klass, obj, size);
+  post_allocation_setup_no_klass_install(klass, obj);
 #ifndef PRODUCT
   const size_t hs = oopDesc::header_size();
   Universe::heap()->check_for_bad_heap_word_value(obj+hs, size-hs);

@@ -322,7 +317,7 @@ oop CollectedHeap::permanent_array_allocate(KlassHandle klass,
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
   HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
-  post_allocation_setup_array(klass, obj, size, length);
+  post_allocation_setup_array(klass, obj, length);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
 }
src/share/vm/memory/dump.cpp

@@ -297,16 +297,14 @@ public:
     if (obj->blueprint()->oop_is_instanceKlass()) {
       instanceKlass* ik = instanceKlass::cast((klassOop)obj);
-      typeArrayOop inner_classes = ik->inner_classes();
-      if (inner_classes != NULL) {
-        constantPoolOop constants = ik->constants();
-        int n = inner_classes->length();
-        for (int i = 0; i < n; i += instanceKlass::inner_class_next_offset) {
-          int ioff = i + instanceKlass::inner_class_inner_name_offset;
-          int index = inner_classes->ushort_at(ioff);
-          if (index != 0) {
-            _closure->do_symbol(constants->symbol_at_addr(index));
-          }
+      instanceKlassHandle ik_h((klassOop)obj);
+      InnerClassesIterator iter(ik_h);
+      constantPoolOop constants = ik->constants();
+      for (; !iter.done(); iter.next()) {
+        int index = iter.inner_name_index();
+        if (index != 0) {
+          _closure->do_symbol(constants->symbol_at_addr(index));
         }
       }
     }
src/share/vm/oops/instanceKlass.cpp

@@ -1133,6 +1133,36 @@ JNIid* instanceKlass::jni_id_for(int offset) {
   return probe;
 }

+u2 instanceKlass::enclosing_method_data(int offset) {
+  typeArrayOop inner_class_list = inner_classes();
+  if (inner_class_list == NULL) {
+    return 0;
+  }
+  int length = inner_class_list->length();
+  if (length % inner_class_next_offset == 0) {
+    return 0;
+  } else {
+    int index = length - enclosing_method_attribute_size;
+    typeArrayHandle inner_class_list_h(inner_class_list);
+    assert(offset < enclosing_method_attribute_size, "invalid offset");
+    return inner_class_list_h->ushort_at(index + offset);
+  }
+}
+
+void instanceKlass::set_enclosing_method_indices(u2 class_index,
+                                                 u2 method_index) {
+  typeArrayOop inner_class_list = inner_classes();
+  assert (inner_class_list != NULL, "_inner_classes list is not set up");
+  int length = inner_class_list->length();
+  if (length % inner_class_next_offset == enclosing_method_attribute_size) {
+    int index = length - enclosing_method_attribute_size;
+    typeArrayHandle inner_class_list_h(inner_class_list);
+    inner_class_list_h->ushort_at_put(
+      index + enclosing_method_class_index_offset, class_index);
+    inner_class_list_h->ushort_at_put(
+      index + enclosing_method_method_index_offset, method_index);
+  }
+}
+
 // Lookup or create a jmethodID.
 // This code is called by the VMThread and JavaThreads so the

@@ -2107,28 +2137,21 @@ jint instanceKlass::compute_modifier_flags(TRAPS) const {
   jint access = access_flags().as_int();

   // But check if it happens to be member class.
-  typeArrayOop inner_class_list = inner_classes();
-  int length = (inner_class_list == NULL) ? 0 : inner_class_list->length();
-  assert (length % instanceKlass::inner_class_next_offset == 0, "just checking");
-  if (length > 0) {
-    typeArrayHandle inner_class_list_h(THREAD, inner_class_list);
-    instanceKlassHandle ik(THREAD, k);
-    for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
-      int ioff = inner_class_list_h->ushort_at(
-                      i + instanceKlass::inner_class_inner_class_info_offset);
-
-      // Inner class attribute can be zero, skip it.
-      // Strange but true:  JVM spec. allows null inner class refs.
-      if (ioff == 0) continue;
-
-      // only look at classes that are already loaded
-      // since we are looking for the flags for our self.
-      Symbol* inner_name = ik->constants()->klass_name_at(ioff);
-      if ((ik->name() == inner_name)) {
-        // This is really a member class.
-        access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset);
-        break;
-      }
+  instanceKlassHandle ik(THREAD, k);
+  InnerClassesIterator iter(ik);
+  for (; !iter.done(); iter.next()) {
+    int ioff = iter.inner_class_info_index();
+    // Inner class attribute can be zero, skip it.
+    // Strange but true:  JVM spec. allows null inner class refs.
+    if (ioff == 0) continue;
+
+    // only look at classes that are already loaded
+    // since we are looking for the flags for our self.
+    Symbol* inner_name = ik->constants()->klass_name_at(ioff);
+    if ((ik->name() == inner_name)) {
+      // This is really a member class.
+      access = iter.inner_access_flags();
+      break;
     }
   }
   // Remember to strip ACC_SUPER bit
src/share/vm/oops/instanceKlass.hpp

@@ -188,7 +188,17 @@ class instanceKlass: public Klass {
   klassOop        _host_klass;
   // Class signers.
   objArrayOop     _signers;
-  // inner_classes attribute.
+  // The InnerClasses attribute and EnclosingMethod attribute. The
+  // _inner_classes is an array of shorts. If the class has InnerClasses
+  // attribute, then the _inner_classes array begins with 4-tuples of shorts
+  // [inner_class_info_index, outer_class_info_index,
+  // inner_name_index, inner_class_access_flags] for the InnerClasses
+  // attribute. If the EnclosingMethod attribute exists, it occupies the
+  // last two shorts [class_index, method_index] of the array. If only
+  // the InnerClasses attribute exists, the _inner_classes array length is
+  // number_of_inner_classes * 4. If the class has both InnerClasses
+  // and EnclosingMethod attributes the _inner_classes array length is
+  // number_of_inner_classes * 4 + enclosing_method_attribute_size.
   typeArrayOop    _inner_classes;
   // Implementors of this interface (not valid if it overflows)
   klassOop        _implementors[implementors_limit];

@@ -251,8 +261,6 @@ class instanceKlass: public Klass {
   // Array of interesting part(s) of the previous version(s) of this
   // instanceKlass. See PreviousVersionWalker below.
   GrowableArray<PreviousVersionNode *>* _previous_versions;
-  u2              _enclosing_method_class_index;  // Constant pool index for class of enclosing method, or 0 if none
-  u2              _enclosing_method_method_index; // Constant pool index for name and type of enclosing method, or 0 if none
   // JVMTI fields can be moved to their own structure - see 6315920
   unsigned char * _cached_class_file_bytes;       // JVMTI: cached class file, before retransformable agent modified it in CFLH
   jint            _cached_class_file_len;         // JVMTI: length of above

@@ -351,6 +359,12 @@ class instanceKlass: public Klass {
     inner_class_next_offset = 4
   };

+  enum EnclosingMethodAttributeOffset {
+    enclosing_method_class_index_offset = 0,
+    enclosing_method_method_index_offset = 1,
+    enclosing_method_attribute_size = 2
+  };
+
   // method override check
   bool is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS);

@@ -533,11 +547,15 @@ class instanceKlass: public Klass {
   Symbol* generic_signature() const                   { return _generic_signature; }
   void set_generic_signature(Symbol* sig)             { _generic_signature = sig; }

-  u2 enclosing_method_class_index() const             { return _enclosing_method_class_index; }
-  u2 enclosing_method_method_index() const            { return _enclosing_method_method_index; }
+  u2 enclosing_method_data(int offset);
+  u2 enclosing_method_class_index() {
+    return enclosing_method_data(enclosing_method_class_index_offset);
+  }
+  u2 enclosing_method_method_index() {
+    return enclosing_method_data(enclosing_method_method_index_offset);
+  }
   void set_enclosing_method_indices(u2 class_index,
-                                    u2 method_index)  { _enclosing_method_class_index  = class_index;
-                                                        _enclosing_method_method_index = method_index; }
+                                    u2 method_index);

   // jmethodID support
   static jmethodID get_jmethod_id(instanceKlassHandle ik_h,

@@ -1053,4 +1071,83 @@ class nmethodBucket: public CHeapObj {
   nmethod* get_nmethod()                  { return _nmethod; }
 };

+// An iterator that's used to access the inner classes indices in the
+// instanceKlass::_inner_classes array.
+class InnerClassesIterator : public StackObj {
+ private:
+  typeArrayHandle _inner_classes;
+  int _length;
+  int _idx;
+ public:
+
+  InnerClassesIterator(instanceKlassHandle k) {
+    _inner_classes = k->inner_classes();
+    if (k->inner_classes() != NULL) {
+      _length = _inner_classes->length();
+      // The inner class array's length should be the multiple of
+      // inner_class_next_offset if it only contains the InnerClasses
+      // attribute data, or it should be
+      // n*inner_class_next_offset+enclosing_method_attribute_size
+      // if it also contains the EnclosingMethod data.
+      assert((_length % instanceKlass::inner_class_next_offset == 0 ||
+              _length % instanceKlass::inner_class_next_offset ==
+              instanceKlass::enclosing_method_attribute_size),
+             "just checking");
+      // Remove the enclosing_method portion if exists.
+      if (_length % instanceKlass::inner_class_next_offset ==
+          instanceKlass::enclosing_method_attribute_size) {
+        _length -= instanceKlass::enclosing_method_attribute_size;
+      }
+    } else {
+      _length = 0;
+    }
+    _idx = 0;
+  }
+
+  int length() const {
+    return _length;
+  }
+
+  void next() {
+    _idx += instanceKlass::inner_class_next_offset;
+  }
+
+  bool done() const {
+    return (_idx >= _length);
+  }
+
+  u2 inner_class_info_index() const {
+    return _inner_classes->ushort_at(
+               _idx + instanceKlass::inner_class_inner_class_info_offset);
+  }
+
+  void set_inner_class_info_index(u2 index) {
+    _inner_classes->ushort_at_put(
+               _idx + instanceKlass::inner_class_inner_class_info_offset, index);
+  }
+
+  u2 outer_class_info_index() const {
+    return _inner_classes->ushort_at(
+               _idx + instanceKlass::inner_class_outer_class_info_offset);
+  }
+
+  void set_outer_class_info_index(u2 index) {
+    _inner_classes->ushort_at_put(
+               _idx + instanceKlass::inner_class_outer_class_info_offset, index);
+  }
+
+  u2 inner_name_index() const {
+    return _inner_classes->ushort_at(
+               _idx + instanceKlass::inner_class_inner_name_offset);
+  }
+
+  void set_inner_name_index(u2 index) {
+    _inner_classes->ushort_at_put(
+               _idx + instanceKlass::inner_class_inner_name_offset, index);
+  }
+
+  u2 inner_access_flags() const {
+    return _inner_classes->ushort_at(
+               _idx + instanceKlass::inner_class_access_flags_offset);
+  }
+};
+
 #endif // SHARE_VM_OOPS_INSTANCEKLASS_HPP
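The new InnerClassesIterator hides both the 4-short stride of the InnerClasses tuples and the optional 2-short EnclosingMethod tail from callers. A standalone sketch that mirrors (but does not reuse) the same iteration pattern over a plain uint16_t array, plus a small usage example; class and function names here are invented for illustration:

#include <cstdint>
#include <vector>

// Walk 4-short InnerClasses tuples, ignoring a trailing 2-short
// EnclosingMethod record if the array carries one.
class SimpleInnerClassesIterator {
  const std::vector<uint16_t>& _data;
  size_t _length;   // length of the InnerClasses portion only
  size_t _idx;
 public:
  explicit SimpleInnerClassesIterator(const std::vector<uint16_t>& data)
      : _data(data), _length(data.size()), _idx(0) {
    if (_length % 4 == 2) {   // EnclosingMethod tail present
      _length -= 2;
    }
  }
  bool done() const { return _idx >= _length; }
  void next() { _idx += 4; }
  uint16_t inner_class_info_index() const  { return _data[_idx + 0]; }
  uint16_t outer_class_info_index() const  { return _data[_idx + 1]; }
  uint16_t inner_name_index() const        { return _data[_idx + 2]; }
  uint16_t inner_access_flags() const      { return _data[_idx + 3]; }
};

// Usage: count non-zero inner_class_info entries, skipping the optional tail.
int count_inner_classes(const std::vector<uint16_t>& inner_classes) {
  int n = 0;
  for (SimpleInnerClassesIterator it(inner_classes); !it.done(); it.next()) {
    if (it.inner_class_info_index() != 0) ++n;
  }
  return n;
}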
src/share/vm/oops/instanceKlassKlass.cpp

@@ -416,7 +416,6 @@ instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int it
   ik->set_methods_annotations(NULL);
   ik->set_methods_parameter_annotations(NULL);
   ik->set_methods_default_annotations(NULL);
-  ik->set_enclosing_method_indices(0, 0);
   ik->set_jvmti_cached_class_field_map(NULL);
   ik->set_initial_method_idnum(0);
   assert(k()->is_parsable(), "should be parsable here.");
src/share/vm/oops/klass.cpp

 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -174,10 +174,9 @@ KlassHandle Klass::base_create_klass(KlassHandle& klass, int size,
 }

 void Klass_vtbl::post_new_init_klass(KlassHandle& klass,
-                                     klassOop new_klass,
-                                     int size) const {
+                                     klassOop new_klass) const {
   assert(!new_klass->klass_part()->null_vtbl(), "Not a complete klass");
-  CollectedHeap::post_allocation_install_obj_klass(klass, new_klass, size);
+  CollectedHeap::post_allocation_install_obj_klass(klass, new_klass);
 }

 void* Klass_vtbl::operator new(size_t ignored, KlassHandle& klass,
src/share/vm/oops/klass.hpp

 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -149,7 +149,7 @@ class Klass_vtbl {
   // by the shared "base_create" subroutines.
   //
   virtual void* allocate_permanent(KlassHandle& klass, int size, TRAPS) const = 0;
-  void post_new_init_klass(KlassHandle& klass, klassOop obj, int size) const;
+  void post_new_init_klass(KlassHandle& klass, klassOop obj) const;

   // Every subclass on which vtbl_value is called must include this macro.
   // Delay the installation of the klassKlass pointer until after the

@@ -160,7 +160,7 @@ class Klass_vtbl {
     if (HAS_PENDING_EXCEPTION) return NULL;                                  \
     klassOop new_klass = ((Klass*) result)->as_klassOop();                   \
     OrderAccess::storestore();                                               \
-    post_new_init_klass(klass_klass, new_klass, size);                       \
+    post_new_init_klass(klass_klass, new_klass);                             \
     return result;                                                           \
   }
src/share/vm/prims/jvm.cpp

@@ -1301,9 +1301,6 @@ JVM_END
 // Inner class reflection ///////////////////////////////////////////////////////////////////////////////

 JVM_ENTRY(jobjectArray, JVM_GetDeclaredClasses(JNIEnv *env, jclass ofClass))
-  const int inner_class_info_index = 0;
-  const int outer_class_info_index = 1;
-
   JvmtiVMObjectAllocEventCollector oam;
   // ofClass is a reference to a java_lang_Class object. The mirror object
   // of an instanceKlass

@@ -1315,26 +1312,26 @@ JVM_ENTRY(jobjectArray, JVM_GetDeclaredClasses(JNIEnv *env, jclass ofClass))
   }

   instanceKlassHandle k(thread, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)));
+  InnerClassesIterator iter(k);

-  if (k->inner_classes()->length() == 0) {
+  if (iter.length() == 0) {
     // Neither an inner nor outer class
     oop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL);
     return (jobjectArray)JNIHandles::make_local(env, result);
   }

   // find inner class info
-  typeArrayHandle icls(thread, k->inner_classes());
   constantPoolHandle cp(thread, k->constants());
-  int length = icls->length();
+  int length = iter.length();

   // Allocate temp. result array
   objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), length/4, CHECK_NULL);
   objArrayHandle result (THREAD, r);
   int members = 0;

-  for(int i = 0; i < length; i += 4) {
-    int ioff = icls->ushort_at(i + inner_class_info_index);
-    int ooff = icls->ushort_at(i + outer_class_info_index);
+  for (; !iter.done(); iter.next()) {
+    int ioff = iter.inner_class_info_index();
+    int ooff = iter.outer_class_info_index();

     if (ioff != 0 && ooff != 0) {
       // Check to see if the name matches the class we're looking for

@@ -1392,17 +1389,13 @@ klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle k,
                                                      bool* inner_is_member,
                                                      TRAPS) {
   Thread* thread = THREAD;
-  const int inner_class_info_index = inner_class_inner_class_info_offset;
-  const int outer_class_info_index = inner_class_outer_class_info_offset;

-  if (k->inner_classes()->length() == 0) {
+  InnerClassesIterator iter(k);
+  if (iter.length() == 0) {
     // No inner class info => no declaring class
     return NULL;
   }

-  typeArrayHandle i_icls(thread, k->inner_classes());
   constantPoolHandle i_cp(thread, k->constants());
-  int i_length = i_icls->length();

   bool found = false;
   klassOop ok;

@@ -1410,10 +1403,10 @@
   *inner_is_member = false;

   // Find inner_klass attribute
-  for (int i = 0; i < i_length && !found; i += inner_class_next_offset) {
-    int ioff = i_icls->ushort_at(i + inner_class_info_index);
-    int ooff = i_icls->ushort_at(i + outer_class_info_index);
-    int noff = i_icls->ushort_at(i + inner_class_inner_name_offset);
+  for (; !iter.done() && !found; iter.next()) {
+    int ioff = iter.inner_class_info_index();
+    int ooff = iter.outer_class_info_index();
+    int noff = iter.inner_name_index();
     if (ioff != 0) {
       // Check to see if the name matches the class we're looking for
       // before attempting to find the class.
src/share/vm/prims/jvmtiClassFileReconstituter.cpp
...
...
@@ -292,8 +292,8 @@ void JvmtiClassFileReconstituter::write_signature_attribute(u2 generic_signature

 // Compute the number of entries in the InnerClasses attribute
 u2 JvmtiClassFileReconstituter::inner_classes_attribute_length() {
-  typeArrayOop inner_class_list = ikh()->inner_classes();
-  return (inner_class_list == NULL) ? 0 : inner_class_list->length();
+  InnerClassesIterator iter(ikh());
+  return iter.length();
 }

 // Write an annotation attribute. The VM stores them in raw form, so all we need
...
...
@@ -324,26 +324,20 @@ void JvmtiClassFileReconstituter::write_annotations_attribute(const char* attr_n
 // JVMSpec|     } classes[number_of_classes];
 // JVMSpec|   }
 void JvmtiClassFileReconstituter::write_inner_classes_attribute(int length) {
-  typeArrayOop inner_class_list = ikh()->inner_classes();
-  guarantee(inner_class_list != NULL && inner_class_list->length() == length,
+  InnerClassesIterator iter(ikh());
+  guarantee(iter.length() != 0 && iter.length() == length,
             "caller must check");
-  typeArrayHandle inner_class_list_h(thread(), inner_class_list);
   assert(length % instanceKlass::inner_class_next_offset == 0, "just checking");
   u2 entry_count = length / instanceKlass::inner_class_next_offset;
   u4 size = 2 + entry_count * (2 + 2 + 2 + 2);

   write_attribute_name_index("InnerClasses");
   write_u4(size);
   write_u2(entry_count);
-  for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
-    write_u2(inner_class_list_h->ushort_at(i + instanceKlass::inner_class_inner_class_info_offset));
-    write_u2(inner_class_list_h->ushort_at(i + instanceKlass::inner_class_outer_class_info_offset));
-    write_u2(inner_class_list_h->ushort_at(i + instanceKlass::inner_class_inner_name_offset));
-    write_u2(inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset));
+  for (; !iter.done(); iter.next()) {
+    write_u2(iter.inner_class_info_index());
+    write_u2(iter.outer_class_info_index());
+    write_u2(iter.inner_name_index());
+    write_u2(iter.inner_access_flags());
   }
 }
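The attribute written above is a u2 number_of_classes followed by four u2 fields per entry, which is where u4 size = 2 + entry_count * (2 + 2 + 2 + 2) comes from. A tiny stand-alone check of that arithmetic; illustrative C++, not HotSpot code:

#include <cassert>
#include <cstdint>

// Payload of an InnerClasses attribute (excluding the attribute_name_index
// and attribute_length header fields): a u2 number_of_classes followed by
// four u2 values per entry.
uint32_t inner_classes_attribute_size(uint16_t entry_count) {
  return 2 + (uint32_t)entry_count * (2 + 2 + 2 + 2);
}

int main() {
  assert(inner_classes_attribute_size(0) == 2);   // just number_of_classes
  assert(inner_classes_attribute_size(3) == 26);  // 2 + 3 * 8
  return 0;
}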
...
...
src/share/vm/prims/jvmtiRedefineClasses.cpp
...
...
@@ -2400,44 +2400,33 @@ void VM_RedefineClasses::set_new_constant_pool(
   // new constant indices as needed. The inner classes info is a
   // quadruple:
   // (inner_class_info, outer_class_info, inner_name, inner_access_flags)
-  typeArrayOop inner_class_list = scratch_class->inner_classes();
-  int icl_length = (inner_class_list == NULL) ? 0 : inner_class_list->length();
-  if (icl_length > 0) {
-    typeArrayHandle inner_class_list_h(THREAD, inner_class_list);
-    for (int i = 0; i < icl_length; i += instanceKlass::inner_class_next_offset) {
-      int cur_index = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_inner_class_info_offset);
-      if (cur_index == 0) {
-        continue;  // JVM spec. allows null inner class refs so skip it
-      }
-      int new_index = find_new_index(cur_index);
-      if (new_index != 0) {
-        RC_TRACE_WITH_THREAD(0x00080000, THREAD,
-          ("inner_class_info change: %d to %d", cur_index, new_index));
-        inner_class_list_h->ushort_at_put(i + instanceKlass::inner_class_inner_class_info_offset, new_index);
-      }
-      cur_index = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_outer_class_info_offset);
-      new_index = find_new_index(cur_index);
-      if (new_index != 0) {
-        RC_TRACE_WITH_THREAD(0x00080000, THREAD,
-          ("outer_class_info change: %d to %d", cur_index, new_index));
-        inner_class_list_h->ushort_at_put(i + instanceKlass::inner_class_outer_class_info_offset, new_index);
-      }
-      cur_index = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_inner_name_offset);
-      new_index = find_new_index(cur_index);
-      if (new_index != 0) {
-        RC_TRACE_WITH_THREAD(0x00080000, THREAD,
-          ("inner_name change: %d to %d", cur_index, new_index));
-        inner_class_list_h->ushort_at_put(i + instanceKlass::inner_class_inner_name_offset, new_index);
-      }
-    } // end for each inner class
-  } // end if we have inner classes
+  InnerClassesIterator iter(scratch_class);
+  for (; !iter.done(); iter.next()) {
+    int cur_index = iter.inner_class_info_index();
+    if (cur_index == 0) {
+      continue;  // JVM spec. allows null inner class refs so skip it
+    }
+    int new_index = find_new_index(cur_index);
+    if (new_index != 0) {
+      RC_TRACE_WITH_THREAD(0x00080000, THREAD,
+        ("inner_class_info change: %d to %d", cur_index, new_index));
+      iter.set_inner_class_info_index(new_index);
+    }
+    cur_index = iter.outer_class_info_index();
+    new_index = find_new_index(cur_index);
+    if (new_index != 0) {
+      RC_TRACE_WITH_THREAD(0x00080000, THREAD,
+        ("outer_class_info change: %d to %d", cur_index, new_index));
+      iter.set_outer_class_info_index(new_index);
+    }
+    cur_index = iter.inner_name_index();
+    new_index = find_new_index(cur_index);
+    if (new_index != 0) {
+      RC_TRACE_WITH_THREAD(0x00080000, THREAD,
+        ("inner_name change: %d to %d", cur_index, new_index));
+      iter.set_inner_name_index(new_index);
+    }
+  } // end for each inner class

   // Attach each method in klass to the new constant pool and update
   // to use new constant pool indices as needed:
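The rewritten loop above applies one idiom to each of the three constant-pool references in an entry: read the current index, ask find_new_index() for a replacement, and only store it back when the result is non-zero (an inner_class_info of 0 is a legal null reference and is skipped outright). A stripped-down sketch of that remap-in-place pattern, with invented types and a plain map standing in for the class's index-mapping state:

#include <cstdint>
#include <unordered_map>
#include <vector>

struct Entry {                       // one InnerClasses quadruple, simplified
  uint16_t inner_class_info;
  uint16_t outer_class_info;
  uint16_t inner_name;
};

// Returns the remapped index for cur, or 0 when the index is unchanged.
static uint16_t remap_index(const std::unordered_map<uint16_t, uint16_t>& map,
                            uint16_t cur) {
  auto it = map.find(cur);
  return (it == map.end()) ? 0 : it->second;
}

static void fix_index(const std::unordered_map<uint16_t, uint16_t>& map,
                      uint16_t& slot) {
  uint16_t new_index = remap_index(map, slot);
  if (new_index != 0) {
    slot = new_index;                // rewrite in place, like the setters above
  }
}

void remap_inner_classes(std::vector<Entry>& entries,
                         const std::unordered_map<uint16_t, uint16_t>& map) {
  for (Entry& e : entries) {
    if (e.inner_class_info == 0) {
      continue;                      // a null inner class ref is allowed; skip it
    }
    fix_index(map, e.inner_class_info);
    fix_index(map, e.outer_class_info);
    fix_index(map, e.inner_name);
  }
}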
...
...
src/share/vm/runtime/reflection.cpp
...
...
@@ -591,14 +591,11 @@ bool Reflection::is_same_package_member(klassOop class1, klassOop class2, TRAPS)
 // Caller is responsible for figuring out in advance which case must be true.
 void Reflection::check_for_inner_class(instanceKlassHandle outer, instanceKlassHandle inner,
                                        bool inner_is_member, TRAPS) {
-  const int inner_class_info_index = 0;
-  const int outer_class_info_index = 1;
-
-  typeArrayHandle icls(THREAD, outer->inner_classes());
+  InnerClassesIterator iter(outer);
   constantPoolHandle cp(THREAD, outer->constants());

-  for (int i = 0; i < icls->length(); i += 4) {
-    int ioff = icls->ushort_at(i + inner_class_info_index);
-    int ooff = icls->ushort_at(i + outer_class_info_index);
+  for (; !iter.done(); iter.next()) {
+    int ioff = iter.inner_class_info_index();
+    int ooff = iter.outer_class_info_index();
     if (inner_is_member && ioff != 0 && ooff != 0) {
       klassOop o = cp->klass_at(ooff, CHECK);
...
...
test/Makefile
...
...
@@ -26,6 +26,8 @@
 # Makefile to run various jdk tests
 #

+GETMIXEDPATH=echo
+
 # Get OS/ARCH specifics
 OSNAME = $(shell uname -s)
 ifeq ($(OSNAME), SunOS)
...
...
@@ -60,7 +62,14 @@ ifeq ($(findstring BSD,$(OSNAME)), BSD)
     ARCH = i586
   endif
 endif
-ifeq ($(OSNAME), Windows_NT)
+ifeq ($(PLATFORM),)
+  # detect whether we're running in MKS or cygwin
+  ifeq ($(OSNAME), Windows_NT) # MKS
+    GETMIXEDPATH=dosname -s
+  endif
+  ifeq ($(findstring CYGWIN,$(OSNAME)), CYGWIN)
+    GETMIXEDPATH=cygpath -m -s
+  endif
   PLATFORM = windows
   SLASH_JAVA = J:
   ifeq ($(word 1, $(PROCESSOR_IDENTIFIER)),ia64)
...
...
@@ -234,11 +243,11 @@ wbapitest: prep $(JT_HOME) $(PRODUCT_HOME) $(JTREG)
 	$(JTREG) -a -v:fail,error \
 	  $(JTREG_KEY_OPTION) \
 	  $(EXTRA_JTREG_OPTIONS) \
-	  -r:$(ABS_TEST_OUTPUT_DIR)/JTreport \
-	  -w:$(ABS_TEST_OUTPUT_DIR)/JTwork \
-	  -jdk:$(PRODUCT_HOME) \
+	  -r:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTreport \
+	  -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTwork \
+	  -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)") \
 	  $(JAVA_OPTIONS:%=-vmoption:%) \
-	  $(TEST_ROOT)/sanity \
+	  $(shell $(GETMIXEDPATH) "$(TEST_ROOT)")/sanity \
 	  || $(BUNDLE_UP_FAILED)
 	$(BUNDLE_UP)
...
...