openanolis / dragonwell8_hotspot
b339fe4c
编写于
7月 09, 2013
作者:
Z
zgu
浏览文件
操作
浏览文件
下载
差异文件
Merge
上级
2a09e09a
1039ad1b
变更
41
隐藏空白更改
内联
并排
Showing
41 changed file
with
19 addition
and
420 deletion
+19
-420
Changed files:

  agent/src/share/classes/sun/jvm/hotspot/oops/ArrayKlass.java                          +0   -4
  agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java                               +0   -4
  make/bsd/makefiles/mapfile-vers-debug                                                 +0   -1
  make/bsd/makefiles/mapfile-vers-product                                               +0   -1
  make/linux/makefiles/mapfile-vers-debug                                               +0   -1
  make/linux/makefiles/mapfile-vers-product                                             +0   -1
  make/solaris/makefiles/mapfile-vers                                                   +0   -1
  src/share/vm/classfile/javaClasses.hpp                                                +1   -1
  src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp       +0   -6
  src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp       +0   -1
  src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp  +0  -20
  src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp  +0   -1
  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp                                 +0   -8
  src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp                                 +0   -5
  src/share/vm/gc_implementation/g1/g1MarkSweep.cpp                                     +0   -1
  src/share/vm/memory/allocation.cpp                                                    +8   -1
  src/share/vm/memory/allocation.hpp                                                    +2   -1
  src/share/vm/memory/defNewGeneration.cpp                                              +0   -5
  src/share/vm/memory/defNewGeneration.hpp                                              +0   -1
  src/share/vm/memory/genCollectedHeap.cpp                                              +0   -9
  src/share/vm/memory/genCollectedHeap.hpp                                              +0   -1
  src/share/vm/memory/generation.cpp                                                    +0  -10
  src/share/vm/memory/generation.hpp                                                    +0   -7
  src/share/vm/memory/sharedHeap.hpp                                                    +0   -5
  src/share/vm/memory/universe.cpp                                                      +0   -1
  src/share/vm/oops/arrayKlass.cpp                                                      +0   -7
  src/share/vm/oops/arrayKlass.hpp                                                      +0   -6
  src/share/vm/oops/instanceKlass.cpp                                                   +0   -6
  src/share/vm/oops/instanceKlass.hpp                                                   +0   -5
  src/share/vm/oops/klass.cpp                                                           +0   -7
  src/share/vm/oops/klass.hpp                                                           +0   -9
  src/share/vm/prims/jvm.cpp                                                            +0  -20
  src/share/vm/prims/jvm.h                                                              +0   -3
  src/share/vm/runtime/aprofiler.cpp                                                    +0 -143
  src/share/vm/runtime/aprofiler.hpp                                                    +0  -71
  src/share/vm/runtime/arguments.cpp                                                    +0  -21
  src/share/vm/runtime/arguments.hpp                                                    +1   -3
  src/share/vm/runtime/handles.hpp                                                      +1   -1
  src/share/vm/runtime/java.cpp                                                         +0  -11
  src/share/vm/runtime/thread.cpp                                                       +0   -2
  src/share/vm/runtime/vmStructs.cpp                                                    +6   -8
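Taken together, the hunks below remove HotSpot's old -Xaprof allocation profiler and its supporting plumbing: the AllocationProfiler class itself (src/share/vm/runtime/aprofiler.cpp/.hpp are deleted outright), the per-Klass _alloc_count/_alloc_size counters and their serviceability-agent mirrors (ArrayKlass.java, Klass.java), the object_iterate_since_last_GC() hooks that every collector had to provide, and the long-obsolete JVM_SetProtectionDomain export. The few additions are a new "tiny" ChunkPool in memory/allocation.cpp/.hpp, a HandleArea constructor that starts its Arena from a tiny chunk (runtime/handles.hpp), and the matching vmStructs.cpp cleanup; java_lang_Class::set_protection_domain also moves to the private section of javaClasses.hpp now that the JVM entry point is gone.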
agent/src/share/classes/sun/jvm/hotspot/oops/ArrayKlass.java

@@ -49,7 +49,6 @@ public class ArrayKlass extends Klass {
     higherDimension = new MetadataField(type.getAddressField("_higher_dimension"), 0);
     lowerDimension  = new MetadataField(type.getAddressField("_lower_dimension"), 0);
     vtableLen       = new CIntField(type.getCIntegerField("_vtable_len"), 0);
-    allocSize       = new CIntField(type.getCIntegerField("_alloc_size"), 0);
     componentMirror = new OopField(type.getOopField("_component_mirror"), 0);
     javaLangCloneableName = null;
     javaLangObjectName = null;
@@ -64,7 +63,6 @@ public class ArrayKlass extends Klass {
   private static MetadataField higherDimension;
   private static MetadataField lowerDimension;
   private static CIntField     vtableLen;
-  private static CIntField     allocSize;
   private static OopField      componentMirror;

   public Klass getJavaSuper() {
@@ -76,7 +74,6 @@ public class ArrayKlass extends Klass {
   public Klass getHigherDimension() { return (Klass) higherDimension.getValue(this); }
   public Klass getLowerDimension()  { return (Klass) lowerDimension.getValue(this); }
   public long  getVtableLen()       { return vtableLen.getValue(this); }
-  public long  getAllocSize()       { return allocSize.getValue(this); }
   public Oop   getComponentMirror() { return componentMirror.getValue(this); }

   // constant class names - javaLangCloneable, javaIoSerializable, javaLangObject
@@ -147,7 +144,6 @@ public class ArrayKlass extends Klass {
     visitor.doMetadata(higherDimension, true);
     visitor.doMetadata(lowerDimension, true);
     visitor.doCInt(vtableLen, true);
-    visitor.doCInt(allocSize, true);
     visitor.doOop(componentMirror, true);
   }
 }
agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java

@@ -57,7 +57,6 @@ public class Klass extends Metadata implements ClassConstants {
     accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0);
     subklass    = new MetadataField(type.getAddressField("_subklass"), 0);
     nextSibling = new MetadataField(type.getAddressField("_next_sibling"), 0);
-    allocCount  = new CIntField(type.getCIntegerField("_alloc_count"), 0);
     LH_INSTANCE_SLOW_PATH_BIT  = db.lookupIntConstant("Klass::_lh_instance_slow_path_bit").intValue();
     LH_LOG2_ELEMENT_SIZE_SHIFT = db.lookupIntConstant("Klass::_lh_log2_element_size_shift").intValue();
@@ -87,7 +86,6 @@ public class Klass extends Metadata implements ClassConstants {
   private static CIntField     accessFlags;
   private static MetadataField subklass;
   private static MetadataField nextSibling;
-  private static CIntField     allocCount;

   private Address getValue(AddressField field) {
     return addr.getAddressAt(field.getOffset());
@@ -108,7 +106,6 @@ public class Klass extends Metadata implements ClassConstants {
   public AccessFlags getAccessFlagsObj(){ return new AccessFlags(getAccessFlags()); }
   public Klass getSubklassKlass()       { return (Klass) subklass.getValue(this); }
   public Klass getNextSiblingKlass()    { return (Klass) nextSibling.getValue(this); }
-  public long  getAllocCount()          { return allocCount.getValue(this); }

   // computed access flags - takes care of inner classes etc.
   // This is closer to actual source level than getAccessFlags() etc.
@@ -172,7 +169,6 @@ public class Klass extends Metadata implements ClassConstants {
     visitor.doCInt(accessFlags, true);
     visitor.doMetadata(subklass, true);
     visitor.doMetadata(nextSibling, true);
-    visitor.doCInt(allocCount, true);
   }

   public long getObjectSize() {
make/bsd/makefiles/mapfile-vers-debug

@@ -221,7 +221,6 @@
                 _JVM_SetLength
                 _JVM_SetNativeThreadName
                 _JVM_SetPrimitiveArrayElement
-                _JVM_SetProtectionDomain
                 _JVM_SetSockOpt
                 _JVM_SetThreadPriority
                 _JVM_Sleep

make/bsd/makefiles/mapfile-vers-product

@@ -221,7 +221,6 @@
                 _JVM_SetLength
                 _JVM_SetNativeThreadName
                 _JVM_SetPrimitiveArrayElement
-                _JVM_SetProtectionDomain
                 _JVM_SetSockOpt
                 _JVM_SetThreadPriority
                 _JVM_Sleep

make/linux/makefiles/mapfile-vers-debug

@@ -223,7 +223,6 @@ SUNWprivate_1.1 {
                 JVM_SetLength;
                 JVM_SetNativeThreadName;
                 JVM_SetPrimitiveArrayElement;
-                JVM_SetProtectionDomain;
                 JVM_SetSockOpt;
                 JVM_SetThreadPriority;
                 JVM_Sleep;

make/linux/makefiles/mapfile-vers-product

@@ -223,7 +223,6 @@ SUNWprivate_1.1 {
                 JVM_SetLength;
                 JVM_SetNativeThreadName;
                 JVM_SetPrimitiveArrayElement;
-                JVM_SetProtectionDomain;
                 JVM_SetSockOpt;
                 JVM_SetThreadPriority;
                 JVM_Sleep;

make/solaris/makefiles/mapfile-vers

@@ -223,7 +223,6 @@ SUNWprivate_1.1 {
                 JVM_SetLength;
                 JVM_SetNativeThreadName;
                 JVM_SetPrimitiveArrayElement;
-                JVM_SetProtectionDomain;
                 JVM_SetSockOpt;
                 JVM_SetThreadPriority;
                 JVM_Sleep;
src/share/vm/classfile/javaClasses.hpp

@@ -234,6 +234,7 @@ class java_lang_Class : AllStatic {
   static GrowableArray<Klass*>* _fixup_mirror_list;

   static void set_init_lock(oop java_class, oop init_lock);
+  static void set_protection_domain(oop java_class, oop protection_domain);
 public:
   static void compute_offsets();
@@ -272,7 +273,6 @@ class java_lang_Class : AllStatic {
   // Support for embedded per-class oops
   static oop  protection_domain(oop java_class);
-  static void set_protection_domain(oop java_class, oop protection_domain);
   static oop  init_lock(oop java_class);
   static objArrayOop  signers(oop java_class);
   static void set_signers(oop java_class, objArrayOop signers);
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

@@ -2017,12 +2017,6 @@ oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                 \
 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)

-void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
-  // ugghh... how would one do this efficiently for a non-contiguous space?
-  guarantee(false, "NYI");
-}
-
 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
   return _smallLinearAllocBlock._word_size == 0;
 }
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp

@@ -396,7 +396,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // iteration support for promotion
   void save_marks();
   bool no_allocs_since_save_marks();
-  void object_iterate_since_last_GC(ObjectClosure* cl);

   // iteration support for sweeping
   void save_sweep_limit() {
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

@@ -3129,26 +3129,6 @@ oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {                 \
 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)

-void
-ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
-{
-  // Not currently implemented; need to do the following. -- ysr.
-  // dld -- I think that is used for some sort of allocation profiler.  So it
-  // really means the objects allocated by the mutator since the last
-  // GC.  We could potentially implement this cheaply by recording only
-  // the direct allocations in a side data structure.
-  //
-  // I think we probably ought not to be required to support these
-  // iterations at any arbitrary point; I think there ought to be some
-  // call to enable/disable allocation profiling in a generation/space,
-  // and the iterator ought to return the objects allocated in the
-  // gen/space since the enable call, or the last iterator call (which
-  // will probably be at a GC.)  That way, for gens like CM&S that would
-  // require some extra data structure to support this, we only pay the
-  // cost when it's in use...
-  cmsSpace()->object_iterate_since_last_GC(blk);
-}
-
 void
 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
   cl->set_generation(this);
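The comment deleted above sketches an alternative design: turn allocation profiling on and off explicitly, and have the space record its direct allocations in a side data structure while profiling is enabled, so iteration only costs something when the profiler is in use. Purely as an illustration of that idea, not HotSpot code; the class and method names (SideAllocLog, record, drain) are hypothetical:

#include <vector>
#include <functional>

// Sketch only: a space records direct allocations in a side log while
// profiling is enabled, so "objects allocated since the last iteration"
// can be replayed cheaply, and nothing is recorded when profiling is off.
class SideAllocLog {
  bool _enabled = false;
  std::vector<void*> _allocs;   // the side data structure
public:
  void enable()  { _enabled = true; _allocs.clear(); }
  void disable() { _enabled = false; }
  void record(void* obj) { if (_enabled) _allocs.push_back(obj); }
  // Visit everything allocated since enable()/the previous drain(), then reset.
  void drain(const std::function<void(void*)>& do_object) {
    for (void* obj : _allocs) do_object(obj);
    _allocs.clear();
  }
};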
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp

@@ -1273,7 +1273,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   // Iteration support and related enquiries
   void save_marks();
   bool no_allocs_since_save_marks();
-  void object_iterate_since_last_GC(ObjectClosure* cl);
   void younger_refs_iterate(OopsInGenClosure* cl);

   // Iteration support specific to CMS generations
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -54,7 +54,6 @@
 #include "memory/referenceProcessor.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
-#include "runtime/aprofiler.hpp"
 #include "runtime/vmThread.hpp"

 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
@@ -2665,11 +2664,6 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
   heap_region_iterate(&blk);
 }

-void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
-  // FIXME: is this right?
-  guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
-}
-
 // Calls a SpaceClosure on a HeapRegion.
 class SpaceClosureRegionClosure: public HeapRegionClosure {
@@ -3598,8 +3592,6 @@ G1CollectedHeap* G1CollectedHeap::heap() {
 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
   // always_do_update_barrier = false;
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
-  // Call allocation profiler
-  AllocationProfiler::iterate_since_last_gc();
   // Fill TLAB's and such
   ensure_parsability(true);
 }
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

@@ -1360,11 +1360,6 @@ public:
     object_iterate(cl);
   }

-  // Iterate over all objects allocated since the last collection, calling
-  // "cl.do_object" on each.  The heap must have been initialized properly
-  // to support this function, or else this call will fail.
-  virtual void object_iterate_since_last_GC(ObjectClosure* cl);
-
   // Iterate over all spaces in use in the heap, in ascending address order.
   virtual void space_iterate(SpaceClosure* cl);
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp

@@ -43,7 +43,6 @@
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "runtime/aprofiler.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/synchronizer.hpp"
src/share/vm/memory/allocation.cpp

@@ -236,10 +236,11 @@ class ChunkPool: public CHeapObj<mtInternal> {
   size_t       _num_used;     // number of chunks currently checked out
   const size_t _size;         // size of each chunk (must be uniform)

-  // Our three static pools
+  // Our four static pools
   static ChunkPool* _large_pool;
   static ChunkPool* _medium_pool;
   static ChunkPool* _small_pool;
+  static ChunkPool* _tiny_pool;

   // return first element or null
   void* get_first() {
@@ -319,15 +320,18 @@ class ChunkPool: public CHeapObj<mtInternal> {
   static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
   static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
   static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
+  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool;   }

   static void initialize() {
     _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
     _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
     _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
+    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
   }

   static void clean() {
     enum { BlocksToKeep = 5 };
+    _tiny_pool->free_all_but(BlocksToKeep);
     _small_pool->free_all_but(BlocksToKeep);
     _medium_pool->free_all_but(BlocksToKeep);
     _large_pool->free_all_but(BlocksToKeep);
@@ -337,6 +341,7 @@ class ChunkPool: public CHeapObj<mtInternal> {
 ChunkPool* ChunkPool::_large_pool  = NULL;
 ChunkPool* ChunkPool::_medium_pool = NULL;
 ChunkPool* ChunkPool::_small_pool  = NULL;
+ChunkPool* ChunkPool::_tiny_pool   = NULL;

 void chunkpool_init() {
   ChunkPool::initialize();
@@ -376,6 +381,7 @@ void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode,
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
+  case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
@@ -392,6 +398,7 @@ void Chunk::operator delete(void* p) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
+  case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c); break;
   default: os::free(c, mtChunk);
   }
 }
src/share/vm/memory/allocation.hpp

@@ -353,7 +353,8 @@ class Chunk: CHeapObj<mtChunk> {
     slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
 #endif

-    init_size  =  1*K  - slack, // Size of first chunk
+    tiny_size  =  256  - slack, // Size of first chunk (tiny)
+    init_size  =  1*K  - slack, // Size of first chunk (normal aka small)
     medium_size= 10*K  - slack, // Size of medium-sized chunk
     size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
     non_pool_size = init_size + 32 // An initial size which is not one of above
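Together with the handles.hpp hunk further down, which makes HandleArea start its Arena from a tiny chunk, the new tiny_size constant lets short-lived handle areas be served from a pooled 256-byte chunk rather than a 1 KB one. The following standalone program (not HotSpot code) only illustrates how the four size constants line up with the four pools selected in Chunk::operator new above; the slack value of 20 is the one shown in this hunk and is platform-dependent in the real header:

#include <cstdio>
#include <cstddef>

int main() {
  const size_t K = 1024, slack = 20;   // assumed values, see note above
  struct { const char* pool; size_t length; } pools[] = {
    { "tiny_pool",   256  - slack },   // Chunk::tiny_size (new in this change)
    { "small_pool",  1*K  - slack },   // Chunk::init_size
    { "medium_pool", 10*K - slack },   // Chunk::medium_size
    { "large_pool",  32*K - slack },   // Chunk::size
  };
  for (const auto& p : pools)
    std::printf("%-12s serves chunks of length %zu bytes\n", p.pool, p.length);
  return 0;
}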
src/share/vm/memory/defNewGeneration.cpp

@@ -450,11 +450,6 @@ void DefNewGeneration::compute_new_size() {
   }
 }

-void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
-  // $$$ This may be wrong in case of "scavenge failure"?
-  eden()->object_iterate(cl);
-}
-
 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
   assert(false, "NYI -- are you sure you want to call this?");
 }
src/share/vm/memory/defNewGeneration.hpp

@@ -252,7 +252,6 @@ protected:
   // Iteration
   void object_iterate(ObjectClosure* blk);
-  void object_iterate_since_last_GC(ObjectClosure* cl);

   void younger_refs_iterate(OopsInGenClosure* cl);
src/share/vm/memory/genCollectedHeap.cpp

@@ -42,7 +42,6 @@
 #include "memory/space.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
-#include "runtime/aprofiler.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/handles.hpp"
@@ -873,12 +872,6 @@ void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
   }
 }

-void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->object_iterate_since_last_GC(cl);
-  }
-}
-
 Space* GenCollectedHeap::space_containing(const void* addr) const {
   for (int i = 0; i < _n_gens; i++) {
     Space* res = _gens[i]->space_containing(addr);
@@ -1186,8 +1179,6 @@ void GenCollectedHeap::gc_prologue(bool full) {
   CollectedHeap::accumulate_statistics_all_tlabs();
   ensure_parsability(true);   // retire TLABs
-  // Call allocation profiler
-  AllocationProfiler::iterate_since_last_gc();
   // Walk generations
   GenGCPrologueClosure blk(full);
   generation_iterate(&blk, false); // not old-to-young.
src/share/vm/memory/genCollectedHeap.hpp

@@ -222,7 +222,6 @@ public:
   void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
   void object_iterate(ObjectClosure* cl);
   void safe_object_iterate(ObjectClosure* cl);
-  void object_iterate_since_last_GC(ObjectClosure* cl);
   Space* space_containing(const void* addr) const;

   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
src/share/vm/memory/generation.cpp

@@ -811,16 +811,6 @@ void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
   blk->do_space(_the_space);
 }

-void OneContigSpaceCardGeneration::object_iterate_since_last_GC(ObjectClosure* blk) {
-  // Deal with delayed initialization of _the_space,
-  // and lack of initialization of _last_gc.
-  if (_last_gc.space() == NULL) {
-    assert(the_space() != NULL, "shouldn't be NULL");
-    _last_gc = the_space()->bottom_mark();
-  }
-  the_space()->object_iterate_from(_last_gc, blk);
-}
-
 void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
   blk->set_generation(this);
   younger_refs_in_space_iterate(_the_space, blk);
src/share/vm/memory/generation.hpp

@@ -551,12 +551,6 @@ class Generation: public CHeapObj<mtGC> {
   // the heap. This defaults to object_iterate() unless overridden.
   virtual void safe_object_iterate(ObjectClosure* cl);

-  // Iterate over all objects allocated in the generation since the last
-  // collection, calling "cl.do_object" on each. The generation must have
-  // been initialized properly to support this function, or else this call
-  // will fail.
-  virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;
-
   // Apply "cl->do_oop" to (the address of) all and only all the ref fields
   // in the current generation that contain pointers to objects in younger
   // generations. Objects allocated since the last "save_marks" call are
@@ -724,7 +718,6 @@ class OneContigSpaceCardGeneration: public CardGeneration {
   // Iteration
   void object_iterate(ObjectClosure* blk);
   void space_iterate(SpaceClosure* blk, bool usedOnly = false);
-  void object_iterate_since_last_GC(ObjectClosure* cl);
   void younger_refs_iterate(OopsInGenClosure* blk);
src/share/vm/memory/sharedHeap.hpp

@@ -166,11 +166,6 @@ public:
   // Same as above, restricted to a memory region.
   virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;

-  // Iterate over all objects allocated since the last collection, calling
-  // "cl->do_object" on each.  The heap must have been initialized properly
-  // to support this function, or else this call will fail.
-  virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;
-
   // Iterate over all spaces in use in the heap, in an undefined order.
   virtual void space_iterate(SpaceClosure* cl) = 0;
src/share/vm/memory/universe.cpp

@@ -52,7 +52,6 @@
 #include "oops/oop.inline.hpp"
 #include "oops/typeArrayKlass.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
-#include "runtime/aprofiler.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/fprofiler.hpp"
src/share/vm/oops/arrayKlass.cpp

@@ -71,7 +71,6 @@ Method* ArrayKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
 }

 ArrayKlass::ArrayKlass(Symbol* name) {
-  set_alloc_size(0);
   set_name(name);

   set_super(Universe::is_bootstrapping() ? (Klass*)NULL : SystemDictionary::Object_klass());
@@ -161,12 +160,6 @@ void ArrayKlass::array_klasses_do(void f(Klass* k)) {
   }
 }

-
-void ArrayKlass::with_array_klasses_do(void f(Klass* k)) {
-  array_klasses_do(f);
-}
-
 // GC support

 void ArrayKlass::oops_do(OopClosure* cl) {
src/share/vm/oops/arrayKlass.hpp

@@ -39,7 +39,6 @@ class ArrayKlass: public Klass {
   Klass* volatile _higher_dimension;  // Refers the (n+1)'th-dimensional array (if present).
   Klass* volatile _lower_dimension;   // Refers the (n-1)'th-dimensional array (if present).
   int      _vtable_len;               // size of vtable for this klass
-  juint    _alloc_size;               // allocation profiling support
   oop      _component_mirror;         // component type, as a java/lang/Class

 protected:
@@ -65,10 +64,6 @@ class ArrayKlass: public Klass {
   void set_lower_dimension(Klass* k) { _lower_dimension = k; }
   Klass** adr_lower_dimension()      { return (Klass**)&this->_lower_dimension;}

-  // Allocation profiling support
-  juint alloc_size() const           { return _alloc_size; }
-  void set_alloc_size(juint n)       { _alloc_size = n; }
-
   // offset of first element, including any padding for the sake of alignment
   int  array_header_in_bytes() const { return layout_helper_header_size(layout_helper()); }
   int  log2_element_size() const     { return layout_helper_log2_element_size(layout_helper()); }
@@ -126,7 +121,6 @@ class ArrayKlass: public Klass {
   // Iterators
   void array_klasses_do(void f(Klass* k));
   void array_klasses_do(void f(Klass* k, TRAPS), TRAPS);
-  void with_array_klasses_do(void f(Klass* k));

   // GC support
   virtual void oops_do(OopClosure* cl);
src/share/vm/oops/instanceKlass.cpp

@@ -1321,12 +1321,6 @@ void InstanceKlass::array_klasses_do(void f(Klass* k)) {
     ArrayKlass::cast(array_klasses())->array_klasses_do(f);
 }

-
-void InstanceKlass::with_array_klasses_do(void f(Klass* k)) {
-  f(this);
-  array_klasses_do(f);
-}
-
 #ifdef ASSERT
 static int linear_search(Array<Method*>* methods, Symbol* name, Symbol* signature) {
   int len = methods->length();
src/share/vm/oops/instanceKlass.hpp

@@ -794,7 +794,6 @@ class InstanceKlass: public Klass {
   void methods_do(void f(Method* method));
   void array_klasses_do(void f(Klass* k));
   void array_klasses_do(void f(Klass* k, TRAPS), TRAPS);
-  void with_array_klasses_do(void f(Klass* k));
   bool super_types_do(SuperTypeClosure* blk);

   // Casting from Klass*
@@ -874,10 +873,6 @@ class InstanceKlass: public Klass {
     }
   }

-  // Allocation profiling support
-  juint alloc_size() const            { return _alloc_count * size_helper(); }
-  void set_alloc_size(juint n)        {}
-
   // Use this to return the size of an instance in heap words:
   int size_helper() const {
     return layout_helper_to_size_helper(layout_helper());
src/share/vm/oops/klass.cpp

@@ -168,7 +168,6 @@ Klass::Klass() {
   set_subklass(NULL);
   set_next_sibling(NULL);
   set_next_link(NULL);
-  set_alloc_count(0);
   TRACE_INIT_ID(this);

   set_prototype_header(markOopDesc::prototype());
@@ -543,12 +542,6 @@ Klass* Klass::array_klass_impl(bool or_null, TRAPS) {
   return NULL;
 }

-
-void Klass::with_array_klasses_do(void f(Klass* k)) {
-  f(this);
-}
-
-
 oop Klass::class_loader() const { return class_loader_data()->class_loader(); }

 const char* Klass::external_name() const {
src/share/vm/oops/klass.hpp

@@ -79,7 +79,6 @@
 //    [last_biased_lock_bulk_revocation_time] (64 bits)
 //    [prototype_header]
 //    [biased_lock_revocation_count]
-//    [alloc_count ]
 //    [_modified_oops]
 //    [_accumulated_modified_oops]
 //    [trace_id]
@@ -171,8 +170,6 @@ class Klass : public Metadata {
   markOop  _prototype_header;   // Used when biased locking is both enabled and disabled for this type
   jint     _biased_lock_revocation_count;

-  juint    _alloc_count;        // allocation profiling support
-
   TRACE_DEFINE_KLASS_TRACE_ID;

   // Remembered sets support for the oops in the klasses.
@@ -290,11 +287,6 @@ class Klass : public Metadata {
   void set_next_sibling(Klass* s);

  public:
-  // Allocation profiling support
-  juint alloc_count() const            { return _alloc_count; }
-  void set_alloc_count(juint n)        { _alloc_count = n; }
-  virtual juint alloc_size() const = 0;
-  virtual void set_alloc_size(juint n) = 0;
-
   // Compiler support
   static ByteSize super_offset()       { return in_ByteSize(offset_of(Klass, _super)); }
@@ -677,7 +669,6 @@ class Klass : public Metadata {
 #endif // INCLUDE_ALL_GCS

   virtual void array_klasses_do(void f(Klass* k)) {}
-  virtual void with_array_klasses_do(void f(Klass* k));

   // Return self, except for abstract classes with exactly 1
   // implementor.  Then return the 1 concrete implementation.
src/share/vm/prims/jvm.cpp

@@ -1121,26 +1121,6 @@ JVM_ENTRY(jobject, JVM_GetProtectionDomain(JNIEnv *env, jclass cls))
 JVM_END

-
-// Obsolete since 1.2 (Class.setProtectionDomain removed), although
-// still defined in core libraries as of 1.5.
-JVM_ENTRY(void, JVM_SetProtectionDomain(JNIEnv *env, jclass cls, jobject protection_domain))
-  JVMWrapper("JVM_SetProtectionDomain");
-  if (JNIHandles::resolve(cls) == NULL) {
-    THROW(vmSymbols::java_lang_NullPointerException());
-  }
-  if (!java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {
-    // Call is ignored for primitive types
-    Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls));
-
-    // cls won't be an array, as this called only from ClassLoader.defineClass
-    if (k->oop_is_instance()) {
-      oop pd = JNIHandles::resolve(protection_domain);
-      assert(pd == NULL || pd->is_oop(), "just checking");
-      java_lang_Class::set_protection_domain(k->java_mirror(), pd);
-    }
-  }
-JVM_END
-
 static bool is_authorized(Handle context, instanceKlassHandle klass, TRAPS) {
   // If there is a security manager and protection domain, check the access
   // in the protection domain, otherwise it is authorized.
src/share/vm/prims/jvm.h

@@ -471,9 +471,6 @@ JVM_SetClassSigners(JNIEnv *env, jclass cls, jobjectArray signers);
 JNIEXPORT jobject JNICALL
 JVM_GetProtectionDomain(JNIEnv *env, jclass cls);

-JNIEXPORT void JNICALL
-JVM_SetProtectionDomain(JNIEnv *env, jclass cls, jobject protection_domain);
-
 JNIEXPORT jboolean JNICALL
 JVM_IsArrayClass(JNIEnv *env, jclass cls);
src/share/vm/runtime/aprofiler.cpp (deleted, file mode 100644 → 0; previous content from parent 2a09e09a)

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/aprofiler.hpp"

bool AllocationProfiler::_active = false;
GrowableArray<Klass*>* AllocationProfiler::_print_array = NULL;

class AllocProfClosure : public ObjectClosure {
 public:
  void do_object(oop obj) {
    Klass* k = obj->klass();
    k->set_alloc_count(k->alloc_count() + 1);
    k->set_alloc_size(k->alloc_size() + obj->size());
  }
};

void AllocationProfiler::iterate_since_last_gc() {
  if (is_active()) {
    AllocProfClosure blk;
    GenCollectedHeap* heap = GenCollectedHeap::heap();
    heap->object_iterate_since_last_GC(&blk);
  }
}

void AllocationProfiler::engage() {
  _active = true;
}

void AllocationProfiler::disengage() {
  _active = false;
}

void AllocationProfiler::add_class_to_array(Klass* k) {
  _print_array->append(k);
}

void AllocationProfiler::add_classes_to_array(Klass* k) {
  // Iterate over klass and all array klasses for klass
  k->with_array_klasses_do(&AllocationProfiler::add_class_to_array);
}

int AllocationProfiler::compare_classes(Klass** k1, Klass** k2) {
  // Sort by total allocation size
  return (*k2)->alloc_size() - (*k1)->alloc_size();
}

int AllocationProfiler::average(size_t alloc_size, int alloc_count) {
  return (int) ((double) (alloc_size * BytesPerWord) / MAX2(alloc_count, 1) + 0.5);
}

void AllocationProfiler::sort_and_print_array(size_t cutoff) {
  _print_array->sort(&AllocationProfiler::compare_classes);
  tty->print_cr("________________Size" "__Instances" "__Average" "__Class________________");
  size_t total_alloc_size = 0;
  int total_alloc_count = 0;
  for (int index = 0; index < _print_array->length(); index++) {
    Klass* k = _print_array->at(index);
    size_t alloc_size = k->alloc_size();
    if (alloc_size > cutoff) {
      int alloc_count = k->alloc_count();
#ifdef PRODUCT
      const char* name = k->external_name();
#else
      const char* name = k->internal_name();
#endif
      tty->print_cr("%20u %10u %8u  %s",
        alloc_size * BytesPerWord,
        alloc_count,
        average(alloc_size, alloc_count),
        name);
      total_alloc_size += alloc_size;
      total_alloc_count += alloc_count;
    }
    k->set_alloc_count(0);
    k->set_alloc_size(0);
  }
  tty->print_cr("%20u %10u %8u  --total--",
    total_alloc_size * BytesPerWord,
    total_alloc_count,
    average(total_alloc_size, total_alloc_count));
  tty->cr();
}

void AllocationProfiler::print(size_t cutoff) {
  ResourceMark rm;
  assert(!is_active(), "AllocationProfiler cannot be active while printing profile");
  tty->cr();
  tty->print_cr("Allocation profile (sizes in bytes, cutoff = " SIZE_FORMAT " bytes):", cutoff * BytesPerWord);
  tty->cr();
  // Print regular instance klasses and basic type array klasses
  _print_array = new GrowableArray<Klass*>(SystemDictionary::number_of_classes()*2);
  SystemDictionary::classes_do(&add_classes_to_array);
  Universe::basic_type_classes_do(&add_classes_to_array);
  sort_and_print_array(cutoff);
  // This used to print metadata in the permgen but since there isn't a permgen
  // anymore, it is not yet implemented.
}
src/share/vm/runtime/aprofiler.hpp (deleted, file mode 100644 → 0; previous content from parent 2a09e09a)

/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_APROFILER_HPP
#define SHARE_VM_RUNTIME_APROFILER_HPP

#include "memory/allocation.hpp"
#include "memory/universe.hpp"
#include "oops/klass.hpp"
#include "utilities/top.hpp"

// A simple allocation profiler for Java. The profiler collects and prints
// the number and total size of instances allocated per class, including
// array classes.
//
// The profiler is currently global for all threads. It can be changed to a
// per threads profiler by keeping a more elaborate data structure and calling
// iterate_since_last_scavenge at thread switches.

class AllocationProfiler: AllStatic {
  friend class GenCollectedHeap;
  friend class G1CollectedHeap;
  friend class MarkSweep;
 private:
  static bool _active;                          // tells whether profiler is active
  static GrowableArray<Klass*>* _print_array;   // temporary array for printing

  // Utility printing functions
  static void add_class_to_array(Klass* k);
  static void add_classes_to_array(Klass* k);
  static int  compare_classes(Klass** k1, Klass** k2);
  static int  average(size_t alloc_size, int alloc_count);
  static void sort_and_print_array(size_t cutoff);

  // Call for collecting allocation information. Called at scavenge, mark-sweep and disengage.
  static void iterate_since_last_gc();

 public:
  // Start profiler
  static void engage();
  // Stop profiler
  static void disengage();
  // Tells whether profiler is active
  static bool is_active() { return _active; }
  // Print profile
  static void print(size_t cutoff);   // Cutoff in total allocation size (in words)
};

#endif // SHARE_VM_RUNTIME_APROFILER_HPP
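For context, the interface above was driven by the -Xaprof command-line flag (parsed in the arguments.cpp hunk below), engaged during Threads::create_vm() in thread.cpp, sampled from each collector's gc_prologue() via iterate_since_last_gc(), and finally disengaged and printed from before_exit() in java.cpp. The printed profile was a per-class table of total size, instance count, and average size, sorted by allocation volume, as the print routines in aprofiler.cpp above show.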
src/share/vm/runtime/arguments.cpp

@@ -68,7 +68,6 @@ char* Arguments::_java_command = NULL;
 SystemProperty* Arguments::_system_properties = NULL;
 const char*  Arguments::_gc_log_filename = NULL;
 bool   Arguments::_has_profile = false;
-bool   Arguments::_has_alloc_profile = false;
 uintx  Arguments::_min_heap_size = 0;
 Arguments::Mode Arguments::_mode = _mixed;
 bool   Arguments::_java_compiler = false;
@@ -1986,23 +1985,6 @@ bool Arguments::check_vm_args_consistency() {
   status = status && check_gc_consistency();
   status = status && check_stack_pages();

-  if (_has_alloc_profile) {
-    if (UseParallelGC || UseParallelOldGC) {
-      jio_fprintf(defaultStream::error_stream(),
-                  "error:  invalid argument combination.\n"
-                  "Allocation profiling (-Xaprof) cannot be used together with "
-                  "Parallel GC (-XX:+UseParallelGC or -XX:+UseParallelOldGC).\n");
-      status = false;
-    }
-    if (UseConcMarkSweepGC) {
-      jio_fprintf(defaultStream::error_stream(),
-                  "error:  invalid argument combination.\n"
-                  "Allocation profiling (-Xaprof) cannot be used together with "
-                  "the CMS collector (-XX:+UseConcMarkSweepGC).\n");
-      status = false;
-    }
-  }
-
   if (CMSIncrementalMode) {
     if (!UseConcMarkSweepGC) {
       jio_fprintf(defaultStream::error_stream(),
@@ -2700,9 +2682,6 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
                   "Flat profiling is not supported in this VM.\n");
       return JNI_ERR;
 #endif // INCLUDE_FPROF
-    // -Xaprof
-    } else if (match_option(option, "-Xaprof", &tail)) {
-      _has_alloc_profile = true;
     // -Xconcurrentio
     } else if (match_option(option, "-Xconcurrentio", &tail)) {
       FLAG_SET_CMDLINE(bool, UseLWPSynchronization, true);
src/share/vm/runtime/arguments.hpp

@@ -262,7 +262,6 @@ class Arguments : AllStatic {
   // Option flags
   static bool   _has_profile;
-  static bool   _has_alloc_profile;
   static const char*  _gc_log_filename;
   static uintx  _min_heap_size;
@@ -464,9 +463,8 @@ class Arguments : AllStatic {
   // -Xloggc:<file>, if not specified will be NULL
   static const char* gc_log_filename()      { return _gc_log_filename; }

-  // -Xprof/-Xaprof
+  // -Xprof
   static bool has_profile()                 { return _has_profile; }
-  static bool has_alloc_profile()           { return _has_alloc_profile; }

   // -Xms, -Xmx
   static uintx min_heap_size()              { return _min_heap_size; }
src/share/vm/runtime/handles.hpp

@@ -227,7 +227,7 @@ class HandleArea: public Arena {
   HandleArea* _prev;          // link to outer (older) area
  public:
   // Constructor
-  HandleArea(HandleArea* prev) {
+  HandleArea(HandleArea* prev) : Arena(Chunk::tiny_size) {
     debug_only(_handle_mark_nesting    = 0);
     debug_only(_no_handle_mark_nesting = 0);
     _prev = prev;
src/share/vm/runtime/java.cpp

@@ -42,7 +42,6 @@
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "runtime/aprofiler.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/compilationPolicy.hpp"
@@ -509,16 +508,6 @@ void before_exit(JavaThread * thread) {
     }
   }

-  if (Arguments::has_alloc_profile()) {
-    HandleMark hm;
-    // Do one last collection to enumerate all the objects
-    // allocated since the last one.
-    Universe::heap()->collect(GCCause::_allocation_profiler);
-    AllocationProfiler::disengage();
-    AllocationProfiler::print(0);
-  }
-
   if (PrintBytecodeHistogram) {
     BytecodeHistogram::print();
   }
src/share/vm/runtime/thread.cpp

@@ -45,7 +45,6 @@
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiThreadState.hpp"
 #include "prims/privilegedStack.hpp"
-#include "runtime/aprofiler.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/deoptimization.hpp"
@@ -3677,7 +3676,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
   }

   if (Arguments::has_profile())       FlatProfiler::engage(main_thread, true);
-  if (Arguments::has_alloc_profile()) AllocationProfiler::engage();
   if (MemProfiling)                   MemProfiler::engage();
   StatSampler::engage();
   if (CheckJNICalls)                  JniPeriodicChecker::engage();
src/share/vm/runtime/vmStructs.cpp

(The +6/-8 in this file are the removal of the two allocation-profiling entries marked below, plus whitespace-only realignment of the trailing line-continuation backslashes on the neighbouring macro lines.)

@@ -263,7 +263,7 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
      unchecked_c2_static_field)                                            \
                                                                            \
   /******************************************************************/    \
   /* OopDesc and Klass hierarchies (NOTE: MethodData* incomplete)   */    \
   /******************************************************************/    \
                                                                            \
   volatile_nonstatic_field(oopDesc, _mark, markOop)                        \
@@ -274,21 +274,20 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
   volatile_nonstatic_field(ArrayKlass, _higher_dimension, Klass*)          \
   volatile_nonstatic_field(ArrayKlass, _lower_dimension, Klass*)           \
   nonstatic_field(ArrayKlass, _vtable_len, int)                            \
-  nonstatic_field(ArrayKlass, _alloc_size, juint)                          \
   nonstatic_field(ArrayKlass, _component_mirror, oop)                      \
   nonstatic_field(CompiledICHolder, _holder_method, Method*)               \
   nonstatic_field(CompiledICHolder, _holder_klass, Klass*)                 \
   nonstatic_field(ConstantPool, _tags, Array<u1>*)                         \
   nonstatic_field(ConstantPool, _cache, ConstantPoolCache*)                \
   nonstatic_field(ConstantPool, _pool_holder, InstanceKlass*)              \
   nonstatic_field(ConstantPool, _operands, Array<u2>*)                     \
   nonstatic_field(ConstantPool, _length, int)                              \
   nonstatic_field(ConstantPool, _resolved_references, jobject)             \
   nonstatic_field(ConstantPool, _reference_map, Array<u2>*)                \
   nonstatic_field(ConstantPoolCache, _length, int)                         \
   nonstatic_field(ConstantPoolCache, _constant_pool, ConstantPool*)        \
   nonstatic_field(InstanceKlass, _array_klasses, Klass*)                   \
   nonstatic_field(InstanceKlass, _methods, Array<Method*>*)                \
   nonstatic_field(InstanceKlass, _local_interfaces, Array<Klass*>*)        \
   nonstatic_field(InstanceKlass, _transitive_interfaces, Array<Klass*>*)   \
   nonstatic_field(InstanceKlass, _fields, Array<u2>*)                      \
@@ -336,9 +335,8 @@ typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
   nonstatic_field(Klass, _access_flags, AccessFlags)                       \
   nonstatic_field(Klass, _subklass, Klass*)                                \
   nonstatic_field(Klass, _next_sibling, Klass*)                            \
-  nonstatic_field(Klass, _alloc_count, juint)                              \
   nonstatic_field(MethodData, _size, int)                                  \
   nonstatic_field(MethodData, _method, Method*)                            \
   nonstatic_field(MethodData, _data_size, int)                             \
   nonstatic_field(MethodData, _data[0], intptr_t)                          \
   nonstatic_field(MethodData, _nof_decompiles, uint)                       \