openanolis / dragonwell8_hotspot: commit 746de19d
Merge commit, authored Oct 22, 2012 by coleenp
Parents: 2e80078c 3144137d

Showing 33 changed files with 861 additions and 396 deletions (+861, -396)
agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java        +11  -8
agent/src/share/classes/sun/jvm/hotspot/runtime/Bytes.java                +1   -15
agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java +40  -5
src/os/bsd/vm/perfMemory_bsd.cpp                                          +9   -0
src/os/linux/vm/perfMemory_linux.cpp                                      +9   -0
src/os/solaris/vm/os_solaris.cpp                                          +3   -1
src/os/solaris/vm/perfMemory_solaris.cpp                                  +9   -0
src/os/windows/vm/perfMemory_windows.cpp                                  +12  -0
src/share/vm/memory/allocation.cpp                                        +9   -7
src/share/vm/memory/allocation.hpp                                        +4   -3
src/share/vm/memory/filemap.cpp                                           +5   -15
src/share/vm/memory/filemap.hpp                                           +0   -1
src/share/vm/memory/metaspaceShared.cpp                                   +2   -10
src/share/vm/memory/resourceArea.hpp                                      +16  -4
src/share/vm/runtime/arguments.cpp                                        +9   -4
src/share/vm/runtime/handles.cpp                                          +6   -1
src/share/vm/runtime/handles.hpp                                          +1   -0
src/share/vm/runtime/handles.inline.hpp                                   +6   -1
src/share/vm/runtime/os.cpp                                               +8   -8
src/share/vm/runtime/thread.cpp                                           +16  -9
src/share/vm/services/attachListener.cpp                                  +2   -0
src/share/vm/services/memBaseline.cpp                                     +152 -79
src/share/vm/services/memBaseline.hpp                                     +2   -3
src/share/vm/services/memPtr.cpp                                          +0   -32
src/share/vm/services/memPtr.hpp                                          +49  -58
src/share/vm/services/memRecorder.cpp                                     +11  -6
src/share/vm/services/memRecorder.hpp                                     +1   -0
src/share/vm/services/memReporter.cpp                                     +57  -3
src/share/vm/services/memReporter.hpp                                     +13  -0
src/share/vm/services/memSnapshot.cpp                                     +291 -74
src/share/vm/services/memSnapshot.hpp                                     +93  -41
src/share/vm/services/memTracker.cpp                                      +5   -2
src/share/vm/services/memTracker.hpp                                      +9   -6
agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java @ 746de19d

@@ -272,9 +272,10 @@ public class Bytecodes {
   public static final int _fast_aldc            = 229;
   public static final int _fast_aldc_w          = 230;
   public static final int _return_register_finalizer = 231;
-  public static final int _shouldnotreachhere   = 232; // For debugging
-  public static final int number_of_codes       = 233;
+  public static final int _invokehandle         = 232;
+  public static final int _shouldnotreachhere   = 233; // For debugging
+  public static final int number_of_codes       = 234;

   // Flag bits derived from format strings, can_trap, can_rewrite, etc.:
   // semantic flags:

@@ -787,20 +788,22 @@ public class Bytecodes {
     def(_fast_aaccess_0      , "fast_aaccess_0"      , "b_JJ" , null    , BasicType.getTObject() ,  1, true , _aload_0      );
     def(_fast_faccess_0      , "fast_faccess_0"      , "b_JJ" , null    , BasicType.getTObject() ,  1, true , _aload_0      );

-    def(_fast_iload          , "fast_iload"          , "bi"   , null    , BasicType.getTInt()    ,  1, false, _iload);
-    def(_fast_iload2         , "fast_iload2"         , "bi_i" , null    , BasicType.getTInt()    ,  2, false, _iload);
-    def(_fast_icaload        , "fast_icaload"        , "bi_"  , null    , BasicType.getTInt()    ,  0, false, _iload);
+    def(_fast_iload          , "fast_iload"          , "bi"    , null    , BasicType.getTInt()    ,  1, false, _iload);
+    def(_fast_iload2         , "fast_iload2"         , "bi_i"  , null    , BasicType.getTInt()    ,  2, false, _iload);
+    def(_fast_icaload        , "fast_icaload"        , "bi_"   , null    , BasicType.getTInt()    ,  0, false, _iload);

     // Faster method invocation.
-    def(_fast_invokevfinal   , "fast_invokevfinal"   , "bJJ"  , null    , BasicType.getTIllegal(), -1, true , _invokevirtual);
+    def(_fast_invokevfinal   , "fast_invokevfinal"   , "bJJ"   , null    , BasicType.getTIllegal(), -1, true , _invokevirtual);

     def(_fast_linearswitch   , "fast_linearswitch"   , ""     , null    , BasicType.getTVoid()   , -1, false, _lookupswitch );
     def(_fast_binaryswitch   , "fast_binaryswitch"   , ""     , null    , BasicType.getTVoid()   , -1, false, _lookupswitch );

-    def(_fast_aldc           , "fast_aldc"           , "bj"   , null    , BasicType.getTObject(),   1, true , _ldc  );
-    def(_fast_aldc_w         , "fast_aldc_w"         , "bJJ"  , null    , BasicType.getTObject(),   1, true , _ldc_w);
-
     def(_return_register_finalizer, "return_register_finalizer", "b", null, BasicType.getTVoid(), 0, true, _return);

+    def(_fast_aldc           , "fast_aldc"           , "bj"    , null    , BasicType.getTObject(),   1, true , _ldc  );
+    def(_fast_aldc_w         , "fast_aldc_w"         , "bJJ"   , null    , BasicType.getTObject(),   1, true , _ldc_w);
+
+    // special handling of signature-polymorphic methods
+    def(_invokehandle        , "invokehandle"        , "bJJ"   , null    , BasicType.getTIllegal(), -1, true , _invokevirtual);
+
     def(_shouldnotreachhere  , "_shouldnotreachhere" , "b"    , null    , BasicType.getTVoid()   ,  0, false);
agent/src/share/classes/sun/jvm/hotspot/runtime/Bytes.java @ 746de19d

@@ -30,24 +30,10 @@ import sun.jvm.hotspot.utilities.PlatformInfo;
 /** Encapsulates some byte-swapping operations defined in the VM */

 public class Bytes {
-  // swap if client platform is different from server's.
   private boolean swap;

   public Bytes(MachineDescription machDesc) {
-    String cpu = PlatformInfo.getCPU();
-    if (cpu.equals("sparc")) {
-      if (machDesc.isBigEndian()) {
-        swap = false;
-      } else {
-        swap = true;
-      }
-    } else { // intel
-      if (machDesc.isBigEndian()) {
-        swap = true;
-      } else {
-        swap = false;
-      }
-    }
+    swap = !machDesc.isBigEndian();
   }

   /** Should only swap if the hardware's underlying byte order is
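The deleted branches implemented "swap if the SA host's byte order differs from the target VM's"; the replacement keys swapping to the target VM alone, since class-file order is big-endian and HotSpot rewrites operands in the target's native order. A standalone sketch of that rule (names and helper are mine, not patch code):

  #include <cstdint>

  // Convert a 16-bit value from the target VM's native order to Java
  // class-file (big-endian) order: a swap is needed exactly when the
  // target is little-endian, regardless of where the SA itself runs.
  static inline uint16_t swap16(uint16_t v) {
    return static_cast<uint16_t>((v << 8) | (v >> 8));
  }

  uint16_t to_class_file_order(uint16_t native_value, bool target_big_endian) {
    return target_big_endian ? native_value : swap16(native_value);
  }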
agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java @ 746de19d

@@ -29,6 +29,11 @@ import sun.jvm.hotspot.interpreter.*;
 import sun.jvm.hotspot.utilities.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.runtime.*;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.security.AccessControlContext;
+import java.security.PrivilegedExceptionAction;
+import java.security.PrivilegedActionException;

 public class ByteCodeRewriter
 {

@@ -38,8 +43,20 @@ public class ByteCodeRewriter
     private byte[] code;
     private Bytes  bytes;

-    public static final boolean DEBUG = false;
     private static final int jintSize = 4;
+    public static final boolean DEBUG;
+
+    static {
+        String debug = (String) AccessController.doPrivileged(
+            new PrivilegedAction() {
+                public Object run() {
+                    return System.getProperty("sun.jvm.hotspot.tools.jcore.ByteCodeRewriter.DEBUG");
+                }
+            }
+        );
+        DEBUG = (debug != null ? debug.equalsIgnoreCase("true") : false);
+    }

     protected void debugMessage(String message) {
         System.out.println(message);

@@ -54,6 +71,18 @@ public class ByteCodeRewriter
     }

+    protected short getConstantPoolIndexFromRefMap(int rawcode, int bci) {
+        int refIndex;
+        String fmt = Bytecodes.format(rawcode);
+        switch (fmt.length()) {
+            case 2: refIndex = 0xFF & method.getBytecodeByteArg(bci); break;
+            case 3: refIndex = 0xFFFF & bytes.swapShort(method.getBytecodeShortArg(bci)); break;
+            default: throw new IllegalArgumentException();
+        }
+
+        return (short) cpool.objectToCPIndex(refIndex);
+    }
+
     protected short getConstantPoolIndex(int rawcode, int bci) {
        // get ConstantPool index from ConstantPoolCacheIndex at given bci
        String fmt = Bytecodes.format(rawcode);

@@ -95,6 +124,12 @@ public class ByteCodeRewriter
         int hotspotcode = Bytecodes._illegal;
         int len = 0;

+        if (DEBUG) {
+            String msg = method.getMethodHolder().getName().asString() + "." +
+                         method.getName().asString() +
+                         method.getSignature().asString();
+            debugMessage(msg);
+        }
         for (int bci = 0; bci < code.length;) {
             hotspotcode = Bytecodes.codeAt(method, bci);
             bytecode = Bytecodes.javaCode(hotspotcode);

@@ -133,15 +168,15 @@ public class ByteCodeRewriter
                 case Bytecodes._ldc_w:
                     if (hotspotcode != bytecode) {
-                        // fast_aldc_w puts constant in CP cache
-                        cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
+                        // fast_aldc_w puts constant in reference map
+                        cpoolIndex = getConstantPoolIndexFromRefMap(hotspotcode, bci + 1);
                         writeShort(code, bci + 1, cpoolIndex);
                     }
                     break;
                 case Bytecodes._ldc:
                     if (hotspotcode != bytecode) {
-                        // fast_aldc puts constant in CP cache
-                        cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1);
+                        // fast_aldc puts constant in reference map
+                        cpoolIndex = getConstantPoolIndexFromRefMap(hotspotcode, bci + 1);
                         code[bci + 1] = (byte)(cpoolIndex);
                     }
                     break;
src/os/bsd/vm/perfMemory_bsd.cpp @ 746de19d

@@ -30,6 +30,7 @@
 #include "os_bsd.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/perfMemory.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/exceptions.hpp"

 // put OS-includes here

@@ -753,6 +754,10 @@ static char* mmap_create_shared(size_t size) {
   // clear the shared memory region
   (void)::memset((void*) mapAddress, 0, size);

+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+
   return mapAddress;
 }

@@ -912,6 +917,10 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
               "Could not map PerfMemory");
   }

+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+
   *addr = mapAddress;
   *sizep = size;
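The same two-line addition recurs in each perfMemory_*.cpp below: these shared mappings are created with the platform APIs directly rather than through the os:: wrappers that normally notify NMT, so the call sites must record the reservation and tag its type themselves. A hedged sketch of the shape of such a call site (simplified, POSIX-only; it assumes HotSpot's MemTracker, address, mtInternal, and CURRENT_PC from the hunks above, and the mmap flags are illustrative):

  #include <sys/mman.h>

  static char* map_shared_sketch(size_t size) {
    char* p = (char*)::mmap(NULL, size, PROT_READ | PROT_WRITE,
                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return NULL;
    // bypassed os::reserve_memory(), so report to NMT manually
    MemTracker::record_virtual_memory_reserve((address)p, size, CURRENT_PC);
    MemTracker::record_virtual_memory_type((address)p, mtInternal);
    return p;
  }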
src/os/linux/vm/perfMemory_linux.cpp @ 746de19d

@@ -30,6 +30,7 @@
 #include "os_linux.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/perfMemory.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/exceptions.hpp"

 // put OS-includes here

@@ -753,6 +754,10 @@ static char* mmap_create_shared(size_t size) {
   // clear the shared memory region
   (void)::memset((void*) mapAddress, 0, size);

+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+
   return mapAddress;
 }

@@ -912,6 +917,10 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
               "Could not map PerfMemory");
   }

+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+
   *addr = mapAddress;
   *sizep = size;
src/os/solaris/vm/os_solaris.cpp @ 746de19d

@@ -55,6 +55,7 @@
 #include "runtime/threadCritical.hpp"
 #include "runtime/timer.hpp"
 #include "services/attachListener.hpp"
+#include "services/memTracker.hpp"
 #include "services/runtimeService.hpp"
 #include "thread_solaris.inline.hpp"
 #include "utilities/decoder.hpp"

@@ -3072,11 +3073,12 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
   // Give it a try, if the kernel honors the hint we can return immediately.
   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);

   volatile int err = errno;
   if (addr == requested_addr) {
     return addr;
   } else if (addr != NULL) {
-    unmap_memory(addr, bytes);
+    pd_unmap_memory(addr, bytes);
   }

   if (PrintMiscellaneous && Verbose) {
src/os/solaris/vm/perfMemory_solaris.cpp @ 746de19d

@@ -30,6 +30,7 @@
 #include "os_solaris.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/perfMemory.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/exceptions.hpp"

 // put OS-includes here

@@ -768,6 +769,10 @@ static char* mmap_create_shared(size_t size) {
   // clear the shared memory region
   (void)::memset((void*) mapAddress, 0, size);

+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+
   return mapAddress;
 }

@@ -927,6 +932,10 @@ static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemor
               "Could not map PerfMemory");
   }

+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+
   *addr = mapAddress;
   *sizep = size;
src/os/windows/vm/perfMemory_windows.cpp @ 746de19d

@@ -30,6 +30,7 @@
 #include "os_windows.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/perfMemory.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/exceptions.hpp"

 #include <windows.h>

@@ -1496,6 +1497,10 @@ static char* mapping_create_shared(size_t size) {
   // clear the shared memory region
   (void)memset(mapAddress, '\0', size);

+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+
   return (char*) mapAddress;
 }

@@ -1672,6 +1677,11 @@ static void open_file_mapping(const char* user, int vmid,
               "Could not map PerfMemory");
   }

+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
+  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
+
   *addrp = (char*)mapAddress;
   *sizep = size;

@@ -1824,6 +1834,8 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
   }

   remove_file_mapping(addr);
+  // it does not go through os api, the operation has to record from here
+  MemTracker::record_virtual_memory_release((address)addr, bytes);
 }

 char* PerfMemory::backing_store_filename() {
src/share/vm/memory/allocation.cpp @ 746de19d

@@ -433,19 +433,18 @@ Arena::Arena() {
   NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }

 Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
   set_size_in_bytes(a->size_in_bytes());
   NOT_PRODUCT(Atomic::inc(&_instance_count);)
 }

 Arena *Arena::move_contents(Arena *copy) {
   copy->destruct_contents();
   copy->_chunk = _chunk;
   copy->_hwm   = _hwm;
   copy->_max   = _max;
   copy->_first = _first;
-  copy->set_size_in_bytes(size_in_bytes());
+
+  // workaround rare racing condition, which could double count
+  // the arena size by native memory tracking
+  size_t size = size_in_bytes();
+  set_size_in_bytes(0);
+  copy->set_size_in_bytes(size);
   // Destroy original arena
   reset();
   return copy;            // Return Arena with contents

@@ -497,6 +496,9 @@ void Arena::destruct_contents() {
     char* end = _first->next() ? _first->top() : _hwm;
     free_malloced_objects(_first, _first->bottom(), end, _hwm);
   }
+  // reset size before chop to avoid a rare racing condition
+  // that can have total arena memory exceed total chunk memory
+  set_size_in_bytes(0);
   _first->chop();
   reset();
 }
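The reordering in move_contents() closes a double-count window: NMT can sample both arenas while contents move, and if the source still reported its size after the copy started reporting it, the same bytes were counted twice. A minimal standalone sketch of the idea (my types, not Arena code):

  #include <atomic>
  #include <cstddef>

  struct ArenaAccounting {
    std::atomic<std::size_t> size_in_bytes{0};
  };

  // Hand the size accounting from 'from' to 'to' with no interval in which
  // both report it; a brief interval where neither reports is the lesser evil.
  void move_accounting(ArenaAccounting& from, ArenaAccounting& to) {
    std::size_t size = from.size_in_bytes.exchange(0);  // source stops counting first
    to.size_in_bytes.store(size);                       // then destination takes over
  }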
src/share/vm/memory/allocation.hpp @ 746de19d

@@ -144,8 +144,10 @@ enum MemoryType {
   mtNMT               = 0x0A00,  // memory used by native memory tracking
   mtChunk             = 0x0B00,  // chunk that holds content of arenas
   mtJavaHeap          = 0x0C00,  // Java heap
-  mtDontTrack         = 0x0D00,  // memory we donot or cannot track
-  mt_number_of_types  = 0x000C,  // number of memory types
+  mtClassShared       = 0x0D00,  // class data sharing
+  mt_number_of_types  = 0x000D,  // number of memory types (mtDontTrack
+                                 // is not included as validate type)
+  mtDontTrack         = 0x0E00,  // memory we do not or cannot track
   mt_masks            = 0x7F00,  // object type mask

@@ -342,7 +344,6 @@ protected:
 public:
   Arena();
   Arena(size_t init_size);
-  Arena(Arena *old);
   ~Arena();
   void  destruct_contents();
   char* hwm() const             { return _hwm; }
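For orientation, memory types live in the high byte of the flag word (hence mt_masks = 0x7F00), which is why slotting mtClassShared in at 0x0D00 pushes mtDontTrack up to 0x0E00 while mt_number_of_types becomes 0x000D (13 trackable types, mtDontTrack excluded). A small illustrative check, not HotSpot source:

  #include <cstdio>

  enum MemoryTypeSketch {
    mtClassShared = 0x0D00,   // new validated type
    mtDontTrack   = 0x0E00,   // shifted up, still outside the count
    mt_masks      = 0x7F00    // type bits occupy the high byte
  };

  int main() {
    unsigned flags = mtClassShared | 0x0042;  // type bits plus low tag bits
    std::printf("type = 0x%04x\n", (unsigned)(flags & mt_masks));  // 0x0d00
    return 0;
  }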
src/share/vm/memory/filemap.cpp @ 746de19d

@@ -29,6 +29,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/defaultStream.hpp"

 # include <sys/stat.h>

@@ -344,24 +345,13 @@ ReservedSpace FileMapInfo::reserve_shared_memory() {
     fail_continue(err_msg("Unable to reserved shared space at required address " INTPTR_FORMAT, requested_addr));
     return rs;
   }
+  // the reserved virtual memory is for mapping class data sharing archive
+  if (MemTracker::is_on()) {
+    MemTracker::record_virtual_memory_type((address)rs.base(), mtClassShared);
+  }
   return rs;
 }

-// Memory map a region in the address space.
-
-char* FileMapInfo::map_region(int i, ReservedSpace rs) {
-  struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
-  size_t used = si->_used;
-  size_t size = align_size_up(used, os::vm_allocation_granularity());
-
-  ReservedSpace mapped_rs = rs.first_part(size, true, true);
-  ReservedSpace unmapped_rs = rs.last_part(size);
-  mapped_rs.release();
-
-  return map_region(i);
-}
-
 // Memory map a region in the address space.
 static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode" };
src/share/vm/memory/filemap.hpp @ 746de19d

@@ -125,7 +125,6 @@ public:
                      size_t capacity, bool read_only, bool allow_exec);
   void  write_bytes(const void* buffer, int count);
   void  write_bytes_aligned(const void* buffer, int count);
-  char* map_region(int i, ReservedSpace rs);
   char* map_region(int i);
   void  unmap_region(int i);
   void  close();
src/share/vm/memory/metaspaceShared.cpp @ 746de19d

@@ -663,8 +663,8 @@ bool MetaspaceShared::is_in_shared_space(const void* p) {
   if (_ro_base == NULL || _rw_base == NULL) {
     return false;
   } else {
-    return ((p > _ro_base && p < (_ro_base + SharedReadOnlySize)) ||
-            (p > _rw_base && p < (_rw_base + SharedReadWriteSize)));
+    return ((p >= _ro_base && p < (_ro_base + SharedReadOnlySize)) ||
+            (p >= _rw_base && p < (_rw_base + SharedReadWriteSize)));
   }
 }

@@ -693,14 +693,6 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
   ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
   if (!shared_rs.is_reserved()) return false;

-  // Split reserved memory into pieces (windows needs this)
-  ReservedSpace ro_rs   = shared_rs.first_part(SharedReadOnlySize);
-  ReservedSpace tmp_rs1 = shared_rs.last_part(SharedReadOnlySize);
-  ReservedSpace rw_rs   = tmp_rs1.first_part(SharedReadWriteSize);
-  ReservedSpace tmp_rs2 = tmp_rs1.last_part(SharedReadWriteSize);
-  ReservedSpace md_rs   = tmp_rs2.first_part(SharedMiscDataSize);
-  ReservedSpace mc_rs   = tmp_rs2.last_part(SharedMiscDataSize);
-
   // Map each shared region
   if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
       (_rw_base = mapinfo->map_region(rw)) != NULL &&
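The first hunk is a boundary fix: with strict '>', the first byte of each mapped space was reported as outside the shared space. The corrected test is the usual half-open interval, as in this minimal sketch:

  #include <cstddef>

  // true iff p lies in [base, base + size), matching how the region is mapped
  bool in_range(const char* p, const char* base, std::size_t size) {
    return p >= base && p < base + size;
  }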
src/share/vm/memory/resourceArea.hpp @ 746de19d

@@ -127,15 +127,21 @@ protected:
   void reset_to_mark() {
     if (UseMallocOnly) free_malloced_objects();

-    if( _chunk->next() )        // Delete later chunks
+    if( _chunk->next() ) {      // Delete later chunks
+      // reset arena size before delete chunks. Otherwise, the total
+      // arena size could exceed total chunk size
+      assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check");
+      _area->set_size_in_bytes(size_in_bytes());
       _chunk->next_chop();
+    } else {
+      assert(_area->size_in_bytes() == size_in_bytes(), "Sanity check");
+    }
     _area->_chunk = _chunk;     // Roll back arena to saved chunk
     _area->_hwm = _hwm;
     _area->_max = _max;

     // clear out this chunk (to detect allocation bugs)
     if (ZapResourceArea) memset(_hwm, badResourceValue, _max - _hwm);
-    _area->set_size_in_bytes(size_in_bytes());
   }

   ~ResourceMark() {

@@ -219,15 +225,21 @@ protected:
   void reset_to_mark() {
     if (UseMallocOnly) free_malloced_objects();

-    if( _chunk->next() )        // Delete later chunks
+    if( _chunk->next() ) {      // Delete later chunks
+      // reset arena size before delete chunks. Otherwise, the total
+      // arena size could exceed total chunk size
+      assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check");
+      _area->set_size_in_bytes(size_in_bytes());
       _chunk->next_chop();
+    } else {
+      assert(_area->size_in_bytes() == size_in_bytes(), "Sanity check");
+    }
     _area->_chunk = _chunk;     // Roll back arena to saved chunk
     _area->_hwm = _hwm;
     _area->_max = _max;

     // clear out this chunk (to detect allocation bugs)
     if (ZapResourceArea) memset(_hwm, badResourceValue, _max - _hwm);
-    _area->set_size_in_bytes(size_in_bytes());
   }

   ~DeoptResourceMark() {
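For context, reset_to_mark() backs a scoped allocator: a ResourceMark snapshots the arena's position on construction and rolls back to it on destruction. The fix sets the arena's reported size before chopping later chunks so NMT never sees arena bytes exceed chunk bytes. A hedged usage sketch, assuming HotSpot's resource-area types and macros:

  void resource_mark_usage_sketch(Thread* thread) {
    ResourceMark rm(thread);                     // snapshot _chunk/_hwm/_max
    char* buf = NEW_RESOURCE_ARRAY(char, 1024);  // lives in the resource area
    // ... use buf; no explicit free ...
  }                                              // ~ResourceMark -> reset_to_mark()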
src/share/vm/runtime/arguments.cpp @ 746de19d

@@ -791,6 +791,10 @@ void Arguments::print_on(outputStream* st) {
     st->print("jvm_args: "); print_jvm_args_on(st);
   }
   st->print_cr("java_command: %s", java_command() ? java_command() : "<unknown>");
+  if (_java_class_path != NULL) {
+    char* path = _java_class_path->value();
+    st->print_cr("java_class_path (initial): %s", strlen(path) == 0 ? "<not set>" : path);
+  }
   st->print_cr("Launcher Type: %s", _sun_java_launcher);
 }

@@ -2771,6 +2775,11 @@ SOLARIS_ONLY(
         return JNI_EINVAL;
       }
       FLAG_SET_CMDLINE(uintx, MaxDirectMemorySize, max_direct_memory_size);
+    } else if (match_option(option, "-XX:+UseVMInterruptibleIO", &tail)) {
+      // NOTE! In JDK 9, the UseVMInterruptibleIO flag will completely go
+      //       away and will cause VM initialization failures!
+      warning("-XX:+UseVMInterruptibleIO is obsolete and will be removed in a future release.");
+      FLAG_SET_CMDLINE(bool, UseVMInterruptibleIO, true);
     } else if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
       // Skip -XX:Flags= since that case has already been handled
       if (strncmp(tail, "Flags=", strlen("Flags=")) != 0) {

@@ -2786,10 +2795,6 @@ SOLARIS_ONLY(
   // Change the default value for flags which have different default values
   // when working with older JDKs.
-  if (JDK_Version::current().compare_major(6) <= 0 &&
-      FLAG_IS_DEFAULT(UseVMInterruptibleIO)) {
-    FLAG_SET_DEFAULT(UseVMInterruptibleIO, true);
-  }
 #ifdef LINUX
   if (JDK_Version::current().compare_major(6) <= 0 &&
       FLAG_IS_DEFAULT(UseLinuxPosixThreadCPUClocks)) {
src/share/vm/runtime/handles.cpp @ 746de19d

@@ -158,13 +158,18 @@ HandleMark::~HandleMark() {
   // Delete later chunks
   if( _chunk->next() ) {
+    // reset arena size before delete chunks. Otherwise, the total
+    // arena size could exceed total chunk size
+    assert(area->size_in_bytes() > size_in_bytes(), "Sanity check");
+    area->set_size_in_bytes(size_in_bytes());
     _chunk->next_chop();
+  } else {
+    assert(area->size_in_bytes() == size_in_bytes(), "Sanity check");
   }
   // Roll back arena to saved top markers
   area->_chunk = _chunk;
   area->_hwm = _hwm;
   area->_max = _max;
   area->set_size_in_bytes(_size_in_bytes);
 #ifdef ASSERT
   // clear out first chunk (to detect allocation bugs)
   if (ZapVMHandleArea) {
src/share/vm/runtime/handles.hpp @ 746de19d

@@ -297,6 +297,7 @@ class HandleMark {
   void set_previous_handle_mark(HandleMark* mark) { _previous_handle_mark = mark; }
   HandleMark* previous_handle_mark() const        { return _previous_handle_mark; }

+  size_t size_in_bytes() const { return _size_in_bytes; }
 public:
   HandleMark();                            // see handles_inline.hpp
   HandleMark(Thread* thread)                      { initialize(thread); }
src/share/vm/runtime/handles.inline.hpp @ 746de19d

@@ -136,13 +136,18 @@ inline void HandleMark::pop_and_restore() {
   HandleArea* area = _area;   // help compilers with poor alias analysis
   // Delete later chunks
   if( _chunk->next() ) {
+    // reset arena size before delete chunks. Otherwise, the total
+    // arena size could exceed total chunk size
+    assert(area->size_in_bytes() > size_in_bytes(), "Sanity check");
+    area->set_size_in_bytes(size_in_bytes());
     _chunk->next_chop();
+  } else {
+    assert(area->size_in_bytes() == size_in_bytes(), "Sanity check");
   }
   // Roll back arena to saved top markers
   area->_chunk = _chunk;
   area->_hwm = _hwm;
   area->_max = _max;
   area->set_size_in_bytes(_size_in_bytes);
   debug_only(area->_handle_mark_nesting--);
 }
src/share/vm/runtime/os.cpp @ 746de19d

@@ -600,9 +600,7 @@ void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
   if (PrintMalloc && tty != NULL) tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock);

   // we do not track MallocCushion memory
-  if (MemTracker::is_on()) {
-    MemTracker::record_malloc((address)memblock, size, memflags, caller == 0 ? CALLER_PC : caller);
-  }
+  MemTracker::record_malloc((address)memblock, size, memflags, caller == 0 ? CALLER_PC : caller);

   return memblock;
 }

@@ -613,7 +611,7 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller
   NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
   NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
   void* ptr = ::realloc(memblock, size);
-  if (ptr != NULL && MemTracker::is_on()) {
+  if (ptr != NULL) {
     MemTracker::record_realloc((address)memblock, (address)ptr, size, memflags,
      caller == 0 ? CALLER_PC : caller);
   }

@@ -1401,7 +1399,7 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
   char* result = pd_reserve_memory(bytes, addr, alignment_hint);
-  if (result != NULL && MemTracker::is_on()) {
+  if (result != NULL) {
     MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   }

@@ -1409,7 +1407,7 @@ char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
 char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
   char* result = pd_attempt_reserve_memory_at(bytes, addr);
-  if (result != NULL && MemTracker::is_on()) {
+  if (result != NULL) {
     MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   }
   return result;

@@ -1422,7 +1420,7 @@ void os::split_reserved_memory(char *base, size_t size,
 bool os::commit_memory(char* addr, size_t bytes, bool executable) {
   bool res = pd_commit_memory(addr, bytes, executable);
-  if (res && MemTracker::is_on()) {
+  if (res) {
     MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
   }
   return res;

@@ -1431,7 +1429,7 @@ bool os::commit_memory(char* addr, size_t bytes, bool executable) {
 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
                        bool executable) {
   bool res = os::pd_commit_memory(addr, size, alignment_hint, executable);
-  if (res && MemTracker::is_on()) {
+  if (res) {
     MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
   }
   return res;

@@ -1458,8 +1456,9 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset,
                      char *addr, size_t bytes, bool read_only,
                      bool allow_exec) {
   char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
-  if (result != NULL && MemTracker::is_on()) {
+  if (result != NULL) {
     MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
+    MemTracker::record_virtual_memory_commit((address)result, bytes, CALLER_PC);
   }
   return result;
 }

@@ -1474,6 +1473,7 @@ char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
 bool os::unmap_memory(char *addr, size_t bytes) {
   bool result = pd_unmap_memory(addr, bytes);
   if (result) {
+    MemTracker::record_virtual_memory_uncommit((address)addr, bytes);
     MemTracker::record_virtual_memory_release((address)addr, bytes);
   }
   return result;
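Every hunk in os.cpp drops a MemTracker::is_on() guard, which only makes sense if the record_* entry points are now expected to check tracker state themselves; reading it that way, each call site shrinks to the shape below. A sketch of the assumed convention, not patch code:

  #include <cstddef>

  // stand-in for the tracker's internal state
  static bool s_tracking_on = false;

  void record_virtual_memory_reserve_sketch(void* addr, std::size_t bytes) {
    if (!s_tracking_on) return;   // the guard lives inside the tracker now
    // ... enqueue a reserve record for (addr, bytes) ...
  }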
src/share/vm/runtime/thread.cpp @ 746de19d

@@ -323,12 +323,10 @@ void Thread::record_stack_base_and_size() {
   os::initialize_thread(this);

 #if INCLUDE_NMT
-  // record thread's native stack, stack grows downward
-  if (MemTracker::is_on()) {
-    address stack_low_addr = stack_base() - stack_size();
-    MemTracker::record_thread_stack(stack_low_addr, stack_size(), this,
-      CURRENT_PC);
-  }
+  // record thread's native stack, stack grows downward
+  address stack_low_addr = stack_base() - stack_size();
+  MemTracker::record_thread_stack(stack_low_addr, stack_size(), this,
+    CURRENT_PC);
 #endif // INCLUDE_NMT
 }

@@ -345,6 +343,9 @@ Thread::~Thread() {
   if (_stack_base != NULL) {
     address low_stack_addr = stack_base() - stack_size();
     MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);
+#ifdef ASSERT
+    set_stack_base(NULL);
+#endif
   }
 #endif // INCLUDE_NMT

@@ -1521,10 +1522,12 @@ JavaThread::~JavaThread() {
     tty->print_cr("terminate thread %p", this);
   }

-  // Info NMT that this JavaThread is exiting, its memory
-  // recorder should be collected
+  // By now, this thread should already be invisible to safepoint,
+  // and its per-thread recorder also collected.
   assert(!is_safepoint_visible(), "wrong state");
-  MemTracker::thread_exiting(this);
+#if INCLUDE_NMT
+  assert(get_recorder() == NULL, "Already collected");
+#endif // INCLUDE_NMT

   // JSR166 -- return the parker to the free list
   Parker::Release(_parker);

@@ -2425,6 +2428,7 @@ void JavaThread::create_stack_guard_pages() {
 }

 void JavaThread::remove_stack_guard_pages() {
+  assert(Thread::current() == this, "from different thread");
   if (_stack_guard_state == stack_guard_unused) return;
   address low_addr = stack_base() - stack_size();
   size_t len = (StackYellowPages + StackRedPages) * os::vm_page_size();

@@ -4093,7 +4097,10 @@ void Threads::remove(JavaThread* p) {
     // Now, this thread is not visible to safepoint
     p->set_safepoint_visible(false);
+    // once the thread becomes safepoint invisible, we can not use its per-thread
+    // recorder. And Threads::do_threads() no longer walks this thread, so we have
+    // to release its per-thread recorder here.
+    MemTracker::thread_exiting(p);
   } // unlock Threads_lock

   // Since Events::log uses a lock, we grab it outside the Threads_lock
src/share/vm/services/attachListener.cpp @ 746de19d

@@ -404,6 +404,8 @@ static AttachOperationFunctionInfo funcs[] = {
 static void attach_listener_thread_entry(JavaThread* thread, TRAPS) {
   os::set_priority(thread, NearMaxPriority);

+  thread->record_stack_base_and_size();
+
   if (AttachListener::pd_init() != 0) {
     return;
   }
src/share/vm/services/memBaseline.cpp @ 746de19d

@@ -40,6 +40,7 @@ MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
   {mtSymbol,      "Symbol"},
   {mtNMT,         "Memory Tracking"},
   {mtChunk,       "Pooled Free Chunks"},
+  {mtClassShared, "Shared spaces for classes"},
   {mtNone,        "Unknown"}  // It can happen when type tagging records are lagging
                               // behind
 };

@@ -55,6 +56,7 @@ MemBaseline::MemBaseline() {
   _malloc_cs = NULL;
   _vm_cs = NULL;
+  _vm_map = NULL;

   _number_of_classes = 0;
   _number_of_threads = 0;

@@ -72,6 +74,11 @@ void MemBaseline::clear() {
     _vm_cs = NULL;
   }

+  if (_vm_map != NULL) {
+    delete _vm_map;
+    _vm_map = NULL;
+  }
+
   reset();
 }

@@ -85,6 +92,7 @@ void MemBaseline::reset() {
   if (_malloc_cs != NULL) _malloc_cs->clear();
   if (_vm_cs != NULL) _vm_cs->clear();
+  if (_vm_map != NULL) _vm_map->clear();

   for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index++) {
     _malloc_data[index].clear();

@@ -94,39 +102,33 @@ void MemBaseline::reset() {
 }

 MemBaseline::~MemBaseline() {
-  if (_malloc_cs != NULL) {
-    delete _malloc_cs;
-  }
-
-  if (_vm_cs != NULL) {
-    delete _vm_cs;
-  }
+  clear();
 }

 // baseline malloc'd memory records, generate overall summary and summaries by
 // memory types
 bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
-  MemPointerArrayIteratorImpl mItr((MemPointerArray*)malloc_records);
-  MemPointerRecord* mptr = (MemPointerRecord*)mItr.current();
+  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
+  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
   size_t used_arena_size = 0;
   int index;
-  while (mptr != NULL) {
-    index = flag2index(FLAGS_TO_MEMORY_TYPE(mptr->flags()));
-    size_t size = mptr->size();
+  while (malloc_ptr != NULL) {
+    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
+    size_t size = malloc_ptr->size();
     _total_malloced += size;
     _malloc_data[index].inc(size);
-    if (MemPointerRecord::is_arena_record(mptr->flags())) {
+    if (MemPointerRecord::is_arena_record(malloc_ptr->flags())) {
       // see if arena size record present
-      MemPointerRecord* next_p = (MemPointerRecordEx*)mItr.peek_next();
-      if (MemPointerRecord::is_arena_size_record(next_p->flags())) {
-        assert(next_p->is_size_record_of_arena(mptr), "arena records do not match");
-        size = next_p->size();
+      MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
+      if (MemPointerRecord::is_arena_size_record(next_malloc_ptr->flags())) {
+        assert(next_malloc_ptr->is_size_record_of_arena(malloc_ptr), "arena records do not match");
+        size = next_malloc_ptr->size();
         _arena_data[index].inc(size);
         used_arena_size += size;
-        mItr.next();
+        malloc_itr.next();
       }
     }
-    mptr = (MemPointerRecordEx*)mItr.next();
+    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
   }

   // substract used arena size to get size of arena chunk in free list

@@ -142,20 +144,23 @@ bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records)
 // baseline mmap'd memory records, generate overall summary and summaries by
 // memory types
 bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
-  MemPointerArrayIteratorImpl vItr((MemPointerArray*)vm_records);
-  VMMemRegion* vptr = (VMMemRegion*)vItr.current();
+  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
+  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
   int index;
-  while (vptr != NULL) {
-    index = flag2index(FLAGS_TO_MEMORY_TYPE(vptr->flags()));
+  while (vm_ptr != NULL) {
+    if (vm_ptr->is_reserved_region()) {
+      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
       // we use the number of thread stack to count threads
-    if (IS_MEMORY_TYPE(vptr->flags(), mtThreadStack)) {
+      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
         _number_of_threads ++;
       }
-    _total_vm_reserved += vptr->reserved_size();
-    _total_vm_committed += vptr->committed_size();
-    _vm_data[index].inc(vptr->reserved_size(), vptr->committed_size());
-    vptr = (VMMemRegion*)vItr.next();
+      _total_vm_reserved += vm_ptr->size();
+      _vm_data[index].inc(vm_ptr->size(), 0);
+    } else {
+      _total_vm_committed += vm_ptr->size();
+      _vm_data[index].inc(0, vm_ptr->size());
+    }
+    vm_ptr = (VMMemRegion*)vm_itr.next();
   }
   return true;
 }

@@ -165,41 +170,57 @@ bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
 bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
   assert(MemTracker::track_callsite(), "detail tracking is off");

-  MemPointerArrayIteratorImpl mItr((MemPointerArray*)malloc_records);
-  MemPointerRecordEx* mptr = (MemPointerRecordEx*)mItr.current();
-  MallocCallsitePointer mp;
+  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
+  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
+  MallocCallsitePointer malloc_callsite;

+  // initailize malloc callsite array
   if (_malloc_cs == NULL) {
     _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
     // out of native memory
-    if (_malloc_cs == NULL) {
+    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
       return false;
     }
   } else {
     _malloc_cs->clear();
   }

+  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);
+
+  // sort into callsite pc order. Details are aggregated by callsites
+  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
+  bool ret = true;
+
   // baseline memory that is totaled over 1 KB
-  while (mptr != NULL) {
-    if (!MemPointerRecord::is_arena_size_record(mptr->flags())) {
+  while (malloc_ptr != NULL) {
+    if (!MemPointerRecord::is_arena_size_record(malloc_ptr->flags())) {
       // skip thread stacks
-      if (!IS_MEMORY_TYPE(mptr->flags(), mtThreadStack)) {
-        if (mp.addr() != mptr->pc()) {
-          if ((mp.amount()/K) > 0) {
-            if (!_malloc_cs->append(&mp)) {
-              return false;
+      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
+        if (malloc_callsite.addr() != malloc_ptr->pc()) {
+          if ((malloc_callsite.amount()/K) > 0) {
+            if (!_malloc_cs->append(&malloc_callsite)) {
+              ret = false;
+              break;
             }
           }
-          mp = MallocCallsitePointer(mptr->pc());
+          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
         }
-        mp.inc(mptr->size());
+        malloc_callsite.inc(malloc_ptr->size());
       }
     }
-    mptr = (MemPointerRecordEx*)mItr.next();
+    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
   }

-  if (mp.addr() != 0 && (mp.amount()/K) > 0) {
-    if (!_malloc_cs->append(&mp)) {
+  // restore to address order. Snapshot malloc data is maintained in memory
+  // address order.
+  malloc_data->sort((FN_SORT)malloc_sort_by_addr);
+
+  if (!ret) {
+    return false;
+  }
+  // deal with last record
+  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
+    if (!_malloc_cs->append(&malloc_callsite)) {
       return false;
     }
   }

@@ -210,34 +231,106 @@ bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records)
 bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
   assert(MemTracker::track_callsite(), "detail tracking is off");

-  VMCallsitePointer vp;
-  MemPointerArrayIteratorImpl vItr((MemPointerArray*)vm_records);
-  VMMemRegionEx* vptr = (VMMemRegionEx*)vItr.current();
+  VMCallsitePointer  vm_callsite;
+  VMCallsitePointer* cur_callsite = NULL;
+  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
+  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();
+
+  // initialize virtual memory map array
+  if (_vm_map == NULL) {
+    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
+    if (_vm_map == NULL || _vm_map->out_of_memory()) {
+      return false;
+    }
+  } else {
+    _vm_map->clear();
+  }

+  // initialize virtual memory callsite array
   if (_vm_cs == NULL) {
     _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
-    if (_vm_cs == NULL) {
+    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
       return false;
     }
   } else {
     _vm_cs->clear();
   }

-  while (vptr != NULL) {
-    if (vp.addr() != vptr->pc()) {
-      if (!_vm_cs->append(&vp)) {
-        return false;
-      }
-      vp = VMCallsitePointer(vptr->pc());
-    }
-    vp.inc(vptr->size(), vptr->committed_size());
-    vptr = (VMMemRegionEx*)vItr.next();
-  }
-  if (vp.addr() != 0) {
-    if (!_vm_cs->append(&vp)) {
-      return false;
-    }
-  }
+  // consolidate virtual memory data
+  VMMemRegionEx* reserved_rec  = NULL;
+  VMMemRegionEx* committed_rec = NULL;
+
+  // vm_ptr is coming in increasing base address order
+  while (vm_ptr != NULL) {
+    if (vm_ptr->is_reserved_region()) {
+      // consolidate reserved memory regions for virtual memory map.
+      // The criteria for consolidation is:
+      // 1. two adjacent reserved memory regions
+      // 2. belong to the same memory type
+      // 3. reserved from the same callsite
+      if (reserved_rec == NULL ||
+          reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
+          FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
+          reserved_rec->pc() != vm_ptr->pc()) {
+        if (!_vm_map->append(vm_ptr)) {
+          return false;
+        }
+        // inserted reserved region, we need the pointer to the element in virtual
+        // memory map array.
+        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
+      } else {
+        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
+      }
+      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
+        return false;
+      }
+      vm_callsite = VMCallsitePointer(vm_ptr->pc());
+      cur_callsite = &vm_callsite;
+      vm_callsite.inc(vm_ptr->size(), 0);
+    } else {
+      // consolidate committed memory regions for virtual memory map
+      // The criterial is:
+      // 1. two adjacent committed memory regions
+      // 2. committed from the same callsite
+      if (committed_rec == NULL ||
+          committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
+          committed_rec->pc() != vm_ptr->pc()) {
+        if (!_vm_map->append(vm_ptr)) {
+          return false;
+        }
+        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
+      } else {
+        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
+      }
+      vm_callsite.inc(0, vm_ptr->size());
+    }
+    vm_ptr = (VMMemRegionEx*)vm_itr.next();
+  }
+  // deal with last record
+  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
+    return false;
+  }
+
+  // sort it into callsite pc order. Details are aggregated by callsites
+  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);
+
+  // walk the array to consolidate record by pc
+  MemPointerArrayIteratorImpl itr(_vm_cs);
+  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
+  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
+  while (next_rec != NULL) {
+    assert(callsite_rec != NULL, "Sanity check");
+    if (next_rec->addr() == callsite_rec->addr()) {
+      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
+      itr.remove();
+      next_rec = (VMCallsitePointer*)itr.current();
+    } else {
+      callsite_rec = next_rec;
+      next_rec = (VMCallsitePointer*)itr.next();
+    }
+  }
+
   return true;
 }

@@ -251,12 +344,8 @@ bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
   _number_of_classes = SystemDictionary::number_of_classes();

   if (!summary_only && MemTracker::track_callsite() && _baselined) {
-    ((MemPointerArray*)snapshot._alloc_ptrs)->sort((FN_SORT)malloc_sort_by_pc);
-    ((MemPointerArray*)snapshot._vm_ptrs)->sort((FN_SORT)vm_sort_by_pc);
     _baselined = baseline_malloc_details(snapshot._alloc_ptrs) &&
       baseline_vm_details(snapshot._vm_ptrs);
-    ((MemPointerArray*)snapshot._alloc_ptrs)->sort((FN_SORT)malloc_sort_by_addr);
-    ((MemPointerArray*)snapshot._vm_ptrs)->sort((FN_SORT)vm_sort_by_addr);
   }
   return _baselined;
 }

@@ -278,7 +367,7 @@ const char* MemBaseline::type2name(MEMFLAGS type) {
       return MemType2NameMap[index]._name;
     }
   }
-  assert(false, "no type");
+  assert(false, err_msg("bad type %x", type));
   return NULL;
 }

@@ -341,13 +430,6 @@ int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
   return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
 }

-// sort snapshot mmap'd records in callsite pc order
-int MemBaseline::vm_sort_by_pc(const void* p1, const void* p2) {
-  assert(MemTracker::track_callsite(),"Just check");
-  const VMMemRegionEx* mp1 = (const VMMemRegionEx*)p1;
-  const VMMemRegionEx* mp2 = (const VMMemRegionEx*)p2;
-  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
-}
-
 // sort baselined mmap'd records in size (reserved size) order
 int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {

@@ -376,12 +458,3 @@ int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
   return delta;
 }
-
-// sort snapshot mmap'd records in memory block address order
-int MemBaseline::vm_sort_by_addr(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const VMMemRegion* mp1 = (const VMMemRegion*)p1;
-  const VMMemRegion* mp2 = (const VMMemRegion*)p2;
-  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
-  assert(delta != 0, "dup pointer");
-  return delta;
-}
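The consolidation rules spelled out in the comments of baseline_vm_details() reduce to a three-part adjacency test for reserved regions (committed regions drop the type check). Restated as a standalone predicate, with names of my own:

  #include <cstddef>
  #include <cstdint>

  struct RegionSketch {
    uintptr_t   base;   // start address
    std::size_t size;   // length in bytes
    int         type;   // stand-in for FLAGS_TO_MEMORY_TYPE(flags())
    uintptr_t   pc;     // reserving callsite
  };

  bool can_merge_reserved(const RegionSketch& prev, const RegionSketch& next) {
    return prev.base + prev.size == next.base &&  // 1. adjacent
           prev.type == next.type &&              // 2. same memory type
           prev.pc   == next.pc;                  // 3. same callsite
  }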
src/share/vm/services/memBaseline.hpp @ 746de19d

@@ -320,6 +320,8 @@ class MemBaseline : public _ValueObj {
   // only available when detail tracking is on.
   MemPointerArray*  _malloc_cs;
   MemPointerArray*  _vm_cs;
+  // virtual memory map
+  MemPointerArray*  _vm_map;

  private:
   static MemType2Name  MemType2NameMap[NUMBER_OF_MEMORY_TYPE];

@@ -432,9 +434,6 @@ class MemBaseline : public _ValueObj {
   static int malloc_sort_by_pc(const void* p1, const void* p2);
   static int malloc_sort_by_addr(const void* p1, const void* p2);

-  static int vm_sort_by_pc(const void* p1, const void* p2);
-  static int vm_sort_by_addr(const void* p1, const void* p2);
-
  private:
   // sorting functions for baselined records
   static int bl_malloc_sort_by_size(const void* p1, const void* p2);
src/share/vm/services/memPtr.cpp @ 746de19d

@@ -40,35 +40,3 @@ jint SequenceGenerator::next() {
   return seq;
 }
-
-bool VMMemRegion::contains(const VMMemRegion* mr) const {
-  assert(base() != 0, "Sanity check");
-  assert(size() != 0 || committed_size() != 0,
-    "Sanity check");
-  address base_addr = base();
-  address end_addr = base_addr +
-    (is_reserve_record() ? reserved_size() : committed_size());
-  if (mr->is_reserve_record()) {
-    if (mr->base() == base_addr && mr->size() == size()) {
-      // the same range
-      return true;
-    }
-    return false;
-  } else if (mr->is_commit_record() || mr->is_uncommit_record()) {
-    assert(mr->base() != 0 && mr->committed_size() > 0,
-      "bad record");
-    return (mr->base() >= base_addr &&
-      (mr->base() + mr->committed_size()) <= end_addr);
-  } else if (mr->is_type_tagging_record()) {
-    assert(mr->base() != NULL, "Sanity check");
-    return (mr->base() >= base_addr && mr->base() < end_addr);
-  } else if (mr->is_release_record()) {
-    assert(mr->base() != 0 && mr->size() > 0,
-      "bad record");
-    return (mr->base() == base_addr && mr->size() == size());
-  } else {
-    ShouldNotReachHere();
-    return false;
-  }
-}
src/share/vm/services/memPtr.hpp @ 746de19d

@@ -291,6 +291,26 @@ public:
   inline bool is_type_tagging_record() const {
     return is_virtual_memory_type_record(_flags);
   }
+
+  // if the two memory pointer records actually represent the same
+  // memory block
+  inline bool is_same_region(const MemPointerRecord* other) const {
+    return (addr() == other->addr() && size() == other->size());
+  }
+
+  // if this memory region fully contains another one
+  inline bool contains_region(const MemPointerRecord* other) const {
+    return contains_region(other->addr(), other->size());
+  }
+
+  // if this memory region fully contains specified memory range
+  inline bool contains_region(address add, size_t sz) const {
+    return (addr() <= add && addr() + size() >= add + sz);
+  }
+
+  inline bool contains_address(address add) const {
+    return (addr() <= add && addr() + size() > add);
+  }
 };

 // MemPointerRecordEx also records callsite pc, from where

@@ -321,66 +341,32 @@ class MemPointerRecordEx : public MemPointerRecord {
   }
 };

-// a virtual memory region
+// a virtual memory region. The region can represent a reserved
+// virtual memory region or a committed memory region
 class VMMemRegion : public MemPointerRecord {
-private:
-  // committed size
-  size_t       _committed_size;
-
 public:
-  VMMemRegion(): _committed_size(0) { }
+  VMMemRegion() { }

   void init(const MemPointerRecord* mp) {
-    assert(mp->is_vm_pointer(), "not virtual memory pointer");
+    assert(mp->is_vm_pointer(), "Sanity check");
     _addr = mp->addr();
-    if (mp->is_commit_record() || mp->is_uncommit_record()) {
-      _committed_size = mp->size();
-      set_size(_committed_size);
-    } else {
-      set_size(mp->size());
-      _committed_size = 0;
-    }
+    set_size(mp->size());
     set_flags(mp->flags());
   }

   VMMemRegion& operator=(const VMMemRegion& other) {
     MemPointerRecord::operator=(other);
-    _committed_size = other.committed_size();
     return *this;
   }

-  inline bool is_reserve_record() const {
-    return is_virtual_memory_reserve_record(flags());
-  }
-
-  inline bool is_release_record() const {
-    return is_virtual_memory_release_record(flags());
-  }
-
-  // resize reserved VM range
-  inline void set_reserved_size(size_t new_size) {
-    assert(new_size >= committed_size(), "resize");
-    set_size(new_size);
-  }
-
-  inline void commit(size_t size) {
-    _committed_size += size;
+  inline bool is_reserved_region() const {
+    return is_allocation_record();
   }

-  inline void uncommit(size_t size) {
-    if (_committed_size >= size) {
-      _committed_size -= size;
-    } else {
-      _committed_size = 0;
-    }
+  inline bool is_committed_region() const {
+    return is_commit_record();
   }

-  /*
-   * if this virtual memory range covers whole range of
-   * the other VMMemRegion
-   */
-  bool contains(const VMMemRegion* mr) const;
-
   /* base address of this virtual memory range */
   inline address base() const {
     return addr();

@@ -391,13 +377,28 @@ public:
     set_flags(flags() | (f & mt_masks));
   }

-  // release part of memory range
-  inline void partial_release(address add, size_t sz) {
-    assert(add >= addr() && add < addr() + size(), "not valid address");
-    // for now, it can partially release from the both ends,
-    // but not in the middle
+  // expand this region to also cover specified range.
+  // The range has to be on either end of the memory region.
+  void expand_region(address addr, size_t sz) {
+    if (addr < base()) {
+      assert(addr + sz == base(), "Sanity check");
+      _addr = addr;
+      set_size(size() + sz);
+    } else {
+      assert(base() + size() == addr, "Sanity check");
+      set_size(size() + sz);
+    }
+  }
+
+  // exclude the specified address range from this region.
+  // The excluded memory range has to be on either end of this memory
+  // region.
+  inline void exclude_region(address add, size_t sz) {
+    assert(is_reserved_region() || is_committed_region(), "Sanity check");
+    assert(addr() != NULL && size() != 0, "Sanity check");
+    assert(add >= addr() && add < addr() + size(), "Sanity check");
     assert(add == addr() || (add + sz) == (addr() + size()),
-      "release in the middle");
+      "exclude in the middle");
     if (add == addr()) {
       set_addr(add + sz);
       set_size(size() - sz);

@@ -405,16 +406,6 @@ public:
       set_size(size() - sz);
     }
   }
-
-  // the committed size of the virtual memory block
-  inline size_t committed_size() const {
-    return _committed_size;
-  }
-
-  // the reserved size of the virtual memory block
-  inline size_t reserved_size() const {
-    return size();
-  }
 };

 class VMMemRegionEx : public VMMemRegion {
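A hypothetical use of the new region arithmetic, to make the contract concrete: expand_region() grows a region when an adjacent range arrives and exclude_region() trims one end, and both assert that the range touches an end rather than the middle. Sketch only; the caller names are mine, the types are those defined above:

  void on_adjacent_commit(VMMemRegion* rgn, address add, size_t sz) {
    // requires add + sz == rgn->base(), or rgn->base() + rgn->size() == add
    rgn->expand_region(add, sz);
  }

  void on_partial_uncommit(VMMemRegion* rgn, address add, size_t sz) {
    // requires the range to sit flush against either end of the region
    rgn->exclude_region(add, sz);
  }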
src/share/vm/services/memRecorder.cpp @ 746de19d

@@ -31,14 +31,19 @@
 #include "services/memTracker.hpp"

 MemPointer* SequencedRecordIterator::next_record() {
-  MemPointer* itr_cur = _itr.current();
-  if (itr_cur == NULL) return NULL;
-  MemPointer* itr_next = _itr.next();
+  MemPointerRecord* itr_cur = (MemPointerRecord*)_itr.current();
+  if (itr_cur == NULL) {
+    return itr_cur;
+  }
+
+  MemPointerRecord* itr_next = (MemPointerRecord*)_itr.next();

-  while (itr_next != NULL &&
-         same_kind((MemPointerRecord*)itr_cur, (MemPointerRecord*)itr_next)) {
+  // don't collapse virtual memory records
+  while (itr_next != NULL && !itr_cur->is_vm_pointer() &&
+         !itr_next->is_vm_pointer() &&
+         same_kind(itr_cur, itr_next)) {
     itr_cur = itr_next;
-    itr_next = _itr.next();
+    itr_next = (MemPointerRecord*)_itr.next();
   }

   return itr_cur;
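The rewritten next_record() still collapses runs of malloc records that same_kind() matches (same block, same tag bits, only the last state matters), but virtual memory records are now exempt: a reserve followed by a commit at the same address are distinct operations, not duplicates. In sketch form, mirroring the new loop guard with the MemPointerRecord type from the hunks above:

  bool may_collapse(const MemPointerRecord* cur, const MemPointerRecord* next) {
    return !cur->is_vm_pointer() &&      // VM records are never collapsed
           !next->is_vm_pointer()
           /* && same_kind(cur, next): same address and tag bits */;
  }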
src/share/vm/services/memRecorder.hpp @ 746de19d

@@ -188,6 +188,7 @@ class SequencedRecordIterator : public MemPointerArrayIterator {
   // Test if the two records are the same kind: the same memory block and allocation
   // type.
   inline bool same_kind(const MemPointerRecord* p1, const MemPointerRecord* p2) const {
+    assert(!p1->is_vm_pointer() && !p2->is_vm_pointer(), "malloc pointer only");
     return (p1->addr() == p2->addr() &&
             (p1->flags() & MemPointerRecord::tag_masks) ==
             (p2->flags() & MemPointerRecord::tag_masks));
src/share/vm/services/memReporter.cpp
浏览文件 @
746de19d
...
...
@@ -51,6 +51,7 @@ void BaselineReporter::report_baseline(const MemBaseline& baseline, bool summary
report_summaries
(
baseline
);
if
(
!
summary_only
&&
MemTracker
::
track_callsite
())
{
report_virtual_memory_map
(
baseline
);
report_callsites
(
baseline
);
}
_outputer
.
done
();
...
...
@@ -74,6 +75,25 @@ void BaselineReporter::report_summaries(const MemBaseline& baseline) {
_outputer
.
done_category_summary
();
}
void
BaselineReporter
::
report_virtual_memory_map
(
const
MemBaseline
&
baseline
)
{
_outputer
.
start_virtual_memory_map
();
MemBaseline
*
pBL
=
const_cast
<
MemBaseline
*>
(
&
baseline
);
MemPointerArrayIteratorImpl
itr
=
MemPointerArrayIteratorImpl
(
pBL
->
_vm_map
);
VMMemRegionEx
*
rgn
=
(
VMMemRegionEx
*
)
itr
.
current
();
while
(
rgn
!=
NULL
)
{
if
(
rgn
->
is_reserved_region
())
{
_outputer
.
reserved_memory_region
(
FLAGS_TO_MEMORY_TYPE
(
rgn
->
flags
()),
rgn
->
base
(),
rgn
->
base
()
+
rgn
->
size
(),
amount_in_current_scale
(
rgn
->
size
()),
rgn
->
pc
());
}
else
{
_outputer
.
committed_memory_region
(
rgn
->
base
(),
rgn
->
base
()
+
rgn
->
size
(),
amount_in_current_scale
(
rgn
->
size
()),
rgn
->
pc
());
}
rgn
=
(
VMMemRegionEx
*
)
itr
.
next
();
}
_outputer
.
done_virtual_memory_map
();
}
void
BaselineReporter
::
report_callsites
(
const
MemBaseline
&
baseline
)
{
_outputer
.
start_callsite
();
MemBaseline
*
pBL
=
const_cast
<
MemBaseline
*>
(
&
baseline
);
...
...
@@ -324,6 +344,40 @@ void BaselineTTYOutputer::done_category_summary() {
_output
->
print_cr
(
" "
);
}
void
BaselineTTYOutputer
::
start_virtual_memory_map
()
{
_output
->
print_cr
(
"Virtual memory map:"
);
}
void
BaselineTTYOutputer
::
reserved_memory_region
(
MEMFLAGS
type
,
address
base
,
address
end
,
size_t
size
,
address
pc
)
{
const
char
*
unit
=
memory_unit
(
_scale
);
char
buf
[
128
];
int
offset
;
_output
->
print_cr
(
" "
);
_output
->
print_cr
(
"["
PTR_FORMAT
" - "
PTR_FORMAT
"] reserved %d%s for %s"
,
base
,
end
,
size
,
unit
,
MemBaseline
::
type2name
(
type
));
if
(
os
::
dll_address_to_function_name
(
pc
,
buf
,
sizeof
(
buf
),
&
offset
))
{
_output
->
print_cr
(
"
\t\t
from [%s+0x%x]"
,
buf
,
offset
);
}
}
void
BaselineTTYOutputer
::
committed_memory_region
(
address
base
,
address
end
,
size_t
size
,
address
pc
)
{
const
char
*
unit
=
memory_unit
(
_scale
);
char
buf
[
128
];
int
offset
;
_output
->
print
(
"
\t
["
PTR_FORMAT
" - "
PTR_FORMAT
"] committed %d%s"
,
base
,
end
,
size
,
unit
);
if
(
os
::
dll_address_to_function_name
(
pc
,
buf
,
sizeof
(
buf
),
&
offset
))
{
_output
->
print_cr
(
" from [%s+0x%x]"
,
buf
,
offset
);
}
}
void
BaselineTTYOutputer
::
done_virtual_memory_map
()
{
_output
->
print_cr
(
" "
);
}
void
BaselineTTYOutputer
::
start_callsite
()
{
_output
->
print_cr
(
"Details:"
);
_output
->
print_cr
(
" "
);
...
...
@@ -337,7 +391,7 @@ void BaselineTTYOutputer::malloc_callsite(address pc, size_t malloc_amt,
   size_t malloc_count) {
   if (malloc_amt > 0) {
     const char* unit = memory_unit(_scale);
-    char buf[64];
+    char buf[128];
     int  offset;
     if (pc == 0) {
       _output->print("[BOOTSTRAP]%18s", " ");
...
@@ -357,7 +411,7 @@ void BaselineTTYOutputer::virtual_memory_callsite(address pc, size_t reserved_am
   size_t committed_amt) {
   if (reserved_amt > 0) {
     const char* unit = memory_unit(_scale);
-    char buf[64];
+    char buf[128];
     int  offset;
     if (pc == 0) {
       _output->print("[BOOTSTRAP]%18s", " ");
...
@@ -502,7 +556,7 @@ void BaselineTTYOutputer::diff_malloc_callsite(address pc,
   int malloc_diff, int malloc_count_diff) {
   if (malloc_diff != 0) {
     const char* unit = memory_unit(_scale);
-    char buf[64];
+    char buf[128];
     int  offset;
     if (pc == 0) {
       _output->print_cr("[BOOTSTRAP]%18s", " ");
...
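The three hunks above grow the callsite symbol buffer from 64 to 128 bytes, since mangled C++ names routinely exceed 64 characters. A standalone sketch of the bounded-formatting pattern involved; resolve_symbol is a hypothetical stand-in for os::dll_address_to_function_name, not a real API:

#include <cstddef>
#include <cstdio>

// Hypothetical resolver: copies a (possibly long) mangled symbol name into
// the caller's buffer, truncating safely to the buffer size.
bool resolve_symbol(const void* pc, char* buf, size_t buflen, int* offset) {
  (void)pc;  // a real resolver would look this address up
  const char* name = "_ZN19BaselineTTYOutputer23virtual_memory_callsiteEPhmm";
  snprintf(buf, buflen, "%s", name);  // snprintf always NUL-terminates
  *offset = 0x42;
  return true;
}

int main() {
  char buf[128];  // 64 bytes is easily overflowed by mangled C++ names
  int offset;
  if (resolve_symbol(nullptr, buf, sizeof(buf), &offset)) {
    printf("\t\tfrom [%s+0x%x]\n", buf, offset);
  }
  return 0;
}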
src/share/vm/services/memReporter.hpp
...
@@ -93,6 +93,11 @@ class BaselineOutputer : public StackObj {
   virtual void done_category_summary() = 0;
 
+  virtual void start_virtual_memory_map() = 0;
+  virtual void reserved_memory_region(MEMFLAGS type, address base, address end,
+                                      size_t size, address pc) = 0;
+  virtual void committed_memory_region(address base, address end, size_t size, address pc) = 0;
+  virtual void done_virtual_memory_map() = 0;
+
   /*
    * Report callsite information
    */
...
@@ -136,6 +141,7 @@ class BaselineReporter : public StackObj {
  private:
   void report_summaries(const MemBaseline& baseline);
+  void report_virtual_memory_map(const MemBaseline& baseline);
   void report_callsites(const MemBaseline& baseline);
 
   void diff_summaries(const MemBaseline& cur, const MemBaseline& prev);
...
@@ -251,6 +257,13 @@ class BaselineTTYOutputer : public BaselineOutputer {
   void done_category_summary();
 
+  // virtual memory map
+  void start_virtual_memory_map();
+  void reserved_memory_region(MEMFLAGS type, address base, address end,
+                              size_t size, address pc);
+  void committed_memory_region(address base, address end, size_t size, address pc);
+  void done_virtual_memory_map();
+
   /*
    * Report callsite information
    */
...
src/share/vm/services/memSnapshot.cpp
...
@@ -31,6 +31,220 @@
 #include "services/memSnapshot.hpp"
 #include "services/memTracker.hpp"
 
+bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
+  VMMemRegionEx new_rec;
+  assert(rec->is_allocation_record() || rec->is_commit_record(),
+    "Sanity check");
+  if (MemTracker::track_callsite()) {
+    new_rec.init((MemPointerRecordEx*)rec);
+  } else {
+    new_rec.init(rec);
+  }
+  return insert(&new_rec);
+}
+
+bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
+  VMMemRegionEx new_rec;
+  assert(rec->is_allocation_record() || rec->is_commit_record(),
+    "Sanity check");
+  if (MemTracker::track_callsite()) {
+    new_rec.init((MemPointerRecordEx*)rec);
+  } else {
+    new_rec.init(rec);
+  }
+  return insert_after(&new_rec);
+}
+
+// we don't consolidate reserved regions, since they may be categorized
+// in different types.
+bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
+  assert(rec->is_allocation_record(), "Sanity check");
+  VMMemRegion* cur = (VMMemRegion*)current();
+
+  // we don't have anything yet
+  if (cur == NULL) {
+    return insert_record(rec);
+  }
+
+  assert(cur->is_reserved_region(), "Sanity check");
+  // duplicated records
+  if (cur->is_same_region(rec)) {
+    return true;
+  }
+  assert(cur->base() > rec->addr(), "Just check: locate()");
+  assert(rec->addr() + rec->size() <= cur->base(), "Can not overlap");
+  return insert_record(rec);
+}
+
+// we do consolidate committed regions
+bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
+  assert(rec->is_commit_record(), "Sanity check");
+  VMMemRegion* cur;
+  cur = (VMMemRegion*)current();
+  assert(cur->is_reserved_region() && cur->contains_region(rec),
+    "Sanity check");
+
+  // thread's native stack is always marked as "committed", ignore
+  // the "commit" operation for creating stack guard pages
+  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
+      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
+    return true;
+  }
+
+  cur = (VMMemRegion*)next();
+  while (cur != NULL && cur->is_committed_region()) {
+    // duplicated commit records
+    if (cur->contains_region(rec)) {
+      return true;
+    }
+    if (cur->base() > rec->addr()) {
+      // committed regions can not overlap
+      assert(rec->addr() + rec->size() <= cur->base(), "Can not overlap");
+      if (rec->addr() + rec->size() == cur->base()) {
+        cur->expand_region(rec->addr(), rec->size());
+        return true;
+      } else {
+        return insert_record(rec);
+      }
+    } else if (cur->base() + cur->size() == rec->addr()) {
+      cur->expand_region(rec->addr(), rec->size());
+      VMMemRegion* next_reg = (VMMemRegion*)next();
+      // see if we can consolidate next committed region
+      if (next_reg != NULL && next_reg->is_committed_region() &&
+          next_reg->base() == cur->base() + cur->size()) {
+        cur->expand_region(next_reg->base(), next_reg->size());
+        remove();
+      }
+      return true;
+    }
+    cur = (VMMemRegion*)next();
+  }
+  return insert_record(rec);
+}
+
+bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
+  assert(rec->is_uncommit_record(), "sanity check");
+  VMMemRegion* cur;
+  cur = (VMMemRegion*)current();
+  assert(cur->is_reserved_region() && cur->contains_region(rec),
+    "Sanity check");
+  // thread's native stack is always marked as "committed", ignore
+  // the "commit" operation for creating stack guard pages
+  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
+      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
+    return true;
+  }
+
+  cur = (VMMemRegion*)next();
+  while (cur != NULL && cur->is_committed_region()) {
+    // region already uncommitted, must be due to duplicated record
+    if (cur->addr() >= rec->addr() + rec->size()) {
+      break;
+    } else if (cur->contains_region(rec)) {
+      // uncommit whole region
+      if (cur->is_same_region(rec)) {
+        remove();
+        break;
+      } else if (rec->addr() == cur->addr() ||
+        rec->addr() + rec->size() == cur->addr() + cur->size()) {
+        // uncommitted from either end of current memory region.
+        cur->exclude_region(rec->addr(), rec->size());
+        break;
+      } else {
+        // split the committed region and release the middle
+        address high_addr = cur->addr() + cur->size();
+        size_t sz = high_addr - rec->addr();
+        cur->exclude_region(rec->addr(), sz);
+        sz = high_addr - (rec->addr() + rec->size());
+        if (MemTracker::track_callsite()) {
+          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
+            ((VMMemRegionEx*)cur)->pc());
+          return insert_record_after(&tmp);
+        } else {
+          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
+          return insert_record_after(&tmp);
+        }
+      }
+    }
+    cur = (VMMemRegion*)next();
+  }
+
+  // we may not find committed record due to duplicated records
+  return true;
+}
+
+bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
+  assert(rec->is_deallocation_record(), "Sanity check");
+  VMMemRegion* cur = (VMMemRegion*)current();
+  assert(cur->is_reserved_region() && cur->contains_region(rec),
+    "Sanity check");
+#ifdef ASSERT
+  VMMemRegion* next_reg = (VMMemRegion*)peek_next();
+  // should not have any committed memory in this reserved region
+  assert(next_reg == NULL || !next_reg->is_committed_region(), "Sanity check");
+#endif
+  if (rec->is_same_region(cur)) {
+    remove();
+  } else if (rec->addr() == cur->addr() ||
+    rec->addr() + rec->size() == cur->addr() + cur->size()) {
+    // released region is at either end of this region
+    cur->exclude_region(rec->addr(), rec->size());
+  } else {
+    // split the reserved region and release the middle
+    address high_addr = cur->addr() + cur->size();
+    size_t sz = high_addr - rec->addr();
+    cur->exclude_region(rec->addr(), sz);
+    sz = high_addr - rec->addr() - rec->size();
+    if (MemTracker::track_callsite()) {
+      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
+        ((VMMemRegionEx*)cur)->pc());
+      return insert_reserved_region(&tmp);
+    } else {
+      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
+      return insert_reserved_region(&tmp);
+    }
+  }
+  return true;
+}
+
+bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
+  // skip all 'commit' records associated with previous reserved region
+  VMMemRegion* p = (VMMemRegion*)next();
+  while (p != NULL && p->is_committed_region() &&
+         p->base() + p->size() < rec->addr()) {
+    p = (VMMemRegion*)next();
+  }
+  return insert_record(rec);
+}
+
+bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
+  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
+  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
+  if (rgn->base() == new_rgn_addr) {
+    // new region is at the beginning of the region
+    size_t sz = rgn->size() - new_rgn_size;
+    // the original region becomes 'new' region
+    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
+    // remaining becomes next region
+    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
+    return insert_reserved_region(&next_rgn);
+  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
+    rgn->exclude_region(new_rgn_addr, new_rgn_size);
+    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
+    return insert_reserved_region(&next_rgn);
+  } else {
+    // the original region will be split into three
+    address rgn_high_addr = rgn->base() + rgn->size();
+    // first region
+    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
+    // the second region is the new region
+    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
+    if (!insert_reserved_region(&new_rgn)) return false;
+    // the remaining region
+    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
+      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
+    return insert_reserved_region(&rem_rgn);
+  }
+}
+
 static int sort_in_seq_order(const void* p1, const void* p2) {
   assert(p1 != NULL && p2 != NULL, "Sanity check");
   const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
...
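Of the methods added above, add_committed_region() carries the core invariant: committed ranges stay sorted and non-overlapping, a new commit that abuts an existing range extends it, and a range that then abuts its successor is merged with it. A minimal standalone sketch of that consolidation over a sorted vector (Range is an assumption, not the HotSpot array type):

#include <cassert>
#include <cstdint>
#include <vector>

struct Range { uintptr_t base; size_t size; };  // committed range, sorted by base

// Insert [base, base+size) into a sorted, non-overlapping list, merging with
// any range it touches on either side, as add_committed_region() does.
void add_committed(std::vector<Range>& v, uintptr_t base, size_t size) {
  size_t i = 0;
  while (i < v.size() && v[i].base + v[i].size < base) i++;
  if (i < v.size() && v[i].base + v[i].size == base) {
    v[i].size += size;                       // extend left neighbor upward
    if (i + 1 < v.size() && v[i].base + v[i].size == v[i + 1].base) {
      v[i].size += v[i + 1].size;            // merge with right neighbor
      v.erase(v.begin() + i + 1);
    }
  } else if (i < v.size() && base + size == v[i].base) {
    v[i].base = base;                        // extend right neighbor downward
    v[i].size += size;
  } else {
    v.insert(v.begin() + i, Range{base, size});
  }
}

int main() {
  std::vector<Range> v;
  add_committed(v, 0x1000, 0x1000);
  add_committed(v, 0x3000, 0x1000);
  add_committed(v, 0x2000, 0x1000);  // bridges the two existing ranges
  assert(v.size() == 1 && v[0].base == 0x1000 && v[0].size == 0x3000);
  return 0;
}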
@@ -61,11 +275,11 @@ bool StagingArea::init() {
 }
 
-MemPointerArrayIteratorImpl StagingArea::virtual_memory_record_walker() {
+VMRecordIterator StagingArea::virtual_memory_record_walker() {
   MemPointerArray* arr = vm_data();
   // sort into seq number order
   arr->sort((FN_SORT)sort_in_seq_order);
-  return MemPointerArrayIteratorImpl(arr);
+  return VMRecordIterator(arr);
 }
...
@@ -135,6 +349,8 @@ bool MemSnapshot::merge(MemRecorder* rec) {
         return false;
       }
     } else {
+      // locate matched record and/or also position the iterator to proper
+      // location for this incoming record.
       p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
       // we have not seen this memory block, so just add to staging area
       if (p2 == NULL) {
...
@@ -199,7 +415,7 @@ bool MemSnapshot::promote() {
   MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
   bool promoted = false;
   if (promote_malloc_records(&malloc_itr)) {
-    MemPointerArrayIteratorImpl vm_itr = _staging_area.virtual_memory_record_walker();
+    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
     if (promote_virtual_memory_records(&vm_itr)) {
       promoted = true;
     }
...
@@ -218,7 +434,7 @@ bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
     matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
     // found matched memory block
     if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
-      // snapshot already contains 'lived' records
+      // snapshot already contains 'live' records
       assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
         "Sanity check");
       // update block states
...
@@ -277,87 +493,60 @@ bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
 bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
   VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
   MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
-  VMMemRegionEx new_vm_rec;
-  VMMemRegion*  matched_rec;
+  VMMemRegion*  reserved_rec;
   while (new_rec != NULL) {
     assert(new_rec->is_vm_pointer(), "Sanity check");
-    if (MemTracker::track_callsite()) {
-      new_vm_rec.init((MemPointerRecordEx*)new_rec);
-    } else {
-      new_vm_rec.init(new_rec);
-    }
-    matched_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
-    if (matched_rec != NULL &&
-        (matched_rec->contains(&new_vm_rec) || matched_rec->base() == new_vm_rec.base())) {
-      // snapshot can only have 'live' records
-      assert(matched_rec->is_reserve_record(), "Sanity check");
-      if (new_vm_rec.is_reserve_record() && matched_rec->base() == new_vm_rec.base()) {
-        // resize reserved virtual memory range
-        // resize has to cover committed area
-        assert(new_vm_rec.size() >= matched_rec->committed_size(), "Sanity check");
-        matched_rec->set_reserved_size(new_vm_rec.size());
-      } else if (new_vm_rec.is_commit_record()) {
-        // commit memory inside reserved memory range
-        assert(new_vm_rec.committed_size() <= matched_rec->reserved_size(), "Sanity check");
-        // thread stacks are marked committed, so we ignore 'commit' record for creating
-        // stack guard pages
-        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) != mtThreadStack) {
-          matched_rec->commit(new_vm_rec.committed_size());
-        }
-      } else if (new_vm_rec.is_uncommit_record()) {
-        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtThreadStack) {
-          // ignore 'uncommit' record from removing stack guard pages, uncommit
-          // thread stack as whole
-          if (matched_rec->committed_size() == new_vm_rec.committed_size()) {
-            matched_rec->uncommit(new_vm_rec.committed_size());
-          }
-        } else {
-          // uncommit memory inside reserved memory range
-          assert(new_vm_rec.committed_size() <= matched_rec->committed_size(), "Sanity check");
-          matched_rec->uncommit(new_vm_rec.committed_size());
-        }
-      } else if (new_vm_rec.is_type_tagging_record()) {
-        // tag this virtual memory range to a memory type
-        // can not re-tag a memory range to different type
-        assert(FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtNone ||
-          FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_vm_rec.flags()),
-          "Sanity check");
-        matched_rec->tag(new_vm_rec.flags());
-      } else if (new_vm_rec.is_release_record()) {
-        // release part or whole memory range
-        if (new_vm_rec.base() == matched_rec->base() &&
-            new_vm_rec.size() == matched_rec->size()) {
-          // release whole virtual memory range
-          assert(matched_rec->committed_size() == 0, "Sanity check");
-          vm_snapshot_itr.remove();
-        } else {
-          // partial release
-          matched_rec->partial_release(new_vm_rec.base(), new_vm_rec.size());
-        }
-      } else {
-        // multiple reserve/commit on the same virtual memory range
-        assert((new_vm_rec.is_reserve_record() || new_vm_rec.is_commit_record()) &&
-          (new_vm_rec.base() == matched_rec->base() && new_vm_rec.size() == matched_rec->size()),
-          "Sanity check");
-        matched_rec->tag(new_vm_rec.flags());
-      }
-    } else {
-      // no matched record
-      if (new_vm_rec.is_reserve_record()) {
-        if (matched_rec == NULL || matched_rec->base() > new_vm_rec.base()) {
-          if (!vm_snapshot_itr.insert(&new_vm_rec)) {
-            return false;
-          }
-        } else {
-          if (!vm_snapshot_itr.insert_after(&new_vm_rec)) {
-            return false;
-          }
-        }
-      } else {
-        // throw out obsolete records, which are the commit/uncommit/release/tag records
-        // on memory regions that are already released.
-      }
-    }
+
+    // locate a reserved region that contains the specified address, or
+    // the nearest reserved region has base address just above the specified
+    // address
+    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
+    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
+      // snapshot can only have 'live' records
+      assert(reserved_rec->is_reserved_region(), "Sanity check");
+      if (new_rec->is_allocation_record()) {
+        if (!reserved_rec->is_same_region(new_rec)) {
+          // only deal with split a bigger reserved region into smaller regions.
+          // So far, CDS is the only use case.
+          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
+            return false;
+          }
+        }
+      } else if (new_rec->is_uncommit_record()) {
+        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
+          return false;
+        }
+      } else if (new_rec->is_commit_record()) {
+        // insert or expand existing committed region to cover this
+        // newly committed region
+        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
+          return false;
+        }
+      } else if (new_rec->is_deallocation_record()) {
+        // release part or all memory region
+        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
+          return false;
+        }
+      } else if (new_rec->is_type_tagging_record()) {
+        // tag this reserved virtual memory range to a memory type. Can not re-tag a memory range
+        // to different type.
+        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
+               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
+               "Sanity check");
+        reserved_rec->tag(new_rec->flags());
+      } else {
+        ShouldNotReachHere();
+      }
+    } else {
+      /*
+       * The assertion failure indicates mis-matched virtual memory records. The likely
+       * scenario is, that some virtual memory operations are not going through os::xxxx_memory()
+       * api, which have to be tracked manually. (perfMemory is an example).
+      */
+      assert(new_rec->is_allocation_record(), "Sanity check");
+      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
+        return false;
+      }
+    }
     new_rec = (MemPointerRecord*)itr->next();
   }
...
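The rewritten loop is essentially a dispatcher: it locates the enclosing reserved region, then routes each staged record to a split/commit/uncommit/release/tag operation, and treats a record with no matching reservation as legal only when it is itself a fresh reservation. A schematic sketch of that shape; the enum and handlers are illustrative only, not HotSpot's types:

#include <cstdio>

enum RecordKind { Reserve, Commit, Uncommit, Release, Tag };

struct Record { RecordKind kind; };

// Illustrative handlers; in HotSpot these are the VMMemPointerIterator
// operations (split_reserved_region, add_committed_region, and so on).
bool handle(const Record& rec, bool found_enclosing_reserved) {
  if (found_enclosing_reserved) {
    switch (rec.kind) {
      case Reserve:  printf("split reserved region\n");     return true;
      case Commit:   printf("add committed region\n");      return true;
      case Uncommit: printf("remove uncommitted region\n"); return true;
      case Release:  printf("remove released region\n");    return true;
      case Tag:      printf("tag region type\n");           return true;
    }
  }
  // no enclosing region: only a fresh reservation is legal here,
  // mirroring the assertion in the real code
  if (rec.kind == Reserve) { printf("add reserved region\n"); return true; }
  return false;
}

int main() {
  Record r = {Commit};
  return handle(r, true) ? 0 : 1;
}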
@@ -433,5 +622,33 @@ void MemSnapshot::check_staging_data() {
     cur = (MemPointerRecord*)vm_itr.next();
   }
 }
 
+void MemSnapshot::dump_all_vm_pointers() {
+  MemPointerArrayIteratorImpl itr(_vm_ptrs);
+  VMMemRegion* ptr = (VMMemRegion*)itr.current();
+  tty->print_cr("dump virtual memory pointers:");
+  while (ptr != NULL) {
+    if (ptr->is_committed_region()) {
+      tty->print("\t");
+    }
+    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
+      (ptr->addr() + ptr->size()), ptr->flags());
+    if (MemTracker::track_callsite()) {
+      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
+      if (ex->pc() != NULL) {
+        char buf[1024];
+        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
+          tty->print_cr("\t%s", buf);
+        } else {
+          tty->print_cr("");
+        }
+      }
+    }
+    ptr = (VMMemRegion*)itr.next();
+  }
+  tty->flush();
+}
 #endif // ASSERT
src/share/vm/services/memSnapshot.hpp
...
@@ -111,33 +111,41 @@ class VMMemPointerIterator : public MemPointerIterator {
     MemPointerIterator(arr) {
   }
 
-  // locate an existing record that contains specified address, or
-  // the record, where the record with specified address, should
-  // be inserted.
-  // virtual memory record array is sorted in address order, so
-  // binary search is performed
+  // locate an existing reserved memory region that contains specified address,
+  // or the reserved region just above this address, where the incoming
+  // reserved region should be inserted.
   virtual MemPointer* locate(address addr) {
-    int index_low = 0;
-    int index_high = _array->length();
-    int index_mid = (index_high + index_low) / 2;
-    int r = 1;
-    while (index_low < index_high && (r = compare(index_mid, addr)) != 0) {
-      if (r > 0) {
-        index_high = index_mid;
-      } else {
-        index_low = index_mid;
-      }
-      index_mid = (index_high + index_low) / 2;
-    }
-    if (r == 0) {
-      // update current location
-      _pos = index_mid;
-      return _array->at(index_mid);
-    } else {
-      return NULL;
-    }
+    reset();
+    VMMemRegion* reg = (VMMemRegion*)current();
+    while (reg != NULL) {
+      if (reg->is_reserved_region()) {
+        if (reg->contains_address(addr) || addr < reg->base()) {
+          return reg;
+        }
+      }
+      reg = (VMMemRegion*)next();
+    }
+    return NULL;
   }
+
+  // following methods update virtual memory in the context
+  // of 'current' position, which is properly positioned by
+  // callers via locate method.
+  bool add_reserved_region(MemPointerRecord* rec);
+  bool add_committed_region(MemPointerRecord* rec);
+  bool remove_uncommitted_region(MemPointerRecord* rec);
+  bool remove_released_region(MemPointerRecord* rec);
+
+  // split a reserved region to create a new memory region with specified base and size
+  bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
+
+ private:
+  bool insert_record(MemPointerRecord* rec);
+  bool insert_record_after(MemPointerRecord* rec);
+  bool insert_reserved_region(MemPointerRecord* rec);
+
+  // reset current position
+  inline void reset() { _pos = 0; }
+
 #ifdef ASSERT
   virtual bool is_dup_pointer(const MemPointer* ptr1, const MemPointer* ptr2) const {
...
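The new locate() trades the old binary search for a linear scan that returns either the reserved region containing the address or the first reserved region starting above it, which is the positioning contract the add_*/remove_* methods rely on. A standalone sketch of the same contract (Region is an assumption, not VMMemRegion):

#include <cassert>
#include <cstdint>
#include <vector>

struct Region {
  bool      reserved;
  uintptr_t base;
  size_t    size;
  bool contains(uintptr_t a) const { return a >= base && a < base + size; }
};

// Return the reserved region containing addr, or the nearest reserved region
// whose base lies above addr; nullptr if neither exists. Mirrors locate().
const Region* locate(const std::vector<Region>& v, uintptr_t addr) {
  for (const Region& r : v) {
    if (r.reserved && (r.contains(addr) || addr < r.base)) return &r;
  }
  return nullptr;
}

int main() {
  std::vector<Region> v = {{true, 0x1000, 0x1000}, {true, 0x4000, 0x1000}};
  assert(locate(v, 0x1800) == &v[0]);    // contained
  assert(locate(v, 0x3000) == &v[1]);    // next reserved region above
  assert(locate(v, 0x6000) == nullptr);  // past the last region
  return 0;
}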
@@ -154,32 +162,17 @@ class VMMemPointerIterator : public MemPointerIterator {
       (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
   }
 #endif
-  // compare if an address falls into a memory region,
-  // return 0, if the address falls into a memory region at specified index
-  // return 1, if memory region pointed by specified index is higher than the address
-  // return -1, if memory region pointed by specified index is lower than the address
-  int compare(int index, address addr) const {
-    VMMemRegion* r = (VMMemRegion*)_array->at(index);
-    assert(r->is_reserve_record(), "Sanity check");
-    if (r->addr() > addr) {
-      return 1;
-    } else if (r->addr() + r->reserved_size() <= addr) {
-      return -1;
-    } else {
-      return 0;
-    }
-  }
 };
 
 class MallocRecordIterator : public MemPointerArrayIterator {
- private:
+ protected:
   MemPointerArrayIteratorImpl  _itr;
 
  public:
   MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
   }
 
-  MemPointer* current() const {
+  virtual MemPointer* current() const {
     MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
     assert(cur == NULL || !cur->is_vm_pointer(), "seek error");
     MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
...
@@ -194,7 +187,7 @@ class MallocRecordIterator : public MemPointerArrayIterator {
     }
   }
 
-  MemPointer* next() {
+  virtual MemPointer* next() {
     MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
     assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check");
     MemPointerRecord* next = (MemPointerRecord*)_itr.next();
...
@@ -214,6 +207,63 @@ class MallocRecordIterator : public MemPointerArrayIterator {
   bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
 };
 
+// collapse duplicated records. Eliminating duplicated records here, is much
+// cheaper than during promotion phase. However, it does have limitation - it
+// can only eliminate duplicated records within the generation, there are
+// still chances seeing duplicated records during promotion.
+// We want to use the record with higher sequence number, because it has
+// more accurate callsite pc.
+class VMRecordIterator : public MallocRecordIterator {
+ public:
+  VMRecordIterator(MemPointerArray* arr) : MallocRecordIterator(arr) {
+    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
+    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
+    while (next != NULL) {
+      assert(cur != NULL, "Sanity check");
+      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
+        "pre-sort order");
+
+      if (is_duplicated_record(cur, next)) {
+        _itr.next();
+        next = (MemPointerRecord*)_itr.peek_next();
+      } else {
+        break;
+      }
+    }
+  }
+
+  virtual MemPointer* current() const {
+    return _itr.current();
+  }
+
+  // get next record, but skip the duplicated records
+  virtual MemPointer* next() {
+    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
+    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
+    while (next != NULL) {
+      assert(cur != NULL, "Sanity check");
+      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
+        "pre-sort order");
+
+      if (is_duplicated_record(cur, next)) {
+        _itr.next();
+        cur = next;
+        next = (MemPointerRecord*)_itr.peek_next();
+      } else {
+        break;
+      }
+    }
+    return cur;
+  }
+
+ private:
+  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
+    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
+    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
+    return ret;
+  }
+};
+
 class StagingArea : public _ValueObj {
  private:
   MemPointerArray*   _malloc_data;
...
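VMRecordIterator relies on the array being pre-sorted by sequence number; whenever two neighbors describe the same region it steps past the earlier one, so callers only ever see the record with the higher (more accurate) sequence number. A standalone sketch of that collapse over a plain vector, with Rec as an assumed stand-in for SeqMemPointerRecord:

#include <cassert>
#include <cstdint>
#include <vector>

struct Rec { uintptr_t addr; size_t size; int seq; };

// Walk records sorted by seq, dropping a record whenever its successor
// describes the same region; the later record wins, as in VMRecordIterator.
std::vector<Rec> collapse_duplicates(const std::vector<Rec>& in) {
  std::vector<Rec> out;
  for (size_t i = 0; i < in.size(); i++) {
    bool dup_of_next = (i + 1 < in.size() &&
                        in[i].addr == in[i + 1].addr &&
                        in[i].size == in[i + 1].size);
    if (!dup_of_next) out.push_back(in[i]);
  }
  return out;
}

int main() {
  std::vector<Rec> in = {{0x1000, 0x100, 1}, {0x1000, 0x100, 2}, {0x2000, 0x100, 3}};
  std::vector<Rec> out = collapse_duplicates(in);
  assert(out.size() == 2 && out[0].seq == 2);  // later duplicate kept
  return 0;
}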
@@ -233,7 +283,8 @@ class StagingArea : public _ValueObj {
     return MallocRecordIterator(malloc_data());
   }
 
-  MemPointerArrayIteratorImpl virtual_memory_record_walker();
+  VMRecordIterator virtual_memory_record_walker();
+
   bool init();
   void clear() {
     assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
...
@@ -293,6 +344,8 @@ class MemSnapshot : public CHeapObj<mtNMT> {
   NOT_PRODUCT(void check_staging_data();)
   NOT_PRODUCT(void check_malloc_pointers();)
   NOT_PRODUCT(bool has_allocation_record(address addr);)
+  // dump all virtual memory pointers in snapshot
+  DEBUG_ONLY(void dump_all_vm_pointers();)
 
  private:
   // copy pointer data from src to dest
...
@@ -302,5 +355,4 @@ class MemSnapshot : public CHeapObj<mtNMT> {
   bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
 };
 
 #endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
src/share/vm/services/memTracker.cpp
...
@@ -364,7 +364,7 @@ void MemTracker::create_memory_record(address addr, MEMFLAGS flags,
   if (thread != NULL) {
     if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
-      JavaThread* java_thread = static_cast<JavaThread*>(thread);
+      JavaThread* java_thread = (JavaThread*)thread;
       JavaThreadState state = java_thread->thread_state();
       if (SafepointSynchronize::safepoint_safe(java_thread, state)) {
         // JavaThreads that are safepoint safe, can run through safepoint,
...
@@ -472,6 +472,8 @@ void MemTracker::sync() {
     // it should guarantee that NMT is fully sync-ed.
     ThreadCritical tc;
 
+    SequenceGenerator::reset();
+
     // walk all JavaThreads to collect recorders
     SyncThreadRecorderClosure stc;
     Threads::threads_do(&stc);
...
@@ -484,11 +486,12 @@ void MemTracker::sync() {
       pending_recorders = _global_recorder;
       _global_recorder = NULL;
     }
-    SequenceGenerator::reset();
     // check _worker_thread with lock to avoid racing condition
     if (_worker_thread != NULL) {
       _worker_thread->at_sync_point(pending_recorders);
     }
+
+    assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
   }
 }
...
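The two memTracker.cpp hunks move SequenceGenerator::reset() to the start of the sync critical section and assert afterwards that peek() is still 1, i.e. that no tracked memory activity generated sequence numbers while the sync point was in progress. A minimal sketch of that invariant; SequenceGenerator here is a stand-in, not the HotSpot class:

#include <cassert>

// Stand-in sequence generator: hands out increasing sequence numbers;
// peek() shows the next number without consuming it.
class SequenceGenerator {
 public:
  static void reset() { _next = 1; }
  static int  next()  { return _next++; }
  static int  peek()  { return _next; }
 private:
  static int _next;
};
int SequenceGenerator::_next = 1;

void sync_point() {
  SequenceGenerator::reset();
  // ... collect and process recorders; no tracked allocation may call
  // SequenceGenerator::next() while this section runs ...
  assert(SequenceGenerator::peek() == 1,
         "Should not have memory activities during sync-point");
}

int main() {
  sync_point();
  return 0;
}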
src/share/vm/services/memTracker.hpp
...
@@ -113,8 +113,10 @@ class MemTracker : AllStatic {
 #include "thread_solaris.inline.hpp"
 #endif
 
-#ifdef _DEBUG
-  #define DEBUG_CALLER_PC  os::get_caller_pc(3)
+extern bool NMT_track_callsite;
+
+#ifdef ASSERT
+  #define DEBUG_CALLER_PC  (NMT_track_callsite ? os::get_caller_pc(2) : 0)
 #else
   #define DEBUG_CALLER_PC  0
 #endif
...
@@ -261,7 +263,7 @@ class MemTracker : AllStatic {
   // record a 'malloc' call
   static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
                       address pc = 0, Thread* thread = NULL) {
-    if (NMT_CAN_TRACK(flags)) {
+    if (is_on() && NMT_CAN_TRACK(flags)) {
       assert(size > 0, "Sanity check");
       create_memory_record(addr, (flags|MemPointerRecord::malloc_tag()), size, pc, thread);
     }
...
@@ -275,7 +277,7 @@ class MemTracker : AllStatic {
   // record a 'realloc' call
   static inline void record_realloc(address old_addr, address new_addr, size_t size,
        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
-    if (is_on()) {
+    if (is_on() && NMT_CAN_TRACK(flags)) {
       assert(size > 0, "Sanity check");
       record_free(old_addr, flags, thread);
       record_malloc(new_addr, size, flags, pc, thread);
...
@@ -317,6 +319,7 @@ class MemTracker : AllStatic {
   static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
     if (is_on()) {
       assert(size > 0 && thr != NULL, "Sanity check");
+      assert(!thr->is_Java_thread(), "too early");
       create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag() | mtThreadStack,
                            size, DEBUG_CALLER_PC, thr);
       create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag() | mtThreadStack,
...
@@ -326,11 +329,11 @@ class MemTracker : AllStatic {
   // record a virtual memory 'commit' call
   static inline void record_virtual_memory_commit(address addr, size_t size,
-                            address pc = 0, Thread* thread = NULL) {
+                            address pc, Thread* thread = NULL) {
     if (is_on()) {
       assert(size > 0, "Sanity check");
       create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag(),
-                           size, DEBUG_CALLER_PC, thread);
+                           size, pc, thread);
     }
   }
...