openanolis / dragonwell8_hotspot
Commit eedeebde (Merge), authored on Sep 11, 2012 by zgu
Parents: c9bb8cb1, 93294c84
Showing 7 changed files with 379 additions and 366 deletions (+379 −366)
src/share/vm/runtime/thread.cpp          +5   −6
src/share/vm/services/memPtr.cpp         +5   −5
src/share/vm/services/memPtrArray.hpp    +0   −4
src/share/vm/services/memSnapshot.cpp    +244 −270
src/share/vm/services/memSnapshot.hpp    +95  −75
src/share/vm/services/memTrackWorker.cpp +4   −1
src/share/vm/services/memTracker.hpp     +26  −5
src/share/vm/runtime/thread.cpp
@@ -318,10 +318,9 @@ void Thread::record_stack_base_and_size() {
   set_stack_size(os::current_stack_size());
   // record thread's native stack, stack grows downward
-  address vm_base = _stack_base - _stack_size;
-  MemTracker::record_virtual_memory_reserve(vm_base, _stack_size, CURRENT_PC, this);
-  MemTracker::record_virtual_memory_type(vm_base, mtThreadStack);
+  address low_stack_addr = stack_base() - stack_size();
+  MemTracker::record_thread_stack(low_stack_addr, stack_size(), this, CURRENT_PC);
 }
@@ -329,8 +328,8 @@ Thread::~Thread() {
   // Reclaim the objectmonitors from the omFreeList of the moribund thread.
   ObjectSynchronizer::omFlush(this);

-  MemTracker::record_virtual_memory_release((_stack_base - _stack_size),
-    _stack_size, this);
+  address low_stack_addr = stack_base() - stack_size();
+  MemTracker::release_thread_stack(low_stack_addr, stack_size(), this);

   // deallocate data structures
   delete resource_area();
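The replaced calls above fold thread-stack bookkeeping into a single MemTracker entry point. A minimal sketch (not HotSpot code) of the address arithmetic involved: the native stack grows downward, so the tracked region starts at stack_base() minus stack_size(). The addresses below are made up for illustration.

#include <cstddef>
#include <cstdio>

typedef unsigned char* address;  // byte pointer, mirroring HotSpot's 'address'

int main() {
  address stack_base = (address)0x7f0000200000;  // hypothetical high end of the stack
  size_t  stack_size = 0x100000;                 // hypothetical 1 MB stack
  address low_stack_addr = stack_base - stack_size;
  // NMT tracks the thread stack as the region [low_stack_addr, stack_base)
  printf("stack region: [%p, %p)\n", (void*)low_stack_addr, (void*)stack_base);
  return 0;
}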
src/share/vm/services/memPtr.cpp
@@ -43,9 +43,9 @@ jint SequenceGenerator::next() {
 bool VMMemRegion::contains(const VMMemRegion* mr) const {
-  assert(base() != 0, "no base address");
-  assert(size() != 0 || committed_size() != 0, "no range");
+  assert(base() != 0, "Sanity check");
+  assert(size() != 0 || committed_size() != 0, "Sanity check");
   address base_addr = base();
   address end_addr = base_addr +
     (is_reserve_record() ? reserved_size() : committed_size());
@@ -61,14 +61,14 @@ bool VMMemRegion::contains(const VMMemRegion* mr) const {
     return (mr->base() >= base_addr &&
       (mr->base() + mr->committed_size()) <= end_addr);
   } else if (mr->is_type_tagging_record()) {
-    assert(mr->base() != 0, "no base");
-    return mr->base() == base_addr;
+    assert(mr->base() != NULL, "Sanity check");
+    return (mr->base() >= base_addr && mr->base() < end_addr);
   } else if (mr->is_release_record()) {
     assert(mr->base() != 0 && mr->size() > 0, "bad record");
     return (mr->base() == base_addr && mr->size() == size());
   } else {
-    assert(false, "what happened?");
+    ShouldNotReachHere();
     return false;
   }
 }
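The widened check for type-tagging records is the substantive fix in this file: previously a tagging record only counted as contained when it sat exactly at the region's base address. A hedged illustration (not HotSpot code) of the new semantics, with made-up addresses:

#include <cassert>

typedef unsigned char* address;  // byte pointer, as in HotSpot

// new behavior: a tag anywhere inside [base_addr, end_addr) matches the region
static bool tag_contained(address base_addr, address end_addr, address tag_addr) {
  return tag_addr >= base_addr && tag_addr < end_addr;
}

int main() {
  address base = (address)0x1000;
  address end  = (address)0x2000;
  assert(tag_contained(base, end, (address)0x1000));   // at base: matched before and after
  assert(tag_contained(base, end, (address)0x1800));   // interior: matched only after this change
  assert(!tag_contained(base, end, (address)0x2000));  // end address is exclusive
  return 0;
}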
src/share/vm/services/memPtrArray.hpp
@@ -84,11 +84,7 @@ class MemPointerArrayIterator VALUE_OBJ_CLASS_SPEC {
 // implementation class
 class MemPointerArrayIteratorImpl : public MemPointerArrayIterator {
-#ifdef ASSERT
  protected:
-#else
- private:
-#endif
   MemPointerArray*  _array;
   int               _pos;
src/share/vm/services/memSnapshot.cpp
@@ -31,148 +31,54 @@
 #include "services/memSnapshot.hpp"
 #include "services/memTracker.hpp"

+static int sort_in_seq_order(const void* p1, const void* p2) {
+  assert(p1 != NULL && p2 != NULL, "Sanity check");
+  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
+  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
+  return (mp1->seq() - mp2->seq());
+}
+
+bool StagingArea::init() {
+  if (MemTracker::track_callsite()) {
+    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
+    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
+  } else {
+    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
+    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
+  }
+
+  if (_malloc_data != NULL && _vm_data != NULL &&
+      !_malloc_data->out_of_memory() &&
+      !_vm_data->out_of_memory()) {
+    return true;
+  } else {
+    if (_malloc_data != NULL) delete _malloc_data;
+    if (_vm_data != NULL) delete _vm_data;
+    _malloc_data = NULL;
+    _vm_data = NULL;
+    return false;
+  }
+}
+
+MemPointerArrayIteratorImpl StagingArea::virtual_memory_record_walker() {
+  MemPointerArray* arr = vm_data();
+  // sort into seq number order
+  arr->sort((FN_SORT)sort_in_seq_order);
+  return MemPointerArrayIteratorImpl(arr);
+}
+
-// stagging data groups the data of a VM memory range, so we can consolidate
-// them into one record during the walk
-bool StagingWalker::consolidate_vm_records(VMMemRegionEx* vm_rec) {
-  MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
-  assert(cur != NULL && cur->is_vm_pointer(), "not a virtual memory pointer");
-
-  jint cur_seq;
-  jint next_seq;
-
-  bool trackCallsite = MemTracker::track_callsite();
-
-  if (trackCallsite) {
-    vm_rec->init((MemPointerRecordEx*)cur);
-    cur_seq = ((SeqMemPointerRecordEx*)cur)->seq();
-  } else {
-    vm_rec->init((MemPointerRecord*)cur);
-    cur_seq = ((SeqMemPointerRecord*)cur)->seq();
-  }
-
-  // only can consolidate when we have allocation record,
-  // which contains virtual memory range
-  if (!cur->is_allocation_record()) {
-    _itr.next();
-    return true;
-  }
-
-  // allocation range
-  address base = cur->addr();
-  address end = base + cur->size();
-
-  MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
-  // if the memory range is alive
-  bool live_vm_rec = true;
-  while (next != NULL && next->is_vm_pointer()) {
-    if (next->is_allocation_record()) {
-      assert(next->addr() >= base, "sorting order or overlapping");
-      break;
-    }
-
-    if (trackCallsite) {
-      next_seq = ((SeqMemPointerRecordEx*)next)->seq();
-    } else {
-      next_seq = ((SeqMemPointerRecord*)next)->seq();
-    }
-
-    if (next_seq < cur_seq) {
-      _itr.next();
-      next = (MemPointerRecord*)_itr.peek_next();
-      continue;
-    }
-
-    if (next->is_deallocation_record()) {
-      if (next->addr() == base && next->size() == cur->size()) {
-        // the virtual memory range has been released
-        _itr.next();
-        live_vm_rec = false;
-        break;
-      } else if (next->addr() < end) {
-        // partial release
-        vm_rec->partial_release(next->addr(), next->size());
-        _itr.next();
-      } else {
-        break;
-      }
-    } else if (next->is_commit_record()) {
-      if (next->addr() >= base && next->addr() + next->size() <= end) {
-        vm_rec->commit(next->size());
-        _itr.next();
-      } else {
-        assert(next->addr() >= base, "sorting order or overlapping");
-        break;
-      }
-    } else if (next->is_uncommit_record()) {
-      if (next->addr() >= base && next->addr() + next->size() <= end) {
-        vm_rec->uncommit(next->size());
-        _itr.next();
-      } else {
-        assert(next->addr() >= end, "sorting order or overlapping");
-        break;
-      }
-    } else if (next->is_type_tagging_record()) {
-      if (next->addr() >= base && next->addr() < end) {
-        vm_rec->tag(next->flags());
-        _itr.next();
-      } else {
-        break;
-      }
-    } else {
-      assert(false, "unknown record type");
-    }
-    next = (MemPointerRecord*)_itr.peek_next();
-  }
-  _itr.next();
-  return live_vm_rec;
-}
-
-MemPointer* StagingWalker::next() {
-  MemPointerRecord* cur_p = (MemPointerRecord*)_itr.current();
-  if (cur_p == NULL) {
-    _end_of_array = true;
-    return NULL;
-  }
-
-  MemPointerRecord* next_p;
-  if (cur_p->is_vm_pointer()) {
-    _is_vm_record = true;
-    if (!consolidate_vm_records(&_vm_record)) {
-      return next();
-    }
-  } else {
-    // malloc-ed pointer
-    _is_vm_record = false;
-    next_p = (MemPointerRecord*)_itr.peek_next();
-    if (next_p != NULL && next_p->addr() == cur_p->addr()) {
-      assert(cur_p->is_allocation_record(), "sorting order");
-      assert(!next_p->is_allocation_record(), "sorting order");
-      _itr.next();
-      if (cur_p->seq() < next_p->seq()) {
-        cur_p = next_p;
-      }
-    }
-    if (MemTracker::track_callsite()) {
-      _malloc_record.init((MemPointerRecordEx*)cur_p);
-    } else {
-      _malloc_record.init((MemPointerRecord*)cur_p);
-    }
-
-    _itr.next();
-  }
-  return current();
-}

 MemSnapshot::MemSnapshot() {
   if (MemTracker::track_callsite()) {
     _alloc_ptrs = new (std::nothrow)MemPointerArrayImpl<MemPointerRecordEx>();
     _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegionEx>(64, true);
-    _staging_area = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
   } else {
     _alloc_ptrs = new (std::nothrow)MemPointerArrayImpl<MemPointerRecord>();
     _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegion>(64, true);
-    _staging_area = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
   }

+  _staging_area.init();
   _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
   NOT_PRODUCT(_untracked_count = 0;)
 }
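StagingArea replaces the old single staging array plus on-the-fly StagingWalker consolidation: malloc records and virtual-memory records now live in separate arrays, each walked at promote time with logic suited to it. A minimal sketch (not HotSpot code; the container choice is illustrative) of the split:

#include <vector>

struct Record {
  unsigned long addr;
  int seq;        // sequence number within a generation
  bool is_vm;     // virtual-memory record vs. malloc record
};

class StagingAreaSketch {
  std::vector<Record> _malloc_data;  // merged sorted by address
  std::vector<Record> _vm_data;      // appended as-is, sorted by seq before replay
 public:
  bool add(const Record& rec) {
    (rec.is_vm ? _vm_data : _malloc_data).push_back(rec);
    return true;  // the real init()/append paths report allocation failure instead
  }
  void clear() {
    _malloc_data.clear();
    _vm_data.clear();
  }
};

int main() {
  StagingAreaSketch staging;
  staging.add({0x1000, 1, false});  // malloc record
  staging.add({0x2000, 2, true});   // vm record
  staging.clear();
  return 0;
}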
@@ -181,11 +87,6 @@ MemSnapshot::~MemSnapshot() {
   assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
   {
     MutexLockerEx locker(_lock);
-    if (_staging_area != NULL) {
-      delete _staging_area;
-      _staging_area = NULL;
-    }
-
     if (_alloc_ptrs != NULL) {
       delete _alloc_ptrs;
       _alloc_ptrs = NULL;
@@ -221,33 +122,34 @@ void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* s
 bool MemSnapshot::merge(MemRecorder* rec) {
   assert(rec != NULL && !rec->out_of_memory(), "Just check");

-  // out of memory
-  if (_staging_area == NULL || _staging_area->out_of_memory()) {
-    return false;
-  }
-
   SequencedRecordIterator itr(rec->pointer_itr());

   MutexLockerEx lock(_lock, true);
-  MemPointerIterator staging_itr(_staging_area);
+  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
   MemPointerRecord *p1, *p2;
   p1 = (MemPointerRecord*) itr.current();
   while (p1 != NULL) {
-    p2 = (MemPointerRecord*)staging_itr.locate(p1->addr());
-    // we have not seen this memory block, so just add to staging area
-    if (p2 == NULL) {
-      if (!staging_itr.insert(p1)) {
-        return false;
-      }
-    } else if (p1->addr() == p2->addr()) {
-      MemPointerRecord* staging_next = (MemPointerRecord*)staging_itr.peek_next();
+    if (p1->is_vm_pointer()) {
+      // we don't do anything with virtual memory records during merge
+      if (!_staging_area.vm_data()->append(p1)) {
+        return false;
+      }
+    } else {
+      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
+      // we have not seen this memory block, so just add to staging area
+      if (p2 == NULL) {
+        if (!malloc_staging_itr.insert(p1)) {
+          return false;
+        }
+      } else if (p1->addr() == p2->addr()) {
+        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
       // a memory block can have many tagging records, find right one to replace or
       // right position to insert
       while (staging_next != NULL && staging_next->addr() == p1->addr()) {
         if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
           (p1->flags() & MemPointerRecord::tag_masks)) {
-          p2 = (MemPointerRecord*)staging_itr.next();
-          staging_next = (MemPointerRecord*)staging_itr.peek_next();
+          p2 = (MemPointerRecord*)malloc_staging_itr.next();
+          staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
         } else {
           break;
         }

@@ -261,23 +163,24 @@ bool MemSnapshot::merge(MemRecorder* rec) {
           copy_pointer(p2, p1);
         }
-      } else if (df < 0) {
-        if (!staging_itr.insert(p1)) {
-          return false;
-        }
-      } else {
-        if (!staging_itr.insert_after(p1)) {
-          return false;
-        }
-      }
-    } else if (p1->addr() < p2->addr()) {
-      if (!staging_itr.insert(p1)) {
-        return false;
-      }
-    } else {
-      if (!staging_itr.insert_after(p1)) {
-        return false;
-      }
-    }
+        } else if (df < 0) {
+          if (!malloc_staging_itr.insert(p1)) {
+            return false;
+          }
+        } else {
+          if (!malloc_staging_itr.insert_after(p1)) {
+            return false;
+          }
+        }
+      } else if (p1->addr() < p2->addr()) {
+        if (!malloc_staging_itr.insert(p1)) {
+          return false;
+        }
+      } else {
+        if (!malloc_staging_itr.insert_after(p1)) {
+          return false;
+        }
+      }
+    }
     p1 = (MemPointerRecord*)itr.next();
   }
   NOT_PRODUCT(void check_staging_data();)
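The reworked merge() routes each incoming record by kind: virtual-memory records are appended to the vm staging array unsorted (ordering is deferred to virtual_memory_record_walker(), which sorts by sequence number), while malloc records keep the address-sorted locate/insert treatment. A hedged sketch (not HotSpot code; std::vector stands in for MemPointerArray) of that routing:

#include <algorithm>
#include <vector>

struct Rec { unsigned long addr; bool is_vm; };

static bool merge_one(std::vector<Rec>& malloc_data, std::vector<Rec>& vm_data,
                      const Rec& rec) {
  if (rec.is_vm) {
    vm_data.push_back(rec);  // no ordering work during merge
  } else {
    // keep malloc records sorted by address, as the locate()/insert dance does
    std::vector<Rec>::iterator pos = std::lower_bound(
        malloc_data.begin(), malloc_data.end(), rec,
        [](const Rec& a, const Rec& b) { return a.addr < b.addr; });
    malloc_data.insert(pos, rec);
  }
  return true;  // the real code returns false when an append/insert runs out of memory
}

int main() {
  std::vector<Rec> malloc_data, vm_data;
  merge_one(malloc_data, vm_data, {0x2000, false});
  merge_one(malloc_data, vm_data, {0x1000, false});  // lands before 0x2000
  merge_one(malloc_data, vm_data, {0x9000, true});   // goes straight to vm_data
  return 0;
}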
@@ -287,123 +190,180 @@ bool MemSnapshot::merge(MemRecorder* rec) {

 // promote data to next generation
-void MemSnapshot::promote() {
-  assert(_alloc_ptrs != NULL && _staging_area != NULL && _vm_ptrs != NULL,
-    "Just check");
-  MutexLockerEx lock(_lock, true);
-  StagingWalker walker(_staging_area);
-  MemPointerIterator malloc_itr(_alloc_ptrs);
-  VMMemPointerIterator vm_itr(_vm_ptrs);
-  MemPointer* cur = walker.current();
-  while (cur != NULL) {
-    if (walker.is_vm_record()) {
-      VMMemRegion* cur_vm = (VMMemRegion*)cur;
-      VMMemRegion* p = (VMMemRegion*)vm_itr.locate(cur_vm->addr());
-      cur_vm = (VMMemRegion*)cur;
-      if (p != NULL && (p->contains(cur_vm) || p->base() == cur_vm->base())) {
-        assert(p->is_reserve_record() ||
-          p->is_commit_record(), "wrong vm record type");
-        // resize existing reserved range
-        if (cur_vm->is_reserve_record() && p->base() == cur_vm->base()) {
-          assert(cur_vm->size() >= p->committed_size(), "incorrect resizing");
-          p->set_reserved_size(cur_vm->size());
-        } else if (cur_vm->is_commit_record()) {
-          p->commit(cur_vm->committed_size());
-        } else if (cur_vm->is_uncommit_record()) {
-          p->uncommit(cur_vm->committed_size());
-          if (!p->is_reserve_record() && p->committed_size() == 0) {
-            vm_itr.remove();
-          }
-        } else if (cur_vm->is_type_tagging_record()) {
-          p->tag(cur_vm->flags());
-        } else if (cur_vm->is_release_record()) {
-          if (cur_vm->base() == p->base() && cur_vm->size() == p->size()) {
-            // release the whole range
-            vm_itr.remove();
-          } else {
-            // partial release
-            p->partial_release(cur_vm->base(), cur_vm->size());
-          }
-        } else {
-          // we do see multiple reserver on the same vm range
-          assert((cur_vm->is_commit_record() || cur_vm->is_reserve_record()) &&
-            cur_vm->base() == p->base() && cur_vm->size() == p->size(), "bad record");
-          p->tag(cur_vm->flags());
-        }
-      } else {
-        if (cur_vm->is_reserve_record()) {
-          if (p == NULL || p->base() > cur_vm->base()) {
-            vm_itr.insert(cur_vm);
-          } else {
-            vm_itr.insert_after(cur_vm);
-          }
-        } else {
-          // In theory, we should assert without conditions. However, in case of native
-          // thread stack, NMT explicitly releases the thread stack in Thread's destructor,
-          // due to platform dependent behaviors. On some platforms, we see uncommit/release
-          // native thread stack, but some, we don't.
-          assert(cur_vm->is_uncommit_record() || cur_vm->is_deallocation_record(),
-            err_msg("Should not reach here, pointer addr = [" INTPTR_FORMAT "], flags = [%x]",
-              cur_vm->addr(), cur_vm->flags()));
-        }
-      }
-    } else {
-      MemPointerRecord* cur_p = (MemPointerRecord*)cur;
-      MemPointerRecord* p = (MemPointerRecord*)malloc_itr.locate(cur->addr());
-      if (p != NULL && cur_p->addr() == p->addr()) {
-        assert(p->is_allocation_record() || p->is_arena_size_record(), "untracked");
-        if (cur_p->is_allocation_record() || cur_p->is_arena_size_record()) {
-          copy_pointer(p, cur_p);
-        } else {
-          // deallocation record
-          assert(cur_p->is_deallocation_record(), "wrong record type");
-          // we are removing an arena record, we also need to remove its 'size'
-          // record behind it
-          if (p->is_arena_record()) {
-            MemPointerRecord* next_p = (MemPointerRecord*)malloc_itr.peek_next();
-            if (next_p->is_arena_size_record()) {
-              assert(next_p->is_size_record_of_arena(p), "arena records dont match");
-              malloc_itr.remove();
-            }
-          }
-          malloc_itr.remove();
-        }
-      } else {
-        if (cur_p->is_arena_size_record()) {
-          MemPointerRecord* prev_p = (MemPointerRecord*)malloc_itr.peek_prev();
-          if (prev_p != NULL &&
-            (!prev_p->is_arena_record() || !cur_p->is_size_record_of_arena(prev_p))) {
-            // arena already deallocated
-            cur_p = NULL;
-          }
-        }
-        if (cur_p != NULL) {
-          if (cur_p->is_allocation_record() || cur_p->is_arena_size_record()) {
-            if (p != NULL && cur_p->addr() > p->addr()) {
-              malloc_itr.insert_after(cur);
-            } else {
-              malloc_itr.insert(cur);
-            }
-          }
-#ifndef PRODUCT
-          else if (!has_allocation_record(cur_p->addr())) {
-            // NMT can not track some startup memory, which allocated before NMT
-            // is enabled
-            _untracked_count ++;
-          }
-#endif
-        }
-      }
-    }
-    cur = walker.next();
-  }
-  NOT_PRODUCT(check_malloc_pointers();)
-  _staging_area->shrink();
-  _staging_area->clear();
-}
+bool MemSnapshot::promote() {
+  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
+  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
+    "Just check");
+  MutexLockerEx lock(_lock, true);
+
+  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
+  bool promoted = false;
+  if (promote_malloc_records(&malloc_itr)) {
+    MemPointerArrayIteratorImpl vm_itr = _staging_area.virtual_memory_record_walker();
+    if (promote_virtual_memory_records(&vm_itr)) {
+      promoted = true;
+    }
+  }
+
+  NOT_PRODUCT(check_malloc_pointers();)
+  _staging_area.clear();
+  return promoted;
+}
+
+bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
+  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
+  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
+  MemPointerRecord* matched_rec;
+  while (new_rec != NULL) {
+    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
+    // found matched memory block
+    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
+      // snapshot already contains 'lived' records
+      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
+        "Sanity check");
+      // update block states
+      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
+        copy_pointer(matched_rec, new_rec);
+      } else {
+        // a deallocation record
+        assert(new_rec->is_deallocation_record(), "Sanity check");
+        // an arena record can be followed by a size record, we need to remove both
+        if (matched_rec->is_arena_record()) {
+          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
+          if (next->is_arena_size_record()) {
+            // it has to match the arena record
+            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
+            malloc_snapshot_itr.remove();
+          }
+        }
+        // the memory is deallocated, remove related record(s)
+        malloc_snapshot_itr.remove();
+      }
+    } else {
+      // it is a new record, insert into snapshot
+      if (new_rec->is_arena_size_record()) {
+        MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
+        if (prev == NULL || !prev->is_arena_record() ||
+          !new_rec->is_size_record_of_arena(prev)) {
+          // no matched arena record, ignore the size record
+          new_rec = NULL;
+        }
+      }
+      // only 'live' record can go into snapshot
+      if (new_rec != NULL) {
+        if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
+          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
+            if (!malloc_snapshot_itr.insert_after(new_rec)) {
+              return false;
+            }
+          } else {
+            if (!malloc_snapshot_itr.insert(new_rec)) {
+              return false;
+            }
+          }
+        }
+#ifndef PRODUCT
+        else if (!has_allocation_record(new_rec->addr())) {
+          // NMT can not track some startup memory, which is allocated before NMT is on
+          _untracked_count ++;
+        }
+#endif
+      }
+    }
+    new_rec = (MemPointerRecord*)itr->next();
+  }
+  return true;
+}
+
+bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
+  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
+  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
+  VMMemRegionEx new_vm_rec;
+  VMMemRegion*  matched_rec;
+  while (new_rec != NULL) {
+    assert(new_rec->is_vm_pointer(), "Sanity check");
+    if (MemTracker::track_callsite()) {
+      new_vm_rec.init((MemPointerRecordEx*)new_rec);
+    } else {
+      new_vm_rec.init(new_rec);
+    }
+    matched_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
+    if (matched_rec != NULL &&
+      (matched_rec->contains(&new_vm_rec) || matched_rec->base() == new_vm_rec.base())) {
+      // snapshot can only have 'live' records
+      assert(matched_rec->is_reserve_record(), "Sanity check");
+      if (new_vm_rec.is_reserve_record() && matched_rec->base() == new_vm_rec.base()) {
+        // resize reserved virtual memory range
+        // resize has to cover committed area
+        assert(new_vm_rec.size() >= matched_rec->committed_size(), "Sanity check");
+        matched_rec->set_reserved_size(new_vm_rec.size());
+      } else if (new_vm_rec.is_commit_record()) {
+        // commit memory inside reserved memory range
+        assert(new_vm_rec.committed_size() <= matched_rec->reserved_size(), "Sanity check");
+        // thread stacks are marked committed, so we ignore 'commit' record for creating
+        // stack guard pages
+        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) != mtThreadStack) {
+          matched_rec->commit(new_vm_rec.committed_size());
+        }
+      } else if (new_vm_rec.is_uncommit_record()) {
+        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtThreadStack) {
+          // ignore 'uncommit' record from removing stack guard pages, uncommit
+          // thread stack as whole
+          if (matched_rec->committed_size() == new_vm_rec.committed_size()) {
+            matched_rec->uncommit(new_vm_rec.committed_size());
+          }
+        } else {
+          // uncommit memory inside reserved memory range
+          assert(new_vm_rec.committed_size() <= matched_rec->committed_size(),
+            "Sanity check");
+          matched_rec->uncommit(new_vm_rec.committed_size());
+        }
+      } else if (new_vm_rec.is_type_tagging_record()) {
+        // tag this virtual memory range to a memory type
+        // can not re-tag a memory range to different type
+        assert(FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtNone ||
+          FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_vm_rec.flags()),
+          "Sanity check");
+        matched_rec->tag(new_vm_rec.flags());
+      } else if (new_vm_rec.is_release_record()) {
+        // release part or whole memory range
+        if (new_vm_rec.base() == matched_rec->base() &&
+          new_vm_rec.size() == matched_rec->size()) {
+          // release whole virtual memory range
+          assert(matched_rec->committed_size() == 0, "Sanity check");
+          vm_snapshot_itr.remove();
+        } else {
+          // partial release
+          matched_rec->partial_release(new_vm_rec.base(), new_vm_rec.size());
+        }
+      } else {
+        // multiple reserve/commit on the same virtual memory range
+        assert((new_vm_rec.is_reserve_record() || new_vm_rec.is_commit_record()) &&
+          (new_vm_rec.base() == matched_rec->base() && new_vm_rec.size() == matched_rec->size()),
+          "Sanity check");
+        matched_rec->tag(new_vm_rec.flags());
+      }
+    } else {
+      // no matched record
+      if (new_vm_rec.is_reserve_record()) {
+        if (matched_rec == NULL || matched_rec->base() > new_vm_rec.base()) {
+          if (!vm_snapshot_itr.insert(&new_vm_rec)) {
+            return false;
+          }
+        } else {
+          if (!vm_snapshot_itr.insert_after(&new_vm_rec)) {
+            return false;
+          }
+        }
+      } else {
+        // throw out obsolete records, which are the commit/uncommit/release/tag records
+        // on memory regions that are already released.
+      }
+    }
+    new_rec = (MemPointerRecord*)itr->next();
+  }
+  return true;
+}
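promote() now runs in two phases, and the virtual-memory phase depends on virtual_memory_record_walker() sorting staged records into sequence-number order first. A small illustration (not HotSpot code) of why that sort matters: reserve/commit/uncommit on one range only replay correctly in event order, and the staging array holds them in append order.

#include <algorithm>
#include <cstdio>
#include <vector>

struct VmRec { int seq; const char* op; };

int main() {
  // append order in the staging array is arrival order, not event order
  std::vector<VmRec> recs;
  recs.push_back({3, "uncommit"});
  recs.push_back({1, "reserve"});
  recs.push_back({2, "commit"});
  std::sort(recs.begin(), recs.end(),
            [](const VmRec& a, const VmRec& b) { return a.seq < b.seq; });
  for (size_t i = 0; i < recs.size(); i++) {
    printf("%d: %s\n", recs[i].seq, recs[i].op);  // reserve, commit, uncommit
  }
  return 0;
}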
 #ifndef PRODUCT
 void MemSnapshot::print_snapshot_stats(outputStream* st) {
   st->print_cr("Snapshot:");
@@ -413,8 +373,15 @@ void MemSnapshot::print_snapshot_stats(outputStream* st) {
   st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
     (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(),
     _vm_ptrs->instance_size()/K);

-  st->print_cr("\tStaging: %d/%d [%5.2f%%] %dKB", _staging_area->length(),
-    _staging_area->capacity(),
-    (100.0 * (float)_staging_area->length()) / (float)_staging_area->capacity(),
-    _staging_area->instance_size()/K);
+  st->print_cr("\tMalloc staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
+    _staging_area.malloc_data()->capacity(),
+    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
+    _staging_area.malloc_data()->instance_size()/K);
+
+  st->print_cr("\tVirtual memory staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
+    _staging_area.vm_data()->capacity(),
+    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
+    _staging_area.vm_data()->instance_size()/K);

   st->print_cr("\tUntracked allocation: %d", _untracked_count);
 }
@@ -433,7 +400,7 @@ void MemSnapshot::check_malloc_pointers() {
 }

 bool MemSnapshot::has_allocation_record(address addr) {
-  MemPointerArrayIteratorImpl itr(_staging_area);
+  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
   MemPointerRecord* cur = (MemPointerRecord*)itr.current();
   while (cur != NULL) {
     if (cur->addr() == addr && cur->is_allocation_record()) {
@@ -447,7 +414,7 @@ bool MemSnapshot::has_allocation_record(address addr) {
 #ifdef ASSERT
 void MemSnapshot::check_staging_data() {
-  MemPointerArrayIteratorImpl itr(_staging_area);
+  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
   MemPointerRecord* cur = (MemPointerRecord*)itr.current();
   MemPointerRecord* next = (MemPointerRecord*)itr.next();
   while (next != NULL) {

@@ -458,6 +425,13 @@ void MemSnapshot::check_staging_data() {
     cur = next;
     next = (MemPointerRecord*)itr.next();
   }
+
+  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
+  cur = (MemPointerRecord*)vm_itr.current();
+  while (cur != NULL) {
+    assert(cur->is_vm_pointer(), "virtual memory pointer only");
+    cur = (MemPointerRecord*)vm_itr.next();
+  }
 }
 #endif // ASSERT
src/share/vm/services/memSnapshot.hpp
@@ -111,38 +111,32 @@ class VMMemPointerIterator : public MemPointerIterator {
     MemPointerIterator(arr) {
   }

-  // locate an exiting record that contains specified address, or
+  // locate an existing record that contains specified address, or
   // the record, where the record with specified address, should
-  // be inserted
+  // be inserted.
+  // virtual memory record array is sorted in address order, so
+  // binary search is performed
   virtual MemPointer* locate(address addr) {
-    VMMemRegion* cur = (VMMemRegion*)current();
-    VMMemRegion* next_p;
-
-    while (cur != NULL) {
-      if (cur->base() > addr) {
-        return cur;
-      } else {
-        // find nearest existing range that has base address <= addr
-        next_p = (VMMemRegion*)peek_next();
-        if (next_p != NULL && next_p->base() <= addr) {
-          cur = (VMMemRegion*)next();
-          continue;
-        }
-      }
-      if (cur->is_reserve_record() && cur->base() <= addr &&
-        (cur->base() + cur->size() > addr)) {
-        return cur;
-      } else if (cur->is_commit_record() && cur->base() <= addr &&
-        (cur->base() + cur->committed_size() > addr)) {
-        return cur;
-      }
-      cur = (VMMemRegion*)next();
-    }
-    return NULL;
+    int index_low = 0;
+    int index_high = _array->length();
+    int index_mid = (index_high + index_low) / 2;
+    int r = 1;
+    while (index_low < index_high && (r = compare(index_mid, addr)) != 0) {
+      if (r > 0) {
+        index_high = index_mid;
+      } else {
+        index_low = index_mid;
+      }
+      index_mid = (index_high + index_low) / 2;
+    }
+    if (r == 0) {
+      // update current location
+      _pos = index_mid;
+      return _array->at(index_mid);
+    } else {
+      return NULL;
+    }
   }

 #ifdef ASSERT
   virtual bool is_dup_pointer(const MemPointer* ptr1,
@@ -160,75 +154,99 @@ class VMMemPointerIterator : public MemPointerIterator {
       (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
   }
 #endif
+
+  // compare if an address falls into a memory region,
+  // return 0, if the address falls into a memory region at specified index
+  // return 1, if memory region pointed by specified index is higher than the address
+  // return -1, if memory region pointed by specified index is lower than the address
+  int compare(int index, address addr) const {
+    VMMemRegion* r = (VMMemRegion*)_array->at(index);
+    assert(r->is_reserve_record(), "Sanity check");
+    if (r->addr() > addr) {
+      return 1;
+    } else if (r->addr() + r->reserved_size() <= addr) {
+      return -1;
+    } else {
+      return 0;
+    }
+  }
 };
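locate() now binary-searches the address-sorted reserved regions via compare(). A hedged, self-contained sketch (not HotSpot code) of the same idea; note it advances with lo = mid + 1 on the low side, the textbook guard against stalling, where the hunk above narrows with index_low = index_mid:

#include <cassert>
#include <cstddef>

struct Region { unsigned long base; unsigned long size; };

// 0: addr inside region; 1: region above addr; -1: region below addr
static int compare(const Region& r, unsigned long addr) {
  if (r.base > addr) return 1;
  if (r.base + r.size <= addr) return -1;
  return 0;
}

static const Region* locate(const Region* regions, int len, unsigned long addr) {
  int lo = 0, hi = len;
  while (lo < hi) {
    int mid = (lo + hi) / 2;
    int r = compare(regions[mid], addr);
    if (r == 0) return &regions[mid];
    if (r > 0) hi = mid; else lo = mid + 1;
  }
  return NULL;
}

int main() {
  const Region regions[] = { {0x1000, 0x1000}, {0x4000, 0x2000}, {0x8000, 0x1000} };
  assert(locate(regions, 3, 0x4800) == &regions[1]);  // inside the second region
  assert(locate(regions, 3, 0x3000) == NULL);         // falls in a gap
  return 0;
}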
-class StagingWalker : public MemPointerArrayIterator {
+class MallocRecordIterator : public MemPointerArrayIterator {
  private:
   MemPointerArrayIteratorImpl  _itr;
-  bool                         _is_vm_record;
-  bool                         _end_of_array;
-  VMMemRegionEx                _vm_record;
-  MemPointerRecordEx           _malloc_record;

  public:
-  StagingWalker(MemPointerArray* arr): _itr(arr) {
-    _end_of_array = false;
-    next();
+  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
   }

   // return the pointer at current position
   MemPointer* current() const {
-    if (_end_of_array) {
-      return NULL;
-    }
-    if (is_vm_record()) {
-      return (MemPointer*)&_vm_record;
-    } else {
-      return (MemPointer*)&_malloc_record;
-    }
+    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
+    assert(cur == NULL || !cur->is_vm_pointer(), "seek error");
+    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
+    if (next == NULL || next->addr() != cur->addr()) {
+      return cur;
+    } else {
+      assert(!cur->is_vm_pointer(), "Sanity check");
+      assert(cur->is_allocation_record() && next->is_deallocation_record(),
+        "sorting order");
+      assert(cur->seq() != next->seq(), "Sanity check");
+      return cur->seq() > next->seq() ? cur : next;
+    }
   }

   // return the next pointer and advance current position
-  MemPointer* next();
-
-  // type of 'current' record
-  bool is_vm_record() const {
-    return _is_vm_record;
-  }
+  MemPointer* next() {
+    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
+    assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check");
+    MemPointerRecord* next = (MemPointerRecord*)_itr.next();
+    if (next == NULL) {
+      return NULL;
+    }
+    if (cur->addr() == next->addr()) {
+      next = (MemPointerRecord*)_itr.next();
+    }
+    return current();
+  }

-  // return the next poinger without advancing current position
-  MemPointer* peek_next() const {
-    assert(false, "not supported");
-    return NULL;
-  }
-
-  MemPointer* peek_prev() const {
-    assert(false, "not supported");
-    return NULL;
-  }
-
-  // remove the pointer at current position
-  void remove() {
-    assert(false, "not supported");
-  }
-
-  // insert the pointer at current position
-  bool insert(MemPointer* ptr) {
-    assert(false, "not supported");
-    return false;
-  }
-
-  bool insert_after(MemPointer* ptr) {
-    assert(false, "not supported");
-    return false;
-  }
-
- private:
-  // consolidate all records referring to this vm region
-  bool consolidate_vm_records(VMMemRegionEx* vm_rec);
-};
+  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
+  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
+  void remove()                      { ShouldNotReachHere(); }
+  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
+  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
+};
+
+class StagingArea : public _ValueObj {
+ private:
+  MemPointerArray*   _malloc_data;
+  MemPointerArray*   _vm_data;
+
+ public:
+  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
+    init();
+  }
+
+  ~StagingArea() {
+    if (_malloc_data != NULL) delete _malloc_data;
+    if (_vm_data != NULL) delete _vm_data;
+  }
+
+  MallocRecordIterator malloc_record_walker() {
+    return MallocRecordIterator(malloc_data());
+  }
+
+  MemPointerArrayIteratorImpl virtual_memory_record_walker();
+  bool init();
+  void clear() {
+    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
+    _malloc_data->shrink();
+    _malloc_data->clear();
+    _vm_data->clear();
+  }
+
+  inline MemPointerArray* malloc_data() { return _malloc_data; }
+  inline MemPointerArray* vm_data()     { return _vm_data; }
+};
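MallocRecordIterator's current() hides intra-generation churn: when an allocation record and a deallocation record for the same address sit next to each other, only the later event (higher sequence number) is surfaced. A minimal sketch (not HotSpot code) of that selection rule:

#include <cassert>
#include <cstddef>

struct Rec { unsigned long addr; int seq; };

// pick the record the walker should surface at this position
static const Rec* visible(const Rec* cur, const Rec* next) {
  if (next == NULL || next->addr != cur->addr) {
    return cur;
  }
  return cur->seq > next->seq ? cur : next;  // the later event wins
}

int main() {
  Rec alloc   = {0x1000, 5};
  Rec dealloc = {0x1000, 9};
  assert(visible(&alloc, &dealloc) == &dealloc);  // freed within the same generation
  assert(visible(&dealloc, NULL) == &dealloc);
  return 0;
}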
 class MemBaseline;

 class MemSnapshot : public CHeapObj<mtNMT> {
  private:
   // the following two arrays contain records of all known lived memory blocks

@@ -237,9 +255,7 @@ class MemSnapshot : public CHeapObj<mtNMT> {
   // live virtual memory pointers
   MemPointerArray*      _vm_ptrs;

-  // stagging a generation's data, before
-  // it can be prompted to snapshot
-  MemPointerArray*      _staging_area;
+  StagingArea           _staging_area;

   // the lock to protect this snapshot
   Monitor*              _lock;
@@ -252,18 +268,19 @@ class MemSnapshot : public CHeapObj<mtNMT> {
   virtual ~MemSnapshot();

   // if we are running out of native memory
-  bool out_of_memory() const {
-    return (_alloc_ptrs == NULL || _staging_area == NULL ||
+  bool out_of_memory() {
+    return (_alloc_ptrs == NULL ||
+      _staging_area.malloc_data() == NULL ||
+      _staging_area.vm_data() == NULL ||
       _vm_ptrs == NULL || _lock == NULL ||
       _alloc_ptrs->out_of_memory() ||
-      _staging_area->out_of_memory() ||
       _vm_ptrs->out_of_memory());
   }

   // merge a per-thread memory recorder into staging area
   bool merge(MemRecorder* rec);
   // promote staged data to snapshot
-  void promote();
+  bool promote();

   void wait(long timeout) {

@@ -280,6 +297,9 @@ class MemSnapshot : public CHeapObj<mtNMT> {
  private:
   // copy pointer data from src to dest
   void copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
+
+  bool promote_malloc_records(MemPointerArrayIterator* itr);
+  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
 };
src/share/vm/services/memTrackWorker.cpp
@@ -118,7 +118,10 @@ void MemTrackWorker::run() {
           _head = (_head + 1) % MAX_GENERATIONS;
         }
         // promote this generation data to snapshot
-        snapshot->promote();
+        if (!snapshot->promote()) {
+          // failed to promote, means out of memory
+          MemTracker::shutdown(MemTracker::NMT_out_of_memory);
+        }
       } else {
         snapshot->wait(1000);
         ThreadCritical tc;
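The worker loop now treats a failed promote() as fatal for tracking rather than silently dropping a generation. A hedged sketch (not HotSpot code; the names are stand-ins for MemTracker's real shutdown path) of the pattern:

#include <cstdio>

enum ShutdownReason { NMT_out_of_memory };  // stand-in for MemTracker's reason enum

static void shutdown(ShutdownReason reason) {
  // the real MemTracker::shutdown() stops recording and tears down tracking state
  printf("shutting down native memory tracking, reason=%d\n", (int)reason);
}

static bool promote_generation() {
  return false;  // stand-in: pretend the snapshot ran out of memory
}

int main() {
  if (!promote_generation()) {
    shutdown(NMT_out_of_memory);
  }
  return 0;
}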
src/share/vm/services/memTracker.hpp
@@ -39,7 +39,7 @@
 #include "thread_solaris.inline.hpp"
 #endif

-#ifdef _DEBUG_
+#ifdef _DEBUG
 #define DEBUG_CALLER_PC os::get_caller_pc(3)
 #else
 #define DEBUG_CALLER_PC 0
@@ -223,12 +223,33 @@ class MemTracker : AllStatic {
     }
   }

+  static inline void record_thread_stack(address addr, size_t size, Thread* thr,
+                           address pc = 0) {
+    if (is_on()) {
+      assert(size > 0 && thr != NULL, "Sanity check");
+      create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag() | mtThreadStack,
+                           size, pc, thr);
+      create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag() | mtThreadStack,
+                           size, pc, thr);
+    }
+  }
+
+  static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
+    if (is_on()) {
+      assert(size > 0 && thr != NULL, "Sanity check");
+      create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag() | mtThreadStack,
+                           size, DEBUG_CALLER_PC, thr);
+      create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag() | mtThreadStack,
+                           size, DEBUG_CALLER_PC, thr);
+    }
+  }
+
   // record a virtual memory 'commit' call
   static inline void record_virtual_memory_commit(address addr, size_t size,
                             address pc = 0, Thread* thread = NULL) {
     if (is_on()) {
       create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag(),
-                           size, pc, thread);
+                           size, DEBUG_CALLER_PC, thread);
     }
   }

@@ -237,7 +258,7 @@ class MemTracker : AllStatic {
                             Thread* thread = NULL) {
     if (is_on()) {
       create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag(),
-                           size, 0, thread);
+                           size, DEBUG_CALLER_PC, thread);
     }
   }

@@ -246,7 +267,7 @@ class MemTracker : AllStatic {
                             Thread* thread = NULL) {
     if (is_on()) {
       create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag(),
-                           size, 0, thread);
+                           size, DEBUG_CALLER_PC, thread);
     }
   }

@@ -257,7 +278,7 @@ class MemTracker : AllStatic {
       assert(base > 0, "wrong base address");
       assert((flags & (~mt_masks)) == 0, "memory type only");
       create_memory_record(base, (flags | MemPointerRecord::virtual_memory_type_tag()),
-                           0, 0, thread);
+                           0, DEBUG_CALLER_PC, thread);
     }
   }
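record_thread_stack() is the one-call replacement for the reserve/type-tag pair the old thread.cpp code issued: it emits a reserve record and a commit record for the same range, both tagged mtThreadStack, so a live stack reads as fully committed; release_thread_stack() mirrors it with uncommit plus release. A hedged sketch (not HotSpot code; the tag values are illustrative, not NMT's real bit encodings):

#include <cstdio>
#include <vector>

enum Tag { RESERVE, COMMIT, UNCOMMIT, RELEASE };
struct VmRecord { Tag tag; unsigned long addr; unsigned long size; };

static std::vector<VmRecord> records;

static void record_thread_stack(unsigned long addr, unsigned long size) {
  records.push_back({RESERVE, addr, size});  // one call emits both records, so the
  records.push_back({COMMIT,  addr, size});  // stack region shows up as committed
}

static void release_thread_stack(unsigned long addr, unsigned long size) {
  records.push_back({UNCOMMIT, addr, size});  // mirror image at thread teardown
  records.push_back({RELEASE,  addr, size});
}

int main() {
  record_thread_stack(0x7f0000100000UL, 0x100000UL);
  release_thread_stack(0x7f0000100000UL, 0x100000UL);
  printf("%zu records\n", records.size());  // 4
  return 0;
}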