Commit 2a8d5f43
Authored Oct 09, 2019 by ddong
8214542: JFR: Old Object Sample event slow on a deep heap in debug builds
Reviewed-by: egahlin, rwestberg
Parent: 5a277f2d

Showing 36 changed files with 1041 additions and 954 deletions (+1041, -954)
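The substance of the change: reference chains are now capped at insertion time in EdgeStore::put_chain() instead of being collapsed after the fact by the old EdgeUtils::collapse_chain(), which is what made the Old Object Sample event slow on deep heaps in debug builds. Only a bounded "leak context" of edges nearest the candidate and a "root context" nearest the GC root are materialized; the middle of a deep chain is bridged by a single skip edge. A minimal standalone sketch of the resulting bound, assuming the constants this commit adds to edgeUtils.hpp (illustrative code, not part of the commit):

    // Standalone sketch, not HotSpot code; mirrors EdgeUtils constants below.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    static const std::size_t leak_context = 100;  // edges kept nearest the leak candidate
    static const std::size_t root_context = 100;  // edges kept nearest the GC root
    static const std::size_t max_ref_chain_depth = leak_context + root_context;

    // Edges actually stored for a discovered chain of 'length' edges: short
    // chains are stored whole; deep chains keep both contexts and bridge the
    // middle with one skip edge, so the store stays bounded.
    std::size_t stored_edges(std::size_t length) {
      return std::min(length, max_ref_chain_depth);
    }

    int main() {
      std::printf("%zu\n", stored_edges(50));       // 50: fits, stored whole
      std::printf("%zu\n", stored_edges(1000000));  // 200: capped, middle skipped
    }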
src/share/vm/jfr/leakprofiler/chains/bfsClosure.cpp                   +10   -17
src/share/vm/jfr/leakprofiler/chains/bfsClosure.hpp                    +1    -1
src/share/vm/jfr/leakprofiler/chains/bitset.hpp                        +1    -1
src/share/vm/jfr/leakprofiler/chains/dfsClosure.cpp                   +26   -20
src/share/vm/jfr/leakprofiler/chains/dfsClosure.hpp                    +2    -2
src/share/vm/jfr/leakprofiler/chains/edge.hpp                          +1    -1
src/share/vm/jfr/leakprofiler/chains/edgeStore.cpp                   +237   -73
src/share/vm/jfr/leakprofiler/chains/edgeStore.hpp                    +39   -51
src/share/vm/jfr/leakprofiler/chains/edgeUtils.cpp                     +9  -170
src/share/vm/jfr/leakprofiler/chains/edgeUtils.hpp                     +5    -5
src/share/vm/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp       +37  -141
src/share/vm/jfr/leakprofiler/chains/pathToGcRootsOperation.hpp       +46    -0
src/share/vm/jfr/leakprofiler/chains/rootSetClosure.cpp               +28   -35
src/share/vm/jfr/leakprofiler/chains/rootSetClosure.hpp                +4    -8
src/share/vm/jfr/leakprofiler/checkpoint/eventEmitter.cpp            +147    -0
src/share/vm/jfr/leakprofiler/checkpoint/eventEmitter.hpp             +23   -34
src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp   +39   -66
src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp    +7    -6
src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp       +24   -37
src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp        +5    -9
src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.hpp              +1    -1
src/share/vm/jfr/leakprofiler/leakProfiler.cpp                        +43   -65
src/share/vm/jfr/leakprofiler/leakProfiler.hpp                         +2   -22
src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp             +140   -51
src/share/vm/jfr/leakprofiler/sampling/objectSampler.hpp              +25    -7
src/share/vm/jfr/leakprofiler/startOperation.hpp                       +6   -23
src/share/vm/jfr/leakprofiler/stopOperation.hpp                        +4   -20
src/share/vm/jfr/leakprofiler/utilities/vmOperation.hpp               +41    -0
src/share/vm/jfr/recorder/checkpoint/types/jfrType.cpp                 +2    -2
src/share/vm/jfr/recorder/service/jfrRecorderService.cpp              +31   -19
src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp      +27   -46
src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp      +16   -12
src/share/vm/jfr/support/jfrFlush.hpp                                  +5    -3
src/share/vm/jfr/support/jfrThreadLocal.cpp                            +5    -3
src/share/vm/jfr/support/jfrThreadLocal.hpp                            +1    -3
src/share/vm/runtime/vm_operations.hpp                                 +1    -0
src/share/vm/jfr/leakprofiler/chains/bfsClosure.cpp

 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -97,7 +97,6 @@ void BFSClosure::log_dfs_fallback() const {
 }

 void BFSClosure::process() {
   process_root_set();
   process_queue();
 }
...
@@ -136,7 +135,6 @@ void BFSClosure::closure_impl(const oop* reference, const oop pointee) {
     // if we are processing initial root set, don't add to queue
     if (_current_parent != NULL) {
       assert(_current_parent->distance_to_root() == _current_frontier_level, "invariant");
       _edge_queue->add(_current_parent, reference);
     }
...
@@ -149,20 +147,8 @@ void BFSClosure::closure_impl(const oop* reference, const oop pointee) {
 void BFSClosure::add_chain(const oop* reference, const oop pointee) {
   assert(pointee != NULL, "invariant");
   assert(NULL == pointee->mark(), "invariant");
-
-  const size_t length = _current_parent == NULL ? 1 : _current_parent->distance_to_root() + 2;
-  ResourceMark rm;
-  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
-  size_t idx = 0;
-  chain[idx++] = Edge(NULL, reference);
-
-  // aggregate from breadth-first search
-  const Edge* current = _current_parent;
-  while (current != NULL) {
-    chain[idx++] = Edge(NULL, current->reference());
-    current = current->parent();
-  }
-  assert(length == idx, "invariant");
-  _edge_store->add_chain(chain, length);
+  Edge leak_edge(_current_parent, reference);
+  _edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2);
 }

 void BFSClosure::dfs_fallback() {
...
@@ -239,3 +225,10 @@ void BFSClosure::do_oop(narrowOop* ref) {
     closure_impl(UnifiedOop::encode(ref), pointee);
   }
 }
+
+void BFSClosure::do_root(const oop* ref) {
+  assert(ref != NULL, "invariant");
+  if (!_edge_queue->is_full()) {
+    _edge_queue->add(NULL, ref);
+  }
+}
src/share/vm/jfr/leakprofiler/chains/bfsClosure.hpp

@@ -26,7 +26,6 @@
 #define SHARE_VM_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP

 #include "memory/iterator.hpp"
 #include "oops/oop.hpp"

 class BitSet;
 class Edge;
...
@@ -65,6 +64,7 @@ class BFSClosure : public ExtendedOopClosure { // XXX BasicOopIterateClosure
  public:
   BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits);
   void process();
+  void do_root(const oop* ref);

   virtual void do_oop(oop* ref);
   virtual void do_oop(narrowOop* ref);
...
src/share/vm/jfr/leakprofiler/chains/bitset.hpp

@@ -47,7 +47,7 @@ class BitSet : public CHeapObj<mtTracing> {
   BitMap::idx_t mark_obj(const HeapWord* addr) {
     const BitMap::idx_t bit = addr_to_bit(addr);
-    _bits.par_set_bit(bit);
+    _bits.set_bit(bit);
     return bit;
   }
...
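A note on the mark_obj() change: par_set_bit() is BitMap's atomic variant for parallel marking; switching to the plain set_bit() looks safe here because the bitset is only populated by the single VM thread inside the PathToGcRootsOperation safepoint this commit introduces. That is an inference from the surrounding diff, not something the commit message states.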
src/share/vm/jfr/leakprofiler/chains/dfsClosure.cpp

 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -23,14 +23,14 @@ (include list reordered)
  */
 #include "precompiled.hpp"
+#include "jfr/leakprofiler/chains/bitset.hpp"
 #include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edge.hpp"
 #include "jfr/leakprofiler/chains/edgeStore.hpp"
+#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
 #include "jfr/leakprofiler/utilities/granularTimer.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
-#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
 #include "jfr/leakprofiler/utilities/rootType.hpp"
-#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
+#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
...
@@ -87,15 +87,15 @@ void DFSClosure::find_leaks_from_root_set(EdgeStore* edge_store,
   // Mark root set, to avoid going sideways
   _max_depth = 1;
   _ignore_root_set = false;
-  DFSClosure dfs1;
-  RootSetClosure::process_roots(&dfs1);
+  DFSClosure dfs;
+  RootSetClosure<DFSClosure> rs(&dfs);
+  rs.process();

   // Depth-first search
   _max_depth = max_dfs_depth;
   _ignore_root_set = true;
   assert(_start_edge == NULL, "invariant");
-  DFSClosure dfs2;
-  RootSetClosure::process_roots(&dfs2);
+  rs.process();
 }

 void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
...
@@ -132,30 +132,29 @@ void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
 }

 void DFSClosure::add_chain() {
-  const size_t length = _start_edge == NULL ? _depth + 1 :
-                        _start_edge->distance_to_root() + 1 + _depth + 1;
+  const size_t array_length = _depth + 2;

   ResourceMark rm;
-  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, length);
+  Edge* const chain = NEW_RESOURCE_ARRAY(Edge, array_length);
   size_t idx = 0;

   // aggregate from depth-first search
   const DFSClosure* c = this;
   while (c != NULL) {
-    chain[idx++] = Edge(NULL, c->reference());
+    const size_t next = idx + 1;
+    chain[idx++] = Edge(&chain[next], c->reference());
     c = c->parent();
   }
-  assert(idx == _depth + 1, "invariant");
+  assert(_depth + 1 == idx, "invariant");
+  assert(array_length == idx + 1, "invariant");

-  // aggregate from breadth-first search
-  const Edge* current = _start_edge;
-  while (current != NULL) {
-    chain[idx++] = Edge(NULL, current->reference());
-    current = current->parent();
+  if (_start_edge != NULL) {
+    chain[idx++] = *_start_edge;
+  } else {
+    chain[idx - 1] = Edge(NULL, chain[idx - 1].reference());
   }
-  assert(idx == length, "invariant");
-  _edge_store->add_chain(chain, length);
+  _edge_store->put_chain(chain, idx + (_start_edge != NULL ? _start_edge->distance_to_root() : 0));
 }

 void DFSClosure::do_oop(oop* ref) {
...
@@ -175,3 +174,10 @@ void DFSClosure::do_oop(narrowOop* ref) {
     closure_impl(UnifiedOop::encode(ref), pointee);
   }
 }
+
+void DFSClosure::do_root(const oop* ref) {
+  assert(ref != NULL, "invariant");
+  const oop pointee = UnifiedOop::dereference(ref);
+  assert(pointee != NULL, "invariant");
+  closure_impl(ref, pointee);
+}
src/share/vm/jfr/leakprofiler/chains/dfsClosure.hpp

@@ -26,7 +26,6 @@
 #define SHARE_VM_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP

 #include "memory/iterator.hpp"
 #include "oops/oop.hpp"

 class BitSet;
 class Edge;
...
@@ -34,7 +33,7 @@ class EdgeStore;
 class EdgeQueue;

 // Class responsible for iterating the heap depth-first
-class DFSClosure: public ExtendedOopClosure { // XXX BasicOopIterateClosure
+class DFSClosure : public ExtendedOopClosure { // XXX BasicOopIterateClosure
  private:
   static EdgeStore* _edge_store;
   static BitSet*    _mark_bits;
...
@@ -57,6 +56,7 @@ class DFSClosure : public ExtendedOopClosure { // XXX BasicOopIterateClosure
  public:
   static void find_leaks_from_edge(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge);
   static void find_leaks_from_root_set(EdgeStore* edge_store, BitSet* mark_bits);
+  void do_root(const oop* ref);

   virtual void do_oop(oop* ref);
   virtual void do_oop(narrowOop* ref);
...
src/share/vm/jfr/leakprofiler/chains/edge.hpp

@@ -29,7 +29,7 @@
 #include "oops/oopsHierarchy.hpp"

 class Edge {
- private:
+ protected:
   const Edge* _parent;
   const oop* _reference;
  public:
...
...
src/share/vm/jfr/leakprofiler/chains/edgeStore.cpp
浏览文件 @
2a8d5f43
/*
* Copyright (c) 2014, 201
8
, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 201
9
, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -27,37 +27,17 @@
#include "jfr/leakprofiler/chains/edgeUtils.hpp"
#include "oops/oop.inline.hpp"
RoutableEdge
::
RoutableEdge
()
:
Edge
()
{}
RoutableEdge
::
RoutableEdge
(
const
Edge
*
parent
,
const
oop
*
reference
)
:
Edge
(
parent
,
reference
),
_skip_edge
(
NULL
),
_skip_length
(
0
),
_processed
(
false
)
{}
StoredEdge
::
StoredEdge
()
:
Edge
()
{}
StoredEdge
::
StoredEdge
(
const
Edge
*
parent
,
const
oop
*
reference
)
:
Edge
(
parent
,
reference
),
_gc_root_id
(
0
),
_skip_length
(
0
)
{}
RoutableEdge
::
RoutableEdge
(
const
Edge
&
edge
)
:
Edge
(
edge
),
_skip_edge
(
NULL
),
_skip_length
(
0
),
_processed
(
false
)
{}
StoredEdge
::
StoredEdge
(
const
Edge
&
edge
)
:
Edge
(
edge
),
_gc_root_id
(
0
),
_skip_length
(
0
)
{}
RoutableEdge
::
RoutableEdge
(
const
RoutableEdge
&
edge
)
:
Edge
(
edge
),
_skip_edge
(
edge
.
_skip_edge
),
_skip_length
(
edge
.
_skip_length
),
_processed
(
edge
.
_processed
)
{}
StoredEdge
::
StoredEdge
(
const
StoredEdge
&
edge
)
:
Edge
(
edge
),
_gc_root_id
(
edge
.
_gc_root_id
),
_skip_length
(
edge
.
_skip_length
)
{}
void
RoutableEdge
::
operator
=
(
const
Routable
Edge
&
edge
)
{
void
StoredEdge
::
operator
=
(
const
Stored
Edge
&
edge
)
{
Edge
::
operator
=
(
edge
);
_
skip_edge
=
edge
.
_skip_edge
;
_
gc_root_id
=
edge
.
_gc_root_id
;
_skip_length
=
edge
.
_skip_length
;
_processed
=
edge
.
_processed
;
}
size_t
RoutableEdge
::
logical_distance_to_root
()
const
{
size_t
depth
=
0
;
const
RoutableEdge
*
current
=
logical_parent
();
while
(
current
!=
NULL
)
{
depth
++
;
current
=
current
->
logical_parent
();
}
return
depth
;
}
traceid
EdgeStore
::
_edge_id_counter
=
0
;
...
...
@@ -69,20 +49,42 @@ EdgeStore::EdgeStore() : _edges(NULL) {
EdgeStore
::~
EdgeStore
()
{
assert
(
_edges
!=
NULL
,
"invariant"
);
delete
_edges
;
_edges
=
NULL
;
}
const
Edge
*
EdgeStore
::
get_edge
(
const
Edge
*
edge
)
const
{
assert
(
edge
!=
NULL
,
"invariant"
);
EdgeEntry
*
const
entry
=
_edges
->
lookup_only
(
*
edge
,
(
uintptr_t
)
edge
->
reference
());
bool
EdgeStore
::
is_empty
()
const
{
return
!
_edges
->
has_entries
();
}
void
EdgeStore
::
assign_id
(
EdgeEntry
*
entry
)
{
assert
(
entry
!=
NULL
,
"invariant"
);
assert
(
entry
->
id
()
==
0
,
"invariant"
);
entry
->
set_id
(
++
_edge_id_counter
);
}
bool
EdgeStore
::
equals
(
const
Edge
&
query
,
uintptr_t
hash
,
const
EdgeEntry
*
entry
)
{
assert
(
entry
!=
NULL
,
"invariant"
);
assert
(
entry
->
hash
()
==
hash
,
"invariant"
);
return
true
;
}
#ifdef ASSERT
bool
EdgeStore
::
contains
(
const
oop
*
reference
)
const
{
return
get
(
reference
)
!=
NULL
;
}
#endif
StoredEdge
*
EdgeStore
::
get
(
const
oop
*
reference
)
const
{
assert
(
reference
!=
NULL
,
"invariant"
);
const
StoredEdge
e
(
NULL
,
reference
);
EdgeEntry
*
const
entry
=
_edges
->
lookup_only
(
e
,
(
uintptr_t
)
reference
);
return
entry
!=
NULL
?
entry
->
literal_addr
()
:
NULL
;
}
const
Edge
*
EdgeStore
::
put
(
const
Edge
*
edg
e
)
{
assert
(
edg
e
!=
NULL
,
"invariant"
);
const
RoutableEdge
e
=
*
edge
;
assert
(
NULL
==
_edges
->
lookup_only
(
e
,
(
uintptr_t
)
e
.
reference
()
),
"invariant"
);
EdgeEntry
&
entry
=
_edges
->
put
(
e
,
(
uintptr_t
)
e
.
reference
()
);
StoredEdge
*
EdgeStore
::
put
(
const
oop
*
referenc
e
)
{
assert
(
referenc
e
!=
NULL
,
"invariant"
);
const
StoredEdge
e
(
NULL
,
reference
)
;
assert
(
NULL
==
_edges
->
lookup_only
(
e
,
(
uintptr_t
)
reference
),
"invariant"
);
EdgeEntry
&
entry
=
_edges
->
put
(
e
,
(
uintptr_t
)
reference
);
return
entry
.
literal_addr
();
}
...
...
@@ -93,63 +95,225 @@ traceid EdgeStore::get_id(const Edge* edge) const {
return
entry
->
id
();
}
traceid
EdgeStore
::
get_root_id
(
const
Edge
*
edge
)
const
{
traceid
EdgeStore
::
gc_root_id
(
const
Edge
*
edge
)
const
{
assert
(
edge
!=
NULL
,
"invariant"
);
const
traceid
gc_root_id
=
static_cast
<
const
StoredEdge
*>
(
edge
)
->
gc_root_id
();
if
(
gc_root_id
!=
0
)
{
return
gc_root_id
;
}
// not cached
assert
(
edge
!=
NULL
,
"invariant"
);
const
Edge
*
root
=
EdgeUtils
::
root
(
*
edge
);
const
Edge
*
const
root
=
EdgeUtils
::
root
(
*
edge
);
assert
(
root
!=
NULL
,
"invariant"
);
assert
(
root
->
parent
()
==
NULL
,
"invariant"
);
return
get_id
(
root
);
}
void
EdgeStore
::
add_chain
(
const
Edge
*
chain
,
size_t
length
)
{
assert
(
chain
!=
NULL
,
"invariant"
);
assert
(
length
>
0
,
"invariant"
);
static
const
Edge
*
get_skip_ancestor
(
const
Edge
**
current
,
size_t
distance_to_root
,
size_t
*
skip_length
)
{
assert
(
distance_to_root
>=
EdgeUtils
::
root_context
,
"invariant"
);
assert
(
*
skip_length
==
0
,
"invariant"
);
*
skip_length
=
distance_to_root
-
(
EdgeUtils
::
root_context
-
1
);
const
Edge
*
const
target
=
EdgeUtils
::
ancestor
(
**
current
,
*
skip_length
);
assert
(
target
!=
NULL
,
"invariant"
);
assert
(
target
->
distance_to_root
()
+
1
==
EdgeUtils
::
root_context
,
"invariant"
);
return
target
;
}
size_t
bottom_index
=
length
-
1
;
const
size_t
top_index
=
0
;
bool
EdgeStore
::
put_skip_edge
(
StoredEdge
**
previous
,
const
Edge
**
current
,
size_t
distance_to_root
)
{
assert
(
*
previous
!=
NULL
,
"invariant"
);
assert
((
*
previous
)
->
parent
()
==
NULL
,
"invariant"
);
assert
(
*
current
!=
NULL
,
"invariant"
);
assert
((
*
current
)
->
distance_to_root
()
==
distance_to_root
,
"invariant"
);
const
Edge
*
stored_parent_edge
=
NULL
;
if
(
distance_to_root
<
EdgeUtils
::
root_context
)
{
// nothing to skip
return
false
;
}
// determine level of shared ancestry
for
(;
bottom_index
>
top_index
;
--
bottom_index
)
{
const
Edge
*
stored_edge
=
get_edge
(
&
chain
[
bottom_index
]);
if
(
stored_edge
!=
NULL
)
{
stored_parent_edge
=
stored_edge
;
continue
;
size_t
skip_length
=
0
;
const
Edge
*
const
skip_ancestor
=
get_skip_ancestor
(
current
,
distance_to_root
,
&
skip_length
);
assert
(
skip_ancestor
!=
NULL
,
"invariant"
);
(
*
previous
)
->
set_skip_length
(
skip_length
);
// lookup target
StoredEdge
*
stored_target
=
get
(
skip_ancestor
->
reference
());
if
(
stored_target
!=
NULL
)
{
(
*
previous
)
->
set_parent
(
stored_target
);
// linked to existing, complete
return
true
;
}
assert
(
stored_target
==
NULL
,
"invariant"
);
stored_target
=
put
(
skip_ancestor
->
reference
());
assert
(
stored_target
!=
NULL
,
"invariant"
);
(
*
previous
)
->
set_parent
(
stored_target
);
*
previous
=
stored_target
;
*
current
=
skip_ancestor
->
parent
();
return
false
;
}
static
void
link_edge
(
const
StoredEdge
*
current_stored
,
StoredEdge
**
previous
)
{
assert
(
current_stored
!=
NULL
,
"invariant"
);
assert
(
*
previous
!=
NULL
,
"invariant"
);
assert
((
*
previous
)
->
parent
()
==
NULL
,
"invariant"
);
(
*
previous
)
->
set_parent
(
current_stored
);
}
static
const
StoredEdge
*
find_closest_skip_edge
(
const
StoredEdge
*
edge
,
size_t
*
distance
)
{
assert
(
edge
!=
NULL
,
"invariant"
);
assert
(
distance
!=
NULL
,
"invariant"
);
const
StoredEdge
*
current
=
edge
;
*
distance
=
1
;
while
(
current
!=
NULL
&&
!
current
->
is_skip_edge
())
{
++
(
*
distance
);
current
=
current
->
parent
();
}
return
current
;
}
void
EdgeStore
::
link_with_existing_chain
(
const
StoredEdge
*
current_stored
,
StoredEdge
**
previous
,
size_t
previous_length
)
{
assert
(
current_stored
!=
NULL
,
"invariant"
);
assert
((
*
previous
)
->
parent
()
==
NULL
,
"invariant"
);
size_t
distance_to_skip_edge
;
// including the skip edge itself
const
StoredEdge
*
const
closest_skip_edge
=
find_closest_skip_edge
(
current_stored
,
&
distance_to_skip_edge
);
if
(
closest_skip_edge
==
NULL
)
{
// no found skip edge implies root
if
(
distance_to_skip_edge
+
previous_length
<=
EdgeUtils
::
max_ref_chain_depth
)
{
link_edge
(
current_stored
,
previous
);
return
;
}
break
;
assert
(
current_stored
->
distance_to_root
()
==
distance_to_skip_edge
-
2
,
"invariant"
);
put_skip_edge
(
previous
,
reinterpret_cast
<
const
Edge
**>
(
&
current_stored
),
distance_to_skip_edge
-
2
);
return
;
}
assert
(
closest_skip_edge
->
is_skip_edge
(),
"invariant"
);
if
(
distance_to_skip_edge
+
previous_length
<=
EdgeUtils
::
leak_context
)
{
link_edge
(
current_stored
,
previous
);
return
;
}
// create a new skip edge with derived information from closest skip edge
(
*
previous
)
->
set_skip_length
(
distance_to_skip_edge
+
closest_skip_edge
->
skip_length
());
(
*
previous
)
->
set_parent
(
closest_skip_edge
->
parent
());
}
StoredEdge
*
EdgeStore
::
link_new_edge
(
StoredEdge
**
previous
,
const
Edge
**
current
)
{
assert
(
*
previous
!=
NULL
,
"invariant"
);
assert
((
*
previous
)
->
parent
()
==
NULL
,
"invariant"
);
assert
(
*
current
!=
NULL
,
"invariant"
);
assert
(
!
contains
((
*
current
)
->
reference
()),
"invariant"
);
StoredEdge
*
const
stored_edge
=
put
((
*
current
)
->
reference
());
assert
(
stored_edge
!=
NULL
,
"invariant"
);
link_edge
(
stored_edge
,
previous
);
return
stored_edge
;
}
// insertion of new Edges
for
(
int
i
=
(
int
)
bottom_index
;
i
>=
(
int
)
top_index
;
--
i
)
{
Edge
edge
(
stored_parent_edge
,
chain
[
i
].
reference
());
stored_parent_edge
=
put
(
&
edge
);
bool
EdgeStore
::
put_edges
(
StoredEdge
**
previous
,
const
Edge
**
current
,
size_t
limit
)
{
assert
(
*
previous
!=
NULL
,
"invariant"
);
assert
(
*
current
!=
NULL
,
"invariant"
);
size_t
depth
=
1
;
while
(
*
current
!=
NULL
&&
depth
<
limit
)
{
StoredEdge
*
stored_edge
=
get
((
*
current
)
->
reference
());
if
(
stored_edge
!=
NULL
)
{
link_with_existing_chain
(
stored_edge
,
previous
,
depth
);
return
true
;
}
stored_edge
=
link_new_edge
(
previous
,
current
);
assert
((
*
previous
)
->
parent
()
!=
NULL
,
"invariant"
);
*
previous
=
stored_edge
;
*
current
=
(
*
current
)
->
parent
();
++
depth
;
}
return
NULL
==
*
current
;
}
const
oop
sample_object
=
stored_parent_edge
->
pointee
();
// Install the immediate edge into the mark word of the leak candidate object
StoredEdge
*
EdgeStore
::
associate_leak_context_with_candidate
(
const
Edge
*
edge
)
{
assert
(
edge
!=
NULL
,
"invariant"
);
assert
(
!
contains
(
edge
->
reference
()),
"invariant"
);
StoredEdge
*
const
leak_context_edge
=
put
(
edge
->
reference
());
oop
sample_object
=
edge
->
pointee
();
assert
(
sample_object
!=
NULL
,
"invariant"
);
assert
(
NULL
==
sample_object
->
mark
(),
"invariant"
);
// Install the "top" edge of the chain into the sample object mark oop.
// This associates the sample object with its navigable reference chain.
sample_object
->
set_mark
(
markOop
(
stored_parent_edge
));
sample_object
->
set_mark
(
markOop
(
leak_context_edge
));
return
leak_context_edge
;
}
bool
EdgeStore
::
is_empty
()
const
{
return
!
_edges
->
has_entries
();
}
/*
* The purpose of put_chain() is to reify the edge sequence
* discovered during heap traversal with a normalized logical copy.
* This copy consist of two sub-sequences and a connecting link (skip edge).
*
* "current" can be thought of as the cursor (search) edge, it is not in the edge store.
* "previous" is always an edge in the edge store.
* The leak context edge is the edge adjacent to the leak candidate object, always an edge in the edge store.
*/
void
EdgeStore
::
put_chain
(
const
Edge
*
chain
,
size_t
length
)
{
assert
(
chain
!=
NULL
,
"invariant"
);
assert
(
chain
->
distance_to_root
()
+
1
==
length
,
"invariant"
);
StoredEdge
*
const
leak_context_edge
=
associate_leak_context_with_candidate
(
chain
);
assert
(
leak_context_edge
!=
NULL
,
"invariant"
);
assert
(
leak_context_edge
->
parent
()
==
NULL
,
"invariant"
);
if
(
1
==
length
)
{
return
;
}
const
Edge
*
current
=
chain
->
parent
();
assert
(
current
!=
NULL
,
"invariant"
);
StoredEdge
*
previous
=
leak_context_edge
;
// a leak context is the sequence of (limited) edges reachable from the leak candidate
if
(
put_edges
(
&
previous
,
&
current
,
EdgeUtils
::
leak_context
))
{
// complete
assert
(
previous
!=
NULL
,
"invariant"
);
put_chain_epilogue
(
leak_context_edge
,
EdgeUtils
::
root
(
*
previous
));
return
;
}
const
size_t
distance_to_root
=
length
>
EdgeUtils
::
leak_context
?
length
-
1
-
EdgeUtils
::
leak_context
:
length
-
1
;
assert
(
current
->
distance_to_root
()
==
distance_to_root
,
"invariant"
);
size_t
EdgeStore
::
number_of_entries
()
const
{
return
_edges
->
cardinality
();
// a skip edge is the logical link
// connecting the leak context sequence with the root context sequence
if
(
put_skip_edge
(
&
previous
,
&
current
,
distance_to_root
))
{
// complete
assert
(
previous
!=
NULL
,
"invariant"
);
assert
(
previous
->
is_skip_edge
(),
"invariant"
);
assert
(
previous
->
parent
()
!=
NULL
,
"invariant"
);
put_chain_epilogue
(
leak_context_edge
,
EdgeUtils
::
root
(
*
previous
->
parent
()));
return
;
}
assert
(
current
->
distance_to_root
()
<
EdgeUtils
::
root_context
,
"invariant"
);
// a root context is the sequence of (limited) edges reachable from the root
put_edges
(
&
previous
,
&
current
,
EdgeUtils
::
root_context
);
assert
(
previous
!=
NULL
,
"invariant"
);
put_chain_epilogue
(
leak_context_edge
,
EdgeUtils
::
root
(
*
previous
));
}
void
EdgeStore
::
assign_id
(
EdgeEntry
*
entry
)
{
assert
(
entry
!=
NULL
,
"invariant"
);
assert
(
entry
->
id
()
==
0
,
"invariant"
);
entry
->
set_id
(
++
_edge_id_counter
);
void
EdgeStore
::
put_chain_epilogue
(
StoredEdge
*
leak_context_edge
,
const
Edge
*
root
)
const
{
assert
(
leak_context_edge
!=
NULL
,
"invariant"
);
assert
(
root
!=
NULL
,
"invariant"
);
store_gc_root_id_in_leak_context_edge
(
leak_context_edge
,
root
);
assert
(
leak_context_edge
->
distance_to_root
()
+
1
<=
EdgeUtils
::
max_ref_chain_depth
,
"invariant"
);
}
bool
EdgeStore
::
equals
(
const
Edge
&
query
,
uintptr_t
hash
,
const
EdgeEntry
*
entry
)
{
assert
(
entry
!=
NULL
,
"invariant"
);
assert
(
entry
->
hash
()
==
hash
,
"invariant"
);
return
true
;
// To avoid another traversal to resolve the root edge id later,
// cache it in the immediate leak context edge for fast retrieval.
void
EdgeStore
::
store_gc_root_id_in_leak_context_edge
(
StoredEdge
*
leak_context_edge
,
const
Edge
*
root
)
const
{
assert
(
leak_context_edge
!=
NULL
,
"invariant"
);
assert
(
leak_context_edge
->
gc_root_id
()
==
0
,
"invariant"
);
assert
(
root
!=
NULL
,
"invariant"
);
assert
(
root
->
parent
()
==
NULL
,
"invariant"
);
assert
(
root
->
distance_to_root
()
==
0
,
"invariant"
);
const
StoredEdge
*
const
stored_root
=
static_cast
<
const
StoredEdge
*>
(
root
);
traceid
root_id
=
stored_root
->
gc_root_id
();
if
(
root_id
==
0
)
{
root_id
=
get_id
(
root
);
stored_root
->
set_gc_root_id
(
root_id
);
}
assert
(
root_id
!=
0
,
"invariant"
);
leak_context_edge
->
set_gc_root_id
(
root_id
);
assert
(
leak_context_edge
->
gc_root_id
()
==
stored_root
->
gc_root_id
(),
"invariant"
);
}
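One plausible reading of the StoredEdge encoding used above, sketched as standalone code (hypothetical names, not HotSpot API): a nonzero _skip_length marks a skip edge, and the skipped edges still count toward the logical chain depth even though they are never materialized in the store:

    // Standalone sketch under that assumption; not part of the commit.
    #include <cstddef>

    struct StoredEdgeSketch {
      const StoredEdgeSketch* parent;  // physical parent in the edge store
      std::size_t skip_length;         // != 0 marks a skip edge bridging that many edges
    };

    // Logical depth: a plain edge counts one hop; a skip edge counts the
    // number of edges it bridges, so capped chains report their true depth.
    std::size_t logical_depth(const StoredEdgeSketch* e) {
      std::size_t depth = 0;
      for (; e != nullptr; e = e->parent) {
        depth += (e->skip_length != 0) ? e->skip_length : 1;
      }
      return depth;
    }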
src/share/vm/jfr/leakprofiler/chains/edgeStore.hpp

@@ -25,64 +25,40 @@
 #ifndef SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP
 #define SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP

-#include "jfr/utilities/jfrHashtable.hpp"
 #include "jfr/leakprofiler/chains/edge.hpp"
+#include "jfr/utilities/jfrHashtable.hpp"
 #include "memory/allocation.hpp"

 typedef u8 traceid;

-class RoutableEdge : public Edge {
+class StoredEdge : public Edge {
  private:
-  mutable const RoutableEdge* _skip_edge;
-  mutable size_t _skip_length;
-  mutable bool _processed;
+  mutable traceid _gc_root_id;
+  size_t _skip_length;

  public:
-  RoutableEdge();
-  RoutableEdge(const Edge* parent, const oop* reference);
-  RoutableEdge(const Edge& edge);
-  RoutableEdge(const RoutableEdge& edge);
-  void operator=(const RoutableEdge& edge);
+  StoredEdge();
+  StoredEdge(const Edge* parent, const oop* reference);
+  StoredEdge(const Edge& edge);
+  StoredEdge(const StoredEdge& edge);
+  void operator=(const StoredEdge& edge);

-  const RoutableEdge* skip_edge() const { return _skip_edge; }
-  size_t skip_length() const { return _skip_length; }
+  traceid gc_root_id() const { return _gc_root_id; }
+  void set_gc_root_id(traceid root_id) const { _gc_root_id = root_id; }

-  bool is_skip_edge() const { return _skip_edge != NULL; }
-  bool processed() const { return _processed; }
-  bool is_sentinel() const { return _skip_edge == NULL && _skip_length == 1; }
+  bool is_skip_edge() const { return _skip_length != 0; }
+  size_t skip_length() const { return _skip_length; }
+  void set_skip_length(size_t length) { _skip_length = length; }

-  void set_skip_edge(const RoutableEdge* edge) const {
-    assert(!is_skip_edge(), "invariant");
-    assert(edge != this, "invariant");
-    _skip_edge = edge;
-  }
+  void set_parent(const Edge* edge) { this->_parent = edge; }

-  void set_skip_length(size_t length) const {
-    _skip_length = length;
-  }
-
-  void set_processed() const {
-    assert(!_processed, "invariant");
-    _processed = true;
-  }
-
-  // true navigation according to physical tree representation
-  const RoutableEdge* physical_parent() const {
-    return static_cast<const RoutableEdge*>(parent());
-  }
-
-  // logical navigation taking skip levels into account
-  const RoutableEdge* logical_parent() const {
-    return is_skip_edge() ? skip_edge() : physical_parent();
-  }
-
-  size_t logical_distance_to_root() const;
+  StoredEdge* parent() const {
+    return const_cast<StoredEdge*>(static_cast<const StoredEdge*>(Edge::parent()));
+  }
 };

 class EdgeStore : public CHeapObj<mtTracing> {
-  typedef HashTableHost<RoutableEdge, traceid, Entry, EdgeStore> EdgeHashTable;
+  typedef HashTableHost<StoredEdge, traceid, Entry, EdgeStore> EdgeHashTable;
   typedef EdgeHashTable::HashEntry EdgeEntry;
   template <typename, typename,
...
@@ -90,6 +66,9 @@ class EdgeStore : public CHeapObj<mtTracing> {
             typename, size_t>
   friend class HashTableHost;
+  friend class EventEmitter;
+  friend class ObjectSampleWriter;
+  friend class ObjectSampleCheckpoint;
  private:
   static traceid _edge_id_counter;
   EdgeHashTable* _edges;
...
@@ -98,22 +77,31 @@ class EdgeStore : public CHeapObj<mtTracing> {
   void assign_id(EdgeEntry* entry);
   bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry);

-  const Edge* get_edge(const Edge* edge) const;
-  const Edge* put(const Edge* edge);
+  StoredEdge* get(const oop* reference) const;
+  StoredEdge* put(const oop* reference);
+  traceid gc_root_id(const Edge* edge) const;
+
+  bool put_edges(StoredEdge** previous, const Edge** current, size_t length);
+  bool put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root);
+  void put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const;
+
+  StoredEdge* associate_leak_context_with_candidate(const Edge* edge);
+  void store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const;
+  StoredEdge* link_new_edge(StoredEdge** previous, const Edge** current);
+  void link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length);
+
+  template <typename T>
+  void iterate(T& functor) const { _edges->iterate_value<T>(functor); }
+
+  DEBUG_ONLY(bool contains(const oop* reference) const;)

  public:
   EdgeStore();
   ~EdgeStore();

-  void add_chain(const Edge* chain, size_t length);
   bool is_empty() const;
-  size_t number_of_entries() const;
   traceid get_id(const Edge* edge) const;
-  traceid get_root_id(const Edge* edge) const;
-
-  template <typename T>
-  void iterate_edges(T& functor) const { _edges->iterate_value<T>(functor); }
+  void put_chain(const Edge* chain, size_t length);
 };

 #endif // SHARE_VM_LEAKPROFILER_CHAINS_EDGESTORE_HPP
src/share/vm/jfr/leakprofiler/chains/edgeUtils.cpp

 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -38,11 +38,7 @@ bool EdgeUtils::is_leak_edge(const Edge& edge) {
   return (const Edge*)edge.pointee()->mark() == &edge;
 }

-bool EdgeUtils::is_root(const Edge& edge) {
-  return edge.is_root();
-}
-
-static int field_offset(const Edge& edge) {
+static int field_offset(const StoredEdge& edge) {
   assert(!edge.is_root(), "invariant");
   const oop ref_owner = edge.reference_owner();
   assert(ref_owner != NULL, "invariant");
...
@@ -56,7 +52,7 @@ static int field_offset(const Edge& edge) {
   return offset;
 }

-static const InstanceKlass* field_type(const Edge& edge) {
+static const InstanceKlass* field_type(const StoredEdge& edge) {
   assert(!edge.is_root() || !EdgeUtils::is_array_element(edge), "invariant");
   return (const InstanceKlass*)edge.reference_owner_klass();
 }
...
@@ -138,175 +134,18 @@ const Edge* EdgeUtils::root(const Edge& edge) {
     current = parent;
     parent = current->parent();
   }
+  assert(current != NULL, "invariant");
   return current;
 }

-// The number of references associated with the leak node;
-// can be viewed as the leak node "context".
-// Used to provide leak context for a "capped/skipped" reference chain.
-static const size_t leak_context = 100;
-
-// The number of references associated with the root node;
-// can be viewed as the root node "context".
-// Used to provide root context for a "capped/skipped" reference chain.
-static const size_t root_context = 100;
-
-// A limit on the reference chain depth to be serialized,
-static const size_t max_ref_chain_depth = leak_context + root_context;
-
-const RoutableEdge* skip_to(const RoutableEdge& edge, size_t skip_length) {
-  const RoutableEdge* current = &edge;
-  const RoutableEdge* parent = current->physical_parent();
+const Edge* EdgeUtils::ancestor(const Edge& edge, size_t distance) {
+  const Edge* current = &edge;
+  const Edge* parent = current->parent();
   size_t seek = 0;
-  while (parent != NULL && seek != skip_length) {
+  while (parent != NULL && seek != distance) {
     seek++;
     current = parent;
-    parent = parent->physical_parent();
+    parent = parent->parent();
   }
   return current;
 }
-
-#ifdef ASSERT
-static void validate_skip_target(const RoutableEdge* skip_target) {
-  assert(skip_target != NULL, "invariant");
-  assert(skip_target->distance_to_root() + 1 == root_context, "invariant");
-  assert(skip_target->is_sentinel(), "invariant");
-}
-
-static void validate_new_skip_edge(const RoutableEdge* new_skip_edge, const RoutableEdge* last_skip_edge, size_t adjustment) {
-  assert(new_skip_edge != NULL, "invariant");
-  assert(new_skip_edge->is_skip_edge(), "invariant");
-  if (last_skip_edge != NULL) {
-    const RoutableEdge* const target = skip_to(*new_skip_edge->logical_parent(), adjustment);
-    validate_skip_target(target->logical_parent());
-    return;
-  }
-  assert(last_skip_edge == NULL, "invariant");
-  // only one level of logical indirection
-  validate_skip_target(new_skip_edge->logical_parent());
-}
-#endif // ASSERT
-
-static void install_logical_route(const RoutableEdge* new_skip_edge, size_t skip_target_distance) {
-  assert(new_skip_edge != NULL, "invariant");
-  assert(!new_skip_edge->is_skip_edge(), "invariant");
-  assert(!new_skip_edge->processed(), "invariant");
-  const RoutableEdge* const skip_target = skip_to(*new_skip_edge, skip_target_distance);
-  assert(skip_target != NULL, "invariant");
-  new_skip_edge->set_skip_edge(skip_target);
-  new_skip_edge->set_skip_length(skip_target_distance);
-  assert(new_skip_edge->is_skip_edge(), "invariant");
-  assert(new_skip_edge->logical_parent() == skip_target, "invariant");
-}
-
-static const RoutableEdge* find_last_skip_edge(const RoutableEdge& edge, size_t& distance) {
-  assert(distance == 0, "invariant");
-  const RoutableEdge* current = &edge;
-  while (current != NULL) {
-    if (current->is_skip_edge() && current->skip_edge()->is_sentinel()) {
-      return current;
-    }
-    current = current->physical_parent();
-    ++distance;
-  }
-  return current;
-}
-
-static void collapse_overlapping_chain(const RoutableEdge& edge,
-                                       const RoutableEdge* first_processed_edge,
-                                       size_t first_processed_distance) {
-  assert(first_processed_edge != NULL, "invariant");
-  // first_processed_edge is already processed / written
-  assert(first_processed_edge->processed(), "invariant");
-  assert(first_processed_distance + 1 <= leak_context, "invariant");
-
-  // from this first processed edge, attempt to fetch the last skip edge
-  size_t last_skip_edge_distance = 0;
-  const RoutableEdge* const last_skip_edge = find_last_skip_edge(*first_processed_edge, last_skip_edge_distance);
-  const size_t distance_discovered = first_processed_distance + last_skip_edge_distance + 1;
-
-  if (distance_discovered <= leak_context || (last_skip_edge == NULL && distance_discovered <= max_ref_chain_depth)) {
-    // complete chain can be accommodated without modification
-    return;
-  }
-
-  // backtrack one edge from existing processed edge
-  const RoutableEdge* const new_skip_edge = skip_to(edge, first_processed_distance - 1);
-  assert(new_skip_edge != NULL, "invariant");
-  assert(!new_skip_edge->processed(), "invariant");
-  assert(new_skip_edge->parent() == first_processed_edge, "invariant");
-
-  size_t adjustment = 0;
-  if (last_skip_edge != NULL) {
-    assert(leak_context - 1 > first_processed_distance - 1, "invariant");
-    adjustment = leak_context - first_processed_distance - 1;
-    assert(last_skip_edge_distance + 1 > adjustment, "invariant");
-    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - adjustment);
-  } else {
-    install_logical_route(new_skip_edge, last_skip_edge_distance + 1 - root_context);
-    new_skip_edge->logical_parent()->set_skip_length(1); // sentinel
-  }
-
-  DEBUG_ONLY(validate_new_skip_edge(new_skip_edge, last_skip_edge, adjustment);)
-}
-
-static void collapse_non_overlapping_chain(const RoutableEdge& edge,
-                                           const RoutableEdge* first_processed_edge,
-                                           size_t first_processed_distance) {
-  assert(first_processed_edge != NULL, "invariant");
-  assert(!first_processed_edge->processed(), "invariant");
-  // this implies that the first "processed" edge is the leak context relative "leaf"
-  assert(first_processed_distance + 1 == leak_context, "invariant");
-
-  const size_t distance_to_root = edge.distance_to_root();
-  if (distance_to_root + 1 <= max_ref_chain_depth) {
-    // complete chain can be accommodated without constructing a skip edge
-    return;
-  }
-
-  install_logical_route(first_processed_edge, distance_to_root + 1 - first_processed_distance - root_context);
-  first_processed_edge->logical_parent()->set_skip_length(1); // sentinel
-
-  DEBUG_ONLY(validate_new_skip_edge(first_processed_edge, NULL, 0);)
-}
-
-static const RoutableEdge* processed_edge(const RoutableEdge& edge, size_t& distance) {
-  assert(distance == 0, "invariant");
-  const RoutableEdge* current = &edge;
-  while (current != NULL && distance < leak_context - 1) {
-    if (current->processed()) {
-      return current;
-    }
-    current = current->physical_parent();
-    ++distance;
-  }
-  assert(distance <= leak_context - 1, "invariant");
-  return current;
-}
-
-/*
- * Some vocabulary:
- * -----------
- * "Context" is an interval in the chain, it is associated with an edge and it signifies a number of connected edges.
- * "Processed / written" means an edge that has already been serialized.
- * "Skip edge" is an edge that contains additional information for logical routing purposes.
- * "Skip target" is an edge used as a destination for a skip edge
- */
-void EdgeUtils::collapse_chain(const RoutableEdge& edge) {
-  assert(is_leak_edge(edge), "invariant");
-
-  // attempt to locate an already processed edge inside current leak context (if any)
-  size_t first_processed_distance = 0;
-  const RoutableEdge* const first_processed_edge = processed_edge(edge, first_processed_distance);
-  if (first_processed_edge == NULL) {
-    return;
-  }
-
-  if (first_processed_edge->processed()) {
-    collapse_overlapping_chain(edge, first_processed_edge, first_processed_distance);
-  } else {
-    collapse_non_overlapping_chain(edge, first_processed_edge, first_processed_distance);
-  }
-
-  assert(edge.logical_distance_to_root() + 1 <= max_ref_chain_depth, "invariant");
-}
src/share/vm/jfr/leakprofiler/chains/edgeUtils.hpp

@@ -28,15 +28,17 @@
 #include "memory/allocation.hpp"

 class Edge;
-class RoutableEdge;
 class Symbol;

 class EdgeUtils : public AllStatic {
  public:
-  static bool is_leak_edge(const Edge& edge);
+  static const size_t leak_context = 100;
+  static const size_t root_context = 100;
+  static const size_t max_ref_chain_depth = leak_context + root_context;

+  static bool is_leak_edge(const Edge& edge);
   static const Edge* root(const Edge& edge);
-  static bool is_root(const Edge& edge);
+  static const Edge* ancestor(const Edge& edge, size_t distance);

   static bool is_array_element(const Edge& edge);
   static int array_index(const Edge& edge);
...
@@ -44,8 +46,6 @@ class EdgeUtils : public AllStatic {
   static const Symbol* field_name_symbol(const Edge& edge);
   static jshort field_modifiers(const Edge& edge);
-
-  static void collapse_chain(const RoutableEdge& edge);
 };

 #endif // SHARE_VM_LEAKPROFILER_CHAINS_EDGEUTILS_HPP
src/share/vm/jfr/leakprofiler/emitEventOperation.cpp → src/share/vm/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp (renamed)

 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
...
@@ -21,33 +21,35 @@
  * questions.
  *
  */
 #include "precompiled.hpp"
 #include "gc_interface/collectedHeap.hpp"
-#include "jfr/jfrEvents.hpp"
-#include "jfr/leakprofiler/utilities/granularTimer.hpp"
-#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
+#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/chains/bfsClosure.hpp"
 #include "jfr/leakprofiler/chains/bitset.hpp"
 #include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edge.hpp"
 #include "jfr/leakprofiler/chains/edgeQueue.hpp"
 #include "jfr/leakprofiler/chains/edgeStore.hpp"
-#include "jfr/leakprofiler/chains/bitset.hpp"
-#include "jfr/leakprofiler/sampling/objectSample.hpp"
-#include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
 #include "jfr/leakprofiler/chains/rootSetClosure.hpp"
-#include "jfr/leakprofiler/chains/edgeStore.hpp"
-#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
+#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
+#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
 #include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
+#include "jfr/leakprofiler/sampling/objectSample.hpp"
 #include "jfr/leakprofiler/sampling/objectSampler.hpp"
-#include "jfr/leakprofiler/emitEventOperation.hpp"
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-#include "jfr/support/jfrThreadId.hpp"
-#include "memory/resourceArea.hpp"
+#include "jfr/leakprofiler/utilities/granularTimer.hpp"
 #include "memory/universe.hpp"
 #include "oops/markOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/safepoint.hpp"
-#include "runtime/vmThread.hpp"
 #include "utilities/globalDefinitions.hpp"
+
+PathToGcRootsOperation::PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all) :
+  _sampler(sampler),
+  _edge_store(edge_store),
+  _cutoff_ticks(cutoff),
+  _emit_all(emit_all) {}

 /* The EdgeQueue is backed by directly managed virtual memory.
  * We will attempt to dimension an initial reservation
  * in proportion to the size of the heap (represented by heap_region).
...
@@ -76,36 +78,8 @@ static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
   }
 }

-void EmitEventOperation::doit() {
-  assert(LeakProfiler::is_running(), "invariant");
-  _object_sampler = LeakProfiler::object_sampler();
-  assert(_object_sampler != NULL, "invariant");
-
-  _vm_thread = VMThread::vm_thread();
-  assert(_vm_thread == Thread::current(), "invariant");
-  _vm_thread_local = _vm_thread->jfr_thread_local();
-  assert(_vm_thread_local != NULL, "invariant");
-  assert(_vm_thread->jfr_thread_local()->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
-
-  // The VM_Operation::evaluate() which invoked doit()
-  // contains a top level ResourceMark
-
-  // save the original markWord for the potential leak objects
-  // to be restored on function exit
-  ObjectSampleMarker marker;
-  if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) {
-    return;
-  }
-
-  EdgeStore edge_store;
-
-  GranularTimer::start(_cutoff_ticks, 1000000);
-  if (_cutoff_ticks <= 0) {
-    // no chains
-    write_events(&edge_store);
-    return;
-  }
+void PathToGcRootsOperation::doit() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(_cutoff_ticks > 0, "invariant");

   // The bitset used for marking is dimensioned as a function of the heap size
...
@@ -121,115 +95,37 @@ void EmitEventOperation::doit() {
   // As a fallback on failure, just write out the existing samples, flat, without chains.
   if (!(mark_bits.initialize() && edge_queue.initialize())) {
     if (LogJFR) tty->print_cr("Unable to allocate memory for root chain processing");
-    write_events(&edge_store);
     return;
   }

-  // necessary condition for attempting a root set iteration
+  // Save the original markWord for the potential leak objects,
+  // to be restored on function exit
+  ObjectSampleMarker marker;
+  if (ObjectSampleCheckpoint::mark(_sampler, marker, _emit_all) == 0) {
+    // no valid samples to process
+    return;
+  }
+
+  // Necessary condition for attempting a root set iteration
   Universe::heap()->ensure_parsability(false);

-  RootSetClosure::add_to_queue(&edge_queue);
+  BFSClosure bfs(&edge_queue, _edge_store, &mark_bits);
+  RootSetClosure<BFSClosure> roots(&bfs);
+
+  GranularTimer::start(_cutoff_ticks, 1000000);
+  roots.process();
   if (edge_queue.is_full()) {
     // Pathological case where roots don't fit in queue
     // Do a depth-first search, but mark roots first
     // to avoid walking sideways over roots
-    DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits);
+    DFSClosure::find_leaks_from_root_set(_edge_store, &mark_bits);
   } else {
-    BFSClosure bfs(&edge_queue, &edge_store, &mark_bits);
     bfs.process();
   }
   GranularTimer::stop();
-  write_events(&edge_store);
   log_edge_queue_summary(edge_queue);
-}
-
-int EmitEventOperation::write_events(EdgeStore* edge_store) {
-  assert(_object_sampler != NULL, "invariant");
-  assert(edge_store != NULL, "invariant");
-  assert(_vm_thread != NULL, "invariant");
-  assert(_vm_thread_local != NULL, "invariant");
-  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-
-  // save thread id in preparation for thread local trace data manipulations
-  const traceid vmthread_id = _vm_thread_local->thread_id();
-  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
-
-  const jlong last_sweep = _emit_all ? max_jlong : _object_sampler->last_sweep().value();
-  int count = 0;
-
-  const ObjectSample* current = _object_sampler->first();
-  while (current != NULL) {
-    ObjectSample* prev = current->prev();
-    if (current->is_alive_and_older_than(last_sweep)) {
-      write_event(current, edge_store);
-      ++count;
-    }
-    current = prev;
-  }
-
-  // restore thread local stack trace and thread id
-  _vm_thread_local->set_thread_id(vmthread_id);
-  _vm_thread_local->clear_cached_stack_trace();
-  assert(_vm_thread_local->thread_id() == JFR_THREAD_ID(_vm_thread), "invariant");
-
-  if (count > 0) {
-    // serialize associated checkpoints
-    ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread);
-  }
-  return count;
-}
-
-static int array_size(const oop object) {
-  assert(object != NULL, "invariant");
-  if (object->is_array()) {
-    return arrayOop(object)->length();
-  }
-  return min_jint;
-}
-
-void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
-  assert(sample != NULL, "invariant");
-  assert(!sample->is_dead(), "invariant");
-  assert(edge_store != NULL, "invariant");
-  assert(_vm_thread_local != NULL, "invariant");
-
-  const oop* object_addr = sample->object_addr();
-  assert(*object_addr != NULL, "invariant");
-
-  const Edge* edge = (const Edge*)(*object_addr)->mark();
-  traceid gc_root_id = 0;
-  if (edge == NULL) {
-    // In order to dump out a representation of the event
-    // even though it was not reachable / too long to reach,
-    // we need to register a top level edge for this object
-    Edge e(NULL, object_addr);
-    edge_store->add_chain(&e, 1);
-    edge = (const Edge*)(*object_addr)->mark();
-  } else {
-    gc_root_id = edge_store->get_root_id(edge);
-  }
-
-  assert(edge != NULL, "invariant");
-  assert(edge->pointee() == *object_addr, "invariant");
-  const traceid object_id = edge_store->get_id(edge);
-  assert(object_id != 0, "invariant");
-
-  EventOldObjectSample e(UNTIMED);
-  e.set_starttime(GranularTimer::start_time());
-  e.set_endtime(GranularTimer::end_time());
-  e.set_allocationTime(sample->allocation_time());
-  e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
-  e.set_object(object_id);
-  e.set_arrayElements(array_size(*object_addr));
-  e.set_root(gc_root_id);
-
-  // Temporarily assigning both the stack trace id and thread id
-  // onto the thread local data structure of the VMThread (for the duration
-  // of the commit() call). This trick provides a means to override
-  // the event generation mechanism by injecting externally provided id's.
-  // Here, in particular, this allows us to emit an old object event
-  // supplying information from where the actual sampling occurred.
-  _vm_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
-  assert(sample->has_thread(), "invariant");
-  _vm_thread_local->set_thread_id(sample->thread_id());
-  e.commit();
+
+  // Emit old objects including their reference chains as events
+  EventEmitter emitter(GranularTimer::start_time(), GranularTimer::end_time());
+  emitter.write_events(_sampler, _edge_store, _emit_all);
 }
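The deleted write_events()/write_event() bodies are not lost: they reappear, largely intact, as EventEmitter methods in eventEmitter.cpp below, where the destructor now performs the thread-local stack trace and thread id restoration that the old code did inline at the end of write_events().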
src/share/vm/jfr/leakprofiler/chains/pathToGcRootsOperation.hpp (new file, mode 100644)

/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
#define SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP

#include "jfr/leakprofiler/utilities/vmOperation.hpp"

class EdgeStore;
class ObjectSampler;

// Safepoint operation for finding paths to gc roots
class PathToGcRootsOperation : public OldObjectVMOperation {
 private:
  ObjectSampler* _sampler;
  EdgeStore* const _edge_store;
  const int64_t _cutoff_ticks;
  const bool _emit_all;

 public:
  PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore* edge_store, int64_t cutoff, bool emit_all);
  virtual void doit();
};

#endif // SHARE_JFR_LEAKPROFILER_CHAINS_PATHTOGCROOTSOPERATION_HPP
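How the operation is driven, excerpted from EventEmitter::emit() later in this commit: events with reference chains must be produced at a safepoint, so the work is packaged as a VM operation and handed to the VM thread (sampler, cutoff_ticks and emit_all are the parameters of emit()):

    EdgeStore edge_store;
    // events emitted with reference chains require a safepoint operation
    PathToGcRootsOperation op(sampler, &edge_store, cutoff_ticks, emit_all);
    VMThread::execute(&op);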
src/share/vm/jfr/leakprofiler/chains/rootSetClosure.cpp

@@ -25,11 +25,14 @@
 #include "precompiled.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "jfr/leakprofiler/chains/bfsClosure.hpp"
+#include "jfr/leakprofiler/chains/dfsClosure.hpp"
 #include "jfr/leakprofiler/chains/edgeQueue.hpp"
 #include "jfr/leakprofiler/chains/rootSetClosure.hpp"
 #include "jfr/leakprofiler/utilities/saveRestore.hpp"
 #include "jfr/leakprofiler/utilities/unifiedOop.hpp"
+#include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/synchronizer.hpp"
...
@@ -37,11 +40,11 @@
 #include "services/management.hpp"
 #include "utilities/align.hpp"

-RootSetClosure::RootSetClosure(EdgeQueue* edge_queue) :
-  _edge_queue(edge_queue) {
-}
+template <typename Delegate>
+RootSetClosure<Delegate>::RootSetClosure(Delegate* delegate) : _delegate(delegate) {}

-void RootSetClosure::do_oop(oop* ref) {
+template <typename Delegate>
+void RootSetClosure<Delegate>::do_oop(oop* ref) {
   assert(ref != NULL, "invariant");
   // We discard unaligned root references because
   // our reference tagging scheme will use
...
@@ -55,48 +58,38 @@ void RootSetClosure::do_oop(oop* ref) {
   }
   assert(is_aligned(ref, HeapWordSize), "invariant");
-  const oop pointee = *ref;
-  if (pointee != NULL) {
-    closure_impl(ref, pointee);
+  if (*ref != NULL) {
+    _delegate->do_root(ref);
   }
 }

-void RootSetClosure::do_oop(narrowOop* ref) {
+template <typename Delegate>
+void RootSetClosure<Delegate>::do_oop(narrowOop* ref) {
   assert(ref != NULL, "invariant");
   assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
   const oop pointee = oopDesc::load_decode_heap_oop(ref);
   if (pointee != NULL) {
-    closure_impl(UnifiedOop::encode(ref), pointee);
+    _delegate->do_root(UnifiedOop::encode(ref));
   }
 }

-void RootSetClosure::closure_impl(const oop* reference, const oop pointee) {
-  if (!_edge_queue->is_full())  {
-    _edge_queue->add(NULL, reference);
-  }
-}
-
-void RootSetClosure::add_to_queue(EdgeQueue* edge_queue) {
-  RootSetClosure rs(edge_queue);
-  process_roots(&rs);
-}
-
 class RootSetClosureMarkScope : public MarkingCodeBlobClosure::MarkScope {};

-void RootSetClosure::process_roots(OopClosure* closure) {
-  SaveRestoreCLDClaimBits save_restore_cld_claim_bits;
+template <typename Delegate>
+void RootSetClosure<Delegate>::process() {
   RootSetClosureMarkScope mark_scope;

-  CLDToOopClosure cldt_closure(closure);
+  CLDToOopClosure cldt_closure(this);
   ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
-  CodeBlobToOopClosure blobs(closure, false);
-  Threads::oops_do(closure, NULL, &blobs); // XXX set CLDClosure to NULL
-  ObjectSynchronizer::oops_do(closure);
-  Universe::oops_do(closure);
-  JNIHandles::oops_do(closure);
-  JvmtiExport::oops_do(closure);
-  SystemDictionary::oops_do(closure);
-  Management::oops_do(closure);
-  StringTable::oops_do(closure);
+  CodeBlobToOopClosure blobs(this, false);
+  Threads::oops_do(this, NULL, &blobs); // XXX set CLDClosure to NULL
+  ObjectSynchronizer::oops_do(this);
+  Universe::oops_do(this);
+  JNIHandles::oops_do(this);
+  JvmtiExport::oops_do(this);
+  SystemDictionary::oops_do(this);
+  Management::oops_do(this);
+  StringTable::oops_do(this);
 }
+
+template class RootSetClosure<BFSClosure>;
+template class RootSetClosure<DFSClosure>;
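The rewrite turns RootSetClosure into a class template over a Delegate and pins its instantiations in the .cpp file. A generic sketch of that pattern with hypothetical names (not HotSpot code), showing why the explicit instantiations at the bottom of the file are needed when the template body lives outside the header:

    // Standalone sketch: a walker templated on a delegate, with explicit
    // instantiation in the .cpp so the template body can stay out of the header.
    template <typename Delegate>
    class Walker {
      Delegate* const _delegate;
     public:
      explicit Walker(Delegate* d) : _delegate(d) {}
      void visit(const int* ref) { _delegate->do_root(ref); }  // static dispatch, no virtual call
    };

    struct CountingDelegate {
      int count;
      void do_root(const int* ref) { (void)ref; ++count; }
    };

    // Explicit instantiation, as with RootSetClosure<BFSClosure> above: it
    // forces the compiler to emit Walker<CountingDelegate> in this unit.
    template class Walker<CountingDelegate>;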
src/share/vm/jfr/leakprofiler/chains/rootSetClosure.hpp
浏览文件 @
2a8d5f43
...
...
@@ -26,18 +26,14 @@
#define SHARE_VM_JFR_LEAKPROFILER_CHAINS_ROOTSETCLOSURE_HPP
#include "memory/iterator.hpp"
#include "oops/oop.hpp"
-class EdgeQueue;
-
-class RootSetClosure : public ExtendedOopClosure {
+template <typename Delegate>
+class RootSetClosure : public ExtendedOopClosure { // BasicOopIterateClosure
  private:
-  RootSetClosure(EdgeQueue* edge_queue);
-  EdgeQueue* _edge_queue;
-  void closure_impl(const oop* reference, const oop pointee);
+  Delegate* const _delegate;
  public:
-  static void add_to_queue(EdgeQueue* edge_queue);
-  static void process_roots(OopClosure* closure);
+  RootSetClosure(Delegate* delegate);
+  void process();
   virtual void do_oop(oop* reference);
   virtual void do_oop(narrowOop* reference);
...
...
src/share/vm/jfr/leakprofiler/checkpoint/eventEmitter.cpp
0 → 100644
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/pathToGcRootsOperation.hpp"
#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
EventEmitter::EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time) :
  _start_time(start_time),
  _end_time(end_time),
  _thread(Thread::current()),
  _jfr_thread_local(_thread->jfr_thread_local()),
  _thread_id(_thread->jfr_thread_local()->thread_id()) {}

EventEmitter::~EventEmitter() {
  // restore / reset thread local stack trace and thread id
  _jfr_thread_local->set_thread_id(_thread_id);
  _jfr_thread_local->clear_cached_stack_trace();
}

void EventEmitter::emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all) {
  assert(sampler != NULL, "invariant");

  ResourceMark rm;
  EdgeStore edge_store;
  if (cutoff_ticks <= 0) {
    // no reference chains
    JfrTicks time_stamp = JfrTicks::now();
    EventEmitter emitter(time_stamp, time_stamp);
    emitter.write_events(sampler, &edge_store, emit_all);
    return;
  }
  // events emitted with reference chains require a safepoint operation
  PathToGcRootsOperation op(sampler, &edge_store, cutoff_ticks, emit_all);
  VMThread::execute(&op);
}

size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge_store, bool emit_all) {
  assert(_thread == Thread::current(), "invariant");
  assert(_thread->jfr_thread_local() == _jfr_thread_local, "invariant");
  assert(object_sampler != NULL, "invariant");
  assert(edge_store != NULL, "invariant");

  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
  size_t count = 0;

  const ObjectSample* current = object_sampler->first();
  while (current != NULL) {
    ObjectSample* prev = current->prev();
    if (current->is_alive_and_older_than(last_sweep)) {
      write_event(current, edge_store);
      ++count;
    }
    current = prev;
  }

  if (count > 0) {
    // serialize associated checkpoints and potential chains
    ObjectSampleCheckpoint::write(object_sampler, edge_store, emit_all, _thread);
  }
  return count;
}

static int array_size(const oop object) {
  assert(object != NULL, "invariant");
  if (object->is_array()) {
    return arrayOop(object)->length();
  }
  return min_jint;
}

void EventEmitter::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
  assert(sample != NULL, "invariant");
  assert(!sample->is_dead(), "invariant");
  assert(edge_store != NULL, "invariant");
  assert(_jfr_thread_local != NULL, "invariant");

  const oop* object_addr = sample->object_addr();
  traceid gc_root_id = 0;
  const Edge* edge = NULL;
  if (SafepointSynchronize::is_at_safepoint()) {
    edge = (const Edge*)(*object_addr)->mark();
  }
  if (edge == NULL) {
    // In order to dump out a representation of the event
    // even though it was not reachable / too long to reach,
    // we need to register a top level edge for this object.
    edge = edge_store->put(object_addr);
  } else {
    gc_root_id = edge_store->gc_root_id(edge);
  }

  assert(edge != NULL, "invariant");
  const traceid object_id = edge_store->get_id(edge);
  assert(object_id != 0, "invariant");

  EventOldObjectSample e(UNTIMED);
  e.set_starttime(_start_time);
  e.set_endtime(_end_time);
  e.set_allocationTime(sample->allocation_time());
  e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
  e.set_object(object_id);
  e.set_arrayElements(array_size(edge->pointee()));
  e.set_root(gc_root_id);

  // Temporarily assigning both the stack trace id and thread id
  // onto the thread local data structure of the emitter thread (for the duration
  // of the commit() call). This trick provides a means to override
  // the event generation mechanism by injecting externally provided id's.
  // At this particular location, it allows us to emit an old object event
  // supplying information from where the actual sampling occurred.
  _jfr_thread_local->set_cached_stack_trace_id(sample->stack_trace_id());
  assert(sample->has_thread(), "invariant");
  _jfr_thread_local->set_thread_id(sample->thread_id());
  e.commit();
}
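The constructor/destructor pair of EventEmitter implements the save-and-restore trick described in the comments above: ids recorded at sampling time are injected into the emitter thread's locals for the duration of commit(), then the originals are restored. A standalone sketch of the idiom (not HotSpot code; ThreadLocalIds and Emitter are hypothetical names):

#include <cstdint>
#include <iostream>

struct ThreadLocalIds {
  uint64_t thread_id;
  uint64_t cached_stack_trace_id;
};

class Emitter {
 private:
  ThreadLocalIds* _tl;
  uint64_t _saved_thread_id; // captured at construction
 public:
  explicit Emitter(ThreadLocalIds* tl)
    : _tl(tl), _saved_thread_id(tl->thread_id) {}
  ~Emitter() {
    // restore / reset, mirroring EventEmitter::~EventEmitter()
    _tl->thread_id = _saved_thread_id;
    _tl->cached_stack_trace_id = 0;
  }
  void emit(uint64_t sampled_thread_id, uint64_t sampled_stack_trace_id) {
    // inject the ids recorded at sampling time for this one event
    _tl->thread_id = sampled_thread_id;
    _tl->cached_stack_trace_id = sampled_stack_trace_id;
    std::cout << "event: thread=" << _tl->thread_id
              << " stack=" << _tl->cached_stack_trace_id << std::endl;
  }
};

int main() {
  ThreadLocalIds tl = {42, 0};
  {
    Emitter e(&tl);
    e.emit(7, 1001); // event carries the sampler's ids, not thread 42's
  }
  std::cout << "restored thread id: " << tl.thread_id << std::endl; // 42
  return 0;
}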
src/share/vm/jfr/leakprofiler/emitEventOperation.hpp → src/share/vm/jfr/leakprofiler/checkpoint/eventEmitter.hpp
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -22,48 +22,37 @@
*
*/
-#ifndef SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP
-#define SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP
+#ifndef SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
+#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP

-#include "runtime/vm_operations.hpp"
+#include "memory/allocation.hpp"
+#include "jfr/utilities/jfrTime.hpp"

 typedef u8 traceid;

-class BFSClosure;
 class EdgeStore;
-class EdgeQueue;
-class JfrThreadData;
+class JfrThreadLocal;
 class ObjectSample;
 class ObjectSampler;
+class Thread;

-// Safepoint operation for emitting object sample events
-class EmitEventOperation : public VM_Operation {
+class EventEmitter : public CHeapObj<mtTracing> {
+  friend class LeakProfiler;
+  friend class PathToGcRootsOperation;
  private:
-  jlong _cutoff_ticks;
-  bool _emit_all;
-  VMThread* _vm_thread;
-  JfrThreadLocal* _vm_thread_local;
-  ObjectSampler* _object_sampler;
-
-  void write_event(const ObjectSample* sample, EdgeStore* edge_store);
-  int write_events(EdgeStore* edge_store);
+  const JfrTicks& _start_time;
+  const JfrTicks& _end_time;
+  Thread* _thread;
+  JfrThreadLocal* _jfr_thread_local;
+  traceid _thread_id;

- public:
-  EmitEventOperation(jlong cutoff_ticks, bool emit_all) :
-    _cutoff_ticks(cutoff_ticks),
-    _emit_all(emit_all),
-    _vm_thread(NULL),
-    _vm_thread_local(NULL),
-    _object_sampler(NULL) {
-  }
+  EventEmitter(const JfrTicks& start_time, const JfrTicks& end_time);
+  ~EventEmitter();

-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
-
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
+  void write_event(const ObjectSample* sample, EdgeStore* edge_store);
+  size_t write_events(ObjectSampler* sampler, EdgeStore* store, bool emit_all);

-  virtual void doit();
+ public:
+  static void emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all);
 };

-#endif // SHARE_VM_LEAKPROFILER_EMITEVENTOPERATION_HPP
+#endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_EVENTEMITTER_HPP
src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
...
...
@@ -181,102 +181,89 @@ class SampleMark {
}
};
-void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool resume) {
-  assert(class_unload ? SafepointSynchronize::is_at_safepoint() : LeakProfiler::is_suspended(), "invariant");
-
+void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool type_set) {
   if (!writer.has_data()) {
-    if (!class_unload) {
-      LeakProfiler::resume();
-    }
-    assert(LeakProfiler::is_running(), "invariant");
     return;
   }

   assert(writer.has_data(), "invariant");
   const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob();
+  CheckpointInstall install(h_cp);

-  const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
+  // Class unload implies a safepoint.
+  // Not class unload implies the object sampler is locked, because it was claimed exclusively earlier.
+  // Therefore: direct access the object sampler instance is safe.
+  ObjectSampler* const object_sampler = ObjectSampler::sampler();
   assert(object_sampler != NULL, "invariant");

   ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
   const ObjectSample* const last_resolved = object_sampler->last_resolved();
-  CheckpointInstall install(h_cp);
-
-  if (class_unload) {
-    if (last != NULL) {
-      // all samples need the class unload information
-      do_samples(last, NULL, install);
-    }
-    assert(LeakProfiler::is_running(), "invariant");
-    return;
-  }

-  // only new samples since last resolved checkpoint
+  // install only to new samples since last resolved checkpoint
   if (last != last_resolved) {
     do_samples(last, last_resolved, install);
-    if (resume) {
-      const_cast<ObjectSampler*>(object_sampler)->set_last_resolved(last);
+    if (class_unload) {
+      return;
+    }
+    if (type_set) {
+      object_sampler->set_last_resolved(last);
     }
   }
-
-  assert(LeakProfiler::is_suspended(), "invariant");
-  if (resume) {
-    LeakProfiler::resume();
-    assert(LeakProfiler::is_running(), "invariant");
-  }
 }

-void ObjectSampleCheckpoint::write(const EdgeStore* edge_store, bool emit_all, Thread* thread) {
+void ObjectSampleCheckpoint::write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
+  assert(sampler != NULL, "invariant");
+  assert(edge_store != NULL, "invariant");
+  assert(thread != NULL, "invariant");
   static bool types_registered = false;
   if (!types_registered) {
     JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType());
     JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType());
     types_registered = true;
   }
-  const ObjectSampler* const object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
+  const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value();
+  ObjectSample* const last = const_cast<ObjectSample*>(sampler->last());
   {
     JfrCheckpointWriter writer(false, false, thread);
     CheckpointWrite checkpoint_write(writer, last_sweep);
     do_samples(last, NULL, checkpoint_write);
   }
   CheckpointStateReset state_reset(last_sweep);
   do_samples(last, NULL, state_reset);
   if (!edge_store->is_empty()) {
     // java object and chain representations
     JfrCheckpointWriter writer(false, true, thread);
     ObjectSampleWriter osw(writer, edge_store);
-    edge_store->iterate_edges(osw);
+    edge_store->iterate(osw);
   }
 }

+int ObjectSampleCheckpoint::mark(ObjectSampler* object_sampler, ObjectSampleMarker& marker, bool emit_all) {
+  assert(object_sampler != NULL, "invariant");
+  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
+  if (last == NULL) {
+    return 0;
+  }
+  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
+  SampleMark mark(marker, last_sweep);
+  do_samples(last, NULL, mark);
+  return mark.count();
+}

-WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(JfrStackTraceRepository& repo) :
-  _stack_trace_repo(repo) {
-}
+WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo) :
+  _sampler(sampler), _stack_trace_repo(repo) {}

 bool WriteObjectSampleStacktrace::process() {
-  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-  if (!LeakProfiler::is_running()) {
-    return true;
-  }
-  // Suspend the LeakProfiler subsystem
-  // to ensure stable samples even
-  // after we return from the safepoint.
-  LeakProfiler::suspend();
-  assert(!LeakProfiler::is_running(), "invariant");
-  assert(LeakProfiler::is_suspended(), "invariant");
-
-  const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  assert(LeakProfiler::is_suspended(), "invariant");
+  assert(LeakProfiler::is_running(), "invariant");
+  assert(_sampler != NULL, "invariant");

-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  const ObjectSample* const last_resolved = object_sampler->last_resolved();
+  ObjectSample* const last = const_cast<ObjectSample*>(_sampler->last());
+  const ObjectSample* const last_resolved = _sampler->last_resolved();
   if (last == last_resolved) {
-    assert(LeakProfiler::is_suspended(), "invariant");
     return true;
   }
...
...
@@ -294,27 +281,13 @@ bool WriteObjectSampleStacktrace::process() {
}
   if (count == 0) {
     writer.set_context(ctx);
-    assert(LeakProfiler::is_suspended(), "invariant");
     return true;
   }
   assert(count > 0, "invariant");
   writer.write_count((u4)count, count_offset);
   JfrStackTraceRepository::write_metadata(writer);

+  // install the stacktrace checkpoint information to the candidates
   ObjectSampleCheckpoint::install(writer, false, false);
-  assert(LeakProfiler::is_suspended(), "invariant");
   return true;
 }

-int ObjectSampleCheckpoint::mark(ObjectSampleMarker& marker, bool emit_all) {
-  const ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-  assert(object_sampler != NULL, "invariant");
-  ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last());
-  if (last == NULL) {
-    return 0;
-  }
-  const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value();
-  SampleMark mark(marker, last_sweep);
-  do_samples(last, NULL, mark);
-  return mark.count();
-}
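Every operation in this file funnels through the same do_samples() traversal: walk the intrusive sample list from the most recent sample back to (but not including) a stop sample, applying a functor to each node. A standalone sketch of that traversal shape (not HotSpot code; the list direction and names are illustrative):

#include <cstddef>
#include <iostream>

struct Sample {
  int id;
  Sample* prev; // older sample
};

template <typename Functor>
void do_samples(Sample* last, const Sample* stop, Functor& func) {
  for (Sample* s = last; s != stop; s = s->prev) {
    func(s);
  }
}

struct Printer {
  void operator()(Sample* s) { std::cout << s->id << " "; }
};

int main() {
  Sample a = {1, NULL}, b = {2, &a}, c = {3, &b};
  Printer p;
  do_samples(&c, NULL, p); // all samples: 3 2 1
  std::cout << std::endl;
  do_samples(&c, &a, p);   // only samples newer than 'a': 3 2
  std::cout << std::endl;
  return 0;
}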
src/share/vm/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp
...
...
@@ -26,25 +26,26 @@
#define SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
#include "memory/allocation.hpp"
#include "utilities/exceptions.hpp"
 class EdgeStore;
-class JfrStackTraceRepository;
 class JfrCheckpointWriter;
+class JfrStackTraceRepository;
 class ObjectSampleMarker;
+class ObjectSampler;

 class ObjectSampleCheckpoint : AllStatic {
  public:
-  static void install(JfrCheckpointWriter& writer, bool class_unload, bool resume);
-  static void write(const EdgeStore* edge_store, bool emit_all, Thread* thread);
-  static int mark(ObjectSampleMarker& marker, bool emit_all);
+  static void install(JfrCheckpointWriter& writer, bool class_unload, bool type_set);
+  static void write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
+  static int mark(ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all);
 };

 class WriteObjectSampleStacktrace : public StackObj {
  private:
+  ObjectSampler* const _sampler;
   JfrStackTraceRepository& _stack_trace_repo;
  public:
-  WriteObjectSampleStacktrace(JfrStackTraceRepository& repo);
+  WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo);
   bool process();
 };
...
...
src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -350,7 +350,7 @@ int __write_root_description_info__(JfrCheckpointWriter* writer, JfrArtifactSet*
   return 1;
 }

-static traceid get_root_description_info_id(const Edge& edge, traceid id) {
+static traceid get_gc_root_description_info_id(const Edge& edge, traceid id) {
   assert(edge.is_root(), "invariant");
   if (EdgeUtils::is_leak_edge(edge)) {
     return 0;
...
...
@@ -518,7 +518,7 @@ static void write_root_descriptors(JfrCheckpointWriter& writer) {
}
}
-static void add_old_object_sample_info(const Edge* current, traceid id) {
+static void add_old_object_sample_info(const StoredEdge* current, traceid id) {
   assert(current != NULL, "invariant");
   if (sample_infos == NULL) {
     sample_infos = new SampleInfo();
...
...
@@ -528,11 +528,11 @@ static void add_old_object_sample_info(const Edge* current, traceid id) {
   assert(oosi != NULL, "invariant");
   oosi->_id = id;
   oosi->_data._object = current->pointee();
-  oosi->_data._reference_id = current->is_root() ? (traceid)0 : id;
+  oosi->_data._reference_id = current->parent() == NULL ? (traceid)0 : id;
   sample_infos->store(oosi);
 }
-static void add_reference_info(const RoutableEdge* current, traceid id, traceid parent_id) {
+static void add_reference_info(const StoredEdge* current, traceid id, traceid parent_id) {
   assert(current != NULL, "invariant");
   if (ref_infos == NULL) {
     ref_infos = new RefInfo();
...
...
@@ -544,37 +544,43 @@ static void add_reference_info(const RoutableEdge* current, traceid id, traceid
   ri->_id = id;

   ri->_data._array_info_id = !current->is_skip_edge() ? get_array_info_id(*current, id) : 0;
   ri->_data._field_info_id = ri->_data._array_info_id == 0 && !current->is_skip_edge() ? get_field_info_id(*current) : (traceid)0;
   ri->_data._old_object_sample_id = parent_id;
   ri->_data._skip = current->skip_length();
   ref_infos->store(ri);
 }

-static traceid add_root_info(const Edge* root, traceid id) {
-  assert(root != NULL, "invariant");
-  assert(root->is_root(), "invariant");
-  return get_root_description_info_id(*root, id);
-}
+static bool is_gc_root(const StoredEdge* current) {
+  assert(current != NULL, "invariant");
+  return current->parent() == NULL && current->gc_root_id() != 0;
+}
+
+static traceid add_gc_root_info(const StoredEdge* root, traceid id) {
+  assert(root != NULL, "invariant");
+  assert(is_gc_root(root), "invariant");
+  return get_gc_root_description_info_id(*root, id);
+}

-void ObjectSampleWriter::write(const RoutableEdge* edge) {
+void ObjectSampleWriter::write(const StoredEdge* edge) {
   assert(edge != NULL, "invariant");
   const traceid id = _store->get_id(edge);
   add_old_object_sample_info(edge, id);
-  const RoutableEdge* parent = edge->logical_parent();
+  const StoredEdge* const parent = edge->parent();
   if (parent != NULL) {
     add_reference_info(edge, id, _store->get_id(parent));
   } else {
-    assert(edge->is_root(), "invariant");
-    add_root_info(edge, id);
+    if (is_gc_root(edge)) {
+      assert(edge->gc_root_id() == id, "invariant");
+      add_gc_root_info(edge, id);
+    }
   }
 }

-ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store) :
+ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store) :
   _writer(writer),
   _store(store) {
   assert(store != NULL, "invariant");
-  assert(store->number_of_entries() > 0, "invariant");
+  assert(!store->is_empty(), "invariant");
   sample_infos = NULL;
   ref_infos = NULL;
   array_infos = NULL;
...
...
@@ -590,26 +596,7 @@ ObjectSampleWriter::~ObjectSampleWriter() {
   write_root_descriptors(_writer);
 }

-void ObjectSampleWriter::write_chain(const RoutableEdge& edge) {
-  assert(EdgeUtils::is_leak_edge(edge), "invariant");
-  if (edge.processed()) {
-    return;
-  }
-  EdgeUtils::collapse_chain(edge);
-  const RoutableEdge* current = &edge;
-  while (current != NULL) {
-    if (current->processed()) {
-      return;
-    }
-    write(current);
-    current->set_processed();
-    current = current->logical_parent();
-  }
-}
-
-bool ObjectSampleWriter::operator()(const RoutableEdge& edge) {
-  if (EdgeUtils::is_leak_edge(edge)) {
-    write_chain(edge);
-  }
+bool ObjectSampleWriter::operator()(StoredEdge& e) {
+  write(&e);
   return true;
 }
src/share/vm/jfr/leakprofiler/checkpoint/objectSampleWriter.hpp
...
...
@@ -30,21 +30,17 @@
 class Edge;
 class EdgeStore;
 class JfrCheckpointWriter;
-class RoutableEdge;
+class StoredEdge;

 class ObjectSampleWriter : public StackObj {
  private:
   JfrCheckpointWriter& _writer;
-  const EdgeStore* const _store;
-  void write(const RoutableEdge* edge);
-  void write_chain(const RoutableEdge& edge);
+  EdgeStore* const _store;
+  void write(const StoredEdge* edge);
  public:
-  ObjectSampleWriter(JfrCheckpointWriter& writer, const EdgeStore* store);
+  ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store);
   ~ObjectSampleWriter();
-  bool operator()(const RoutableEdge& edge);
+  bool operator()(StoredEdge& edge);
 };
#endif // SHARE_VM_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLEWRITER_HPP
src/share/vm/jfr/leakprofiler/checkpoint/rootResolver.hpp
...
...
@@ -25,8 +25,8 @@
#ifndef SHARE_VM_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
#define SHARE_VM_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
#include "memory/allocation.hpp"
#include "jfr/leakprofiler/utilities/rootType.hpp"
#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
struct
RootCallbackInfo
{
...
...
src/share/vm/jfr/leakprofiler/leakProfiler.cpp
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -23,68 +23,80 @@
*/
#include "precompiled.hpp"
#include "jfr/leakprofiler/emitEventOperation.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/startOperation.hpp"
#include "jfr/leakprofiler/stopOperation.hpp"
#include "jfr/leakprofiler/checkpoint/eventEmitter.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/ostream.hpp"
-// Only to be updated during safepoint
-ObjectSampler* LeakProfiler::_object_sampler = NULL;
+bool LeakProfiler::is_running() {
+  return ObjectSampler::is_created();
+}

-static volatile jbyte suspended = 0;
-
-bool LeakProfiler::start(jint sample_count) {
-  if (_object_sampler != NULL) {
-    // already started
+bool LeakProfiler::start(int sample_count) {
+  if (is_running()) {
     return true;
   }

   // Allows user to disable leak profiler on command line by setting queue size to zero.
-  if (sample_count > 0) {
-    StartOperation op(sample_count);
-    VMThread::execute(&op);
-    return _object_sampler != NULL;
+  if (sample_count == 0) {
+    return false;
   }
-  return false;
+
+  assert(!is_running(), "invariant");
+  assert(sample_count > 0, "invariant");
+
+  // schedule the safepoint operation for installing the object sampler
+  StartOperation op(sample_count);
+  VMThread::execute(&op);
+
+  if (!is_running()) {
+    if (LogJFR && Verbose) tty->print_cr("Object sampling could not be started because the sampler could not be allocated");
+    return false;
+  }
+  assert(is_running(), "invariant");
+  if (LogJFR && Verbose) tty->print_cr("Object sampling started");
+  return true;
 }

 bool LeakProfiler::stop() {
-  if (_object_sampler == NULL) {
-    // already stopped/not started
-    return true;
+  if (!is_running()) {
+    return false;
   }

+  // schedule the safepoint operation for uninstalling and destroying the object sampler
   StopOperation op;
   VMThread::execute(&op);
-  return _object_sampler == NULL;
+
+  assert(!is_running(), "invariant");
+  if (LogJFR && Verbose) tty->print_cr("Object sampling stopped");
+  return true;
 }

-void LeakProfiler::emit_events(jlong cutoff_ticks, bool emit_all) {
+void LeakProfiler::emit_events(int64_t cutoff_ticks, bool emit_all) {
   if (!is_running()) {
     return;
   }
-  EmitEventOperation op(cutoff_ticks, emit_all);
-  VMThread::execute(&op);
+  // exclusive access to object sampler instance
+  ObjectSampler* const sampler = ObjectSampler::acquire();
+  assert(sampler != NULL, "invariant");
+  EventEmitter::emit(sampler, cutoff_ticks, emit_all);
+  ObjectSampler::release();
 }

 void LeakProfiler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(),
     "Leak Profiler::oops_do(...) may only be called during safepoint");
-  if (_object_sampler != NULL) {
-    _object_sampler->oops_do(is_alive, f);
+  if (is_running()) {
+    ObjectSampler::oops_do(is_alive, f);
   }
 }

-void LeakProfiler::sample(HeapWord* object,
-                          size_t size,
-                          JavaThread* thread) {
+void LeakProfiler::sample(HeapWord* object, size_t size, JavaThread* thread) {
+  assert(is_running(), "invariant");
   assert(thread != NULL, "invariant");
   assert(thread->thread_state() == _thread_in_vm, "invariant");
...
...
@@ -94,39 +106,5 @@ void LeakProfiler::sample(HeapWord* object,
     return;
   }

-  _object_sampler->add(object, size, thread);
-}
-
-ObjectSampler* LeakProfiler::object_sampler() {
-  assert(is_suspended() || SafepointSynchronize::is_at_safepoint(),
-    "Leak Profiler::object_sampler() may only be called during safepoint");
-  return _object_sampler;
-}
-
-void LeakProfiler::set_object_sampler(ObjectSampler* object_sampler) {
-  assert(SafepointSynchronize::is_at_safepoint(),
-    "Leak Profiler::set_object_sampler() may only be called during safepoint");
-  _object_sampler = object_sampler;
-}
-
-bool LeakProfiler::is_running() {
-  return _object_sampler != NULL && !suspended;
-}
-
-bool LeakProfiler::is_suspended() {
-  return _object_sampler != NULL && suspended;
-}
-
-void LeakProfiler::resume() {
-  assert(is_suspended(), "invariant");
-  OrderAccess::storestore();
-  Atomic::store((jbyte)0, &suspended);
-  assert(is_running(), "invariant");
-}
-
-void LeakProfiler::suspend() {
-  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
-  assert(_object_sampler != NULL, "invariant");
-  assert(!is_suspended(), "invariant");
-  suspended = (jbyte)1; // safepoint visible
+  ObjectSampler::sample(object, size, thread);
 }
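The new emit_events() follows a strict acquire/work/release protocol on the sampler singleton. A standalone sketch of that protocol with an RAII guard (not HotSpot code; the HotSpot version releases explicitly instead, and all names here are illustrative):

#include <iostream>

class Sampler {
 public:
  static Sampler* acquire(); // blocks until exclusive access is granted
  static void release();
  void emit() { std::cout << "emitting samples" << std::endl; }
};

namespace {
Sampler g_sampler;
}
Sampler* Sampler::acquire() { return &g_sampler; } // sketch: no contention modeled
void Sampler::release() {}

struct SamplerGuard {
  Sampler* const sampler;
  SamplerGuard() : sampler(Sampler::acquire()) {}
  ~SamplerGuard() { Sampler::release(); }
};

void emit_events() {
  SamplerGuard guard;    // exclusive access to the sampler instance
  guard.sampler->emit(); // safe: no concurrent mutation while held
}                        // released here, on every exit path

int main() {
  emit_events();
  return 0;
}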
src/share/vm/jfr/leakprofiler/leakProfiler.hpp
...
...
@@ -28,35 +28,15 @@
#include "memory/allocation.hpp"
 class BoolObjectClosure;
-class ObjectSampler;
 class OopClosure;
 class Thread;

 class LeakProfiler : public AllStatic {
-  friend class ClassUnloadTypeSet;
-  friend class EmitEventOperation;
-  friend class ObjectSampleCheckpoint;
-  friend class StartOperation;
-  friend class StopOperation;
-  friend class TypeSet;
-  friend class WriteObjectSampleStacktrace;
-
- private:
-  static ObjectSampler* _object_sampler;
-
-  static void set_object_sampler(ObjectSampler* object_sampler);
-  static ObjectSampler* object_sampler();
-
-  static void suspend();
-  static void resume();
-  static bool is_suspended();
-
  public:
-  static bool start(jint sample_count);
+  static bool start(int sample_count);
   static bool stop();
-  static void emit_events(jlong cutoff_ticks, bool emit_all);
   static bool is_running();
+  static void emit_events(int64_t cutoff_ticks, bool emit_all);
   static void sample(HeapWord* object, size_t size, JavaThread* thread);
// Called by GC
...
...
src/share/vm/jfr/leakprofiler/sampling/objectSampler.cpp
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -34,8 +34,18 @@
#include "jfr/utilities/jfrTryLock.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
+static ObjectSampler* _instance = NULL;
+
+static ObjectSampler& instance() {
+  assert(_instance != NULL, "invariant");
+  return *_instance;
+}
+
 ObjectSampler::ObjectSampler(size_t size) :
   _priority_queue(new SamplePriorityQueue(size)),
   _list(new SampleList(size)),
...
...
@@ -43,7 +53,6 @@ ObjectSampler::ObjectSampler(size_t size) :
   _total_allocated(0),
   _threshold(0),
   _size(size),
-  _tryLock(0),
   _dead_samples(false) {}

 ObjectSampler::~ObjectSampler() {
...
...
@@ -53,32 +62,109 @@ ObjectSampler::~ObjectSampler() {
   _list = NULL;
 }

-void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
-  assert(thread != NULL, "invariant");
-  const traceid thread_id = thread->threadObj() != NULL ? thread->jfr_thread_local()->thread_id() : 0;
-  if (thread_id == 0) {
-    return;
-  }
-  assert(thread_id != 0, "invariant");
+bool ObjectSampler::create(size_t size) {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(_instance == NULL, "invariant");
+  _instance = new ObjectSampler(size);
+  return _instance != NULL;
+}

-  if (!thread->jfr_thread_local()->has_thread_checkpoint()) {
-    JfrCheckpointManager::create_thread_checkpoint(thread);
-    assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
-  }
+bool ObjectSampler::is_created() {
+  return _instance != NULL;
+}

-  traceid stack_trace_id = 0;
-  unsigned int stack_trace_hash = 0;
-  if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
-    stack_trace_id = JfrStackTraceRepository::record(thread, 0, &stack_trace_hash);
-    thread->jfr_thread_local()->set_cached_stack_trace_id(stack_trace_id, stack_trace_hash);
-  }
+ObjectSampler* ObjectSampler::sampler() {
+  assert(is_created(), "invariant");
+  return _instance;
+}
+
+void ObjectSampler::destroy() {
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  if (_instance != NULL) {
+    ObjectSampler* const sampler = _instance;
+    _instance = NULL;
+    delete sampler;
+  }
+}
+
+static volatile int _lock = 0;
+
+ObjectSampler* ObjectSampler::acquire() {
+  assert(is_created(), "invariant");
+  while (Atomic::cmpxchg(1, &_lock, 0) == 1) {}
+  return _instance;
+}
+
+void ObjectSampler::release() {
+  assert(is_created(), "invariant");
+  OrderAccess::fence();
+  _lock = 0;
+}
+
+static traceid get_thread_id(JavaThread* thread) {
+  assert(thread != NULL, "invariant");
+  if (thread->threadObj() == NULL) {
+    return 0;
+  }
+  const JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  if (!tl->has_thread_checkpoint()) {
+    JfrCheckpointManager::create_thread_checkpoint(thread);
+  }
+  assert(tl->has_thread_checkpoint(), "invariant");
+  return tl->thread_id();
+}
+
+// Populates the thread local stack frames, but does not add them
+// to the stacktrace repository (...yet, see stacktrace_id() below)
+//
+void ObjectSampler::fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread != NULL, "invariant");
+  if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
+    JfrStackTraceRepository::fill_stacktrace_for(thread, stacktrace, 0);
+  }
+}
+
+// We were successful in acquiring the try lock and have been selected for adding a sample.
+// Go ahead with installing our previously taken stacktrace into the stacktrace repository.
+//
+traceid ObjectSampler::stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(stacktrace->hash() != 0, "invariant");
+  const traceid stacktrace_id = JfrStackTraceRepository::add(stacktrace, thread);
+  thread->jfr_thread_local()->set_cached_stack_trace_id(stacktrace_id, stacktrace->hash());
+  return stacktrace_id;
+}
+
+void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
+  assert(thread != NULL, "invariant");
+  assert(is_created(), "invariant");
+
+  const traceid thread_id = get_thread_id(thread);
+  if (thread_id == 0) {
+    return;
+  }
+
+  const JfrThreadLocal* const tl = thread->jfr_thread_local();
+  JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
+  fill_stacktrace(&stacktrace, thread);

-  JfrTryLock tryLock(&_tryLock);
+  // try enter critical section
+  JfrTryLock tryLock(&_lock);
   if (!tryLock.has_lock()) {
     if (LogJFR && Verbose) tty->print_cr("Skipping old object sample due to lock contention");
     return;
   }

+  instance().add(obj, allocated, thread_id, &stacktrace, thread);
+}
+
+void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread_id != 0, "invariant");
+  assert(thread != NULL, "invariant");
+  assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant");
+
   if (_dead_samples) {
     scavenge();
     assert(!_dead_samples, "invariant");
...
...
@@ -100,13 +186,13 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
   }
   assert(sample != NULL, "invariant");
-  assert(thread_id != 0, "invariant");
   sample->set_thread_id(thread_id);
   sample->set_thread_checkpoint(thread->jfr_thread_local()->thread_checkpoint());

-  if (stack_trace_id != 0) {
-    sample->set_stack_trace_id(stack_trace_id);
-    sample->set_stack_trace_hash(stack_trace_hash);
+  const unsigned int stacktrace_hash = stacktrace->hash();
+  if (stacktrace_hash != 0) {
+    sample->set_stack_trace_id(stacktrace_id(stacktrace, thread));
+    sample->set_stack_trace_hash(stacktrace_hash);
   }

   sample->set_span(allocated);
...
...
@@ -117,38 +203,16 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, JavaThread* thread) {
   _priority_queue->push(sample);
 }

-const ObjectSample* ObjectSampler::last() const {
-  return _list->last();
-}
-
-const ObjectSample* ObjectSampler::first() const {
-  return _list->first();
-}
-
-const ObjectSample* ObjectSampler::last_resolved() const {
-  return _list->last_resolved();
-}
-
-void ObjectSampler::set_last_resolved(const ObjectSample* sample) {
-  _list->set_last_resolved(sample);
-}
-
-void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+void ObjectSampler::scavenge() {
   ObjectSample* current = _list->last();
   while (current != NULL) {
     ObjectSample* next = current->next();
-    if (!current->is_dead()) {
-      if (is_alive->do_object_b(current->object())) {
-        // The weakly referenced object is alive, update pointer
-        f->do_oop(const_cast<oop*>(current->object_addr()));
-      } else {
-        current->set_dead();
-        _dead_samples = true;
-      }
+    if (current->is_dead()) {
+      remove_dead(current);
     }
     current = next;
   }
-  _last_sweep = JfrTicks::now();
+  _dead_samples = false;
 }

 void ObjectSampler::remove_dead(ObjectSample* sample) {
...
...
@@ -165,16 +229,41 @@ void ObjectSampler::remove_dead(ObjectSample* sample) {
   _list->release(sample);
 }

-void ObjectSampler::scavenge() {
-  ObjectSample* current = _list->last();
+void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+  assert(is_created(), "invariant");
+  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+  ObjectSampler& sampler = instance();
+  ObjectSample* current = sampler._list->last();
   while (current != NULL) {
     ObjectSample* next = current->next();
-    if (current->is_dead()) {
-      remove_dead(current);
+    if (!current->is_dead()) {
+      if (is_alive->do_object_b(current->object())) {
+        // The weakly referenced object is alive, update pointer
+        f->do_oop(const_cast<oop*>(current->object_addr()));
+      } else {
+        current->set_dead();
+        sampler._dead_samples = true;
+      }
     }
     current = next;
   }
-  _dead_samples = false;
+  sampler._last_sweep = JfrTicks::now();
 }

+const ObjectSample* ObjectSampler::last() const {
+  return _list->last();
+}
+
+const ObjectSample* ObjectSampler::first() const {
+  return _list->first();
+}
+
+const ObjectSample* ObjectSampler::last_resolved() const {
+  return _list->last_resolved();
+}
+
+void ObjectSampler::set_last_resolved(const ObjectSample* sample) {
+  _list->set_last_resolved(sample);
+}
+
 int ObjectSampler::item_count() const {
...
...
@@ -188,7 +277,7 @@ const ObjectSample* ObjectSampler::item_at(int index) const {
 ObjectSample* ObjectSampler::item_at(int index) {
   return const_cast<ObjectSample*>(
     const_cast<const ObjectSampler*>(this)->item_at(index)
   );
 }

 const JfrTicks& ObjectSampler::last_sweep() const {
...
...
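ObjectSampler::acquire()/release() above implement a minimal spin lock with Atomic::cmpxchg and OrderAccess::fence. A standalone sketch of the same shape using C++11 atomics (not HotSpot code; names are illustrative):

#include <atomic>
#include <cassert>

static std::atomic<int> g_lock(0);

void acquire() {
  int expected = 0;
  // spin until we flip 0 -> 1; acquire ordering pairs with the release below
  while (!g_lock.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
    expected = 0; // compare_exchange_weak clobbers 'expected' on failure
  }
}

void release() {
  // release ordering publishes all writes made inside the critical section,
  // playing the role of OrderAccess::fence() before clearing the flag
  g_lock.store(0, std::memory_order_release);
}

int main() {
  acquire();
  assert(g_lock.load() == 1);
  release();
  assert(g_lock.load() == 0);
  return 0;
}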
src/share/vm/jfr/leakprofiler/sampling/objectSampler.hpp
...
...
@@ -28,7 +28,10 @@
#include "memory/allocation.hpp"
#include "jfr/utilities/jfrTime.hpp"
 typedef u8 traceid;

 class BoolObjectClosure;
+class JfrStackTrace;
 class OopClosure;
 class ObjectSample;
 class ObjectSampler;
...
...
@@ -40,11 +43,13 @@ class Thread;
// making sure the samples are evenly distributed as
// new entries are added and removed.
 class ObjectSampler : public CHeapObj<mtTracing> {
+  friend class EventEmitter;
+  friend class JfrRecorderService;
   friend class LeakProfiler;
+  friend class ObjectSampleCheckpoint;
   friend class StartOperation;
   friend class StopOperation;
-  friend class EmitEventOperation;
-  friend class ObjectSampleCheckpoint;
   friend class WriteObjectSampleStacktrace;
  private:
   SamplePriorityQueue* _priority_queue;
   SampleList* _list;
...
...
@@ -52,20 +57,33 @@ class ObjectSampler : public CHeapObj<mtTracing> {
   size_t _total_allocated;
   size_t _threshold;
   size_t _size;
-  volatile int _tryLock;
   bool _dead_samples;

+  // Lifecycle
   explicit ObjectSampler(size_t size);
   ~ObjectSampler();
+  static bool create(size_t size);
+  static bool is_created();
+  static ObjectSampler* sampler();
+  static void destroy();

-  void add(HeapWord* object, size_t size, JavaThread* thread);
-  void remove_dead(ObjectSample* sample);
+  // For operations that require exclusive access (non-safepoint)
+  static ObjectSampler* acquire();
+  static void release();
+
+  // Stacktrace
+  static void fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread);
+  traceid stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread);
+
+  // Sampling
+  static void sample(HeapWord* object, size_t size, JavaThread* thread);
+  void add(HeapWord* object, size_t size, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread);
+  void scavenge();
+  void remove_dead(ObjectSample* sample);

   // Called by GC
-  void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
+  static void oops_do(BoolObjectClosure* is_alive, OopClosure* f);

  public:
   const ObjectSample* item_at(int index) const;
   ObjectSample* item_at(int index);
   int item_count() const;
...
...
src/share/vm/jfr/leakprofiler/startOperation.hpp
...
...
@@ -25,34 +25,17 @@
#ifndef SHARE_VM_LEAKPROFILER_STARTOPERATION_HPP
#define SHARE_VM_LEAKPROFILER_STARTOPERATION_HPP
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "runtime/vm_operations.hpp"
#include "jfr/leakprofiler/utilities/vmOperation.hpp"
-// Safepoint operation for starting leak profiler object sampler
-class StartOperation : public VM_Operation {
+// Safepoint operation for creating and starting the leak profiler object sampler
+class StartOperation : public OldObjectVMOperation {
  private:
-  jlong _sample_count;
+  int _sample_count;
  public:
-  StartOperation(jlong sample_count) :
-    _sample_count(sample_count) {
-  }
-
-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
+  StartOperation(int sample_count) : _sample_count(sample_count) {}

   virtual void doit() {
-    assert(!LeakProfiler::is_running(), "invariant");
-    jint queue_size = JfrOptionSet::old_object_queue_size();
-    LeakProfiler::set_object_sampler(new ObjectSampler(queue_size));
-    if (LogJFR && Verbose) tty->print_cr("Object sampling started");
+    ObjectSampler::create(_sample_count);
   }
 };
...
...
src/share/vm/jfr/leakprofiler/stopOperation.hpp
...
...
@@ -25,30 +25,14 @@
#ifndef SHARE_VM_LEAKPROFILER_STOPOPERATION_HPP
#define SHARE_VM_LEAKPROFILER_STOPOPERATION_HPP
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "runtime/vm_operations.hpp"
#include "jfr/leakprofiler/utilities/vmOperation.hpp"
-// Safepoint operation for stopping leak profiler object sampler
-class StopOperation : public VM_Operation {
+// Safepoint operation for stopping and destroying the leak profiler object sampler
+class StopOperation : public OldObjectVMOperation {
  public:
   StopOperation() {}

-  Mode evaluation_mode() const {
-    return _safepoint;
-  }
-
-  VMOp_Type type() const {
-    return VMOp_GC_HeapInspection;
-  }
-
   virtual void doit() {
-    assert(LeakProfiler::is_running(), "invariant");
-    ObjectSampler* object_sampler = LeakProfiler::object_sampler();
-    delete object_sampler;
-    LeakProfiler::set_object_sampler(NULL);
-    if (LogJFR && Verbose) tty->print_cr("Object sampling stopped");
+    ObjectSampler::destroy();
   }
 };
...
...
src/share/vm/jfr/leakprofiler/utilities/vmOperation.hpp
0 → 100644
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
#define SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
#include "runtime/vm_operations.hpp"
class OldObjectVMOperation : public VM_Operation {
 public:
  Mode evaluation_mode() const {
    return _safepoint;
  }

  VMOp_Type type() const {
    return VMOp_JFROldObject;
  }
};
#endif // SHARE_JFR_LEAKPROFILER_UTILITIES_VMOPERATION_HPP
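This new header factors the mode and operation type shared by the start and stop operations into one base class, so each concrete operation only overrides doit(). A standalone sketch of the same factoring (not HotSpot code; the types are illustrative stand-ins for the VM_Operation family):

#include <iostream>

class VMOperation {
 public:
  virtual ~VMOperation() {}
  virtual const char* type() const { return "generic"; }
  virtual void doit() = 0;
};

class OldObjectVMOperation : public VMOperation {
 public:
  virtual const char* type() const { return "JFROldObject"; }
};

class StartOperation : public OldObjectVMOperation {
  int _sample_count;
 public:
  explicit StartOperation(int sample_count) : _sample_count(sample_count) {}
  virtual void doit() { std::cout << "create sampler, size " << _sample_count << std::endl; }
};

class StopOperation : public OldObjectVMOperation {
 public:
  virtual void doit() { std::cout << "destroy sampler" << std::endl; }
};

int main() {
  StartOperation start(256);
  StopOperation stop;
  start.doit();
  stop.doit();
  std::cout << start.type() << std::endl; // both subclasses report JFROldObject
  return 0;
}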
src/share/vm/jfr/recorder/checkpoint/types/jfrType.cpp
/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -316,7 +316,7 @@ void ClassUnloadTypeSet::serialize(JfrCheckpointWriter& writer) {
 void TypeSet::serialize(JfrCheckpointWriter& writer) {
   TypeSetSerialization type_set(false);
-  if (LeakProfiler::is_suspended()) {
+  if (LeakProfiler::is_running()) {
     JfrCheckpointWriter leakp_writer(false, true, Thread::current());
     type_set.write(writer, &leakp_writer);
     ObjectSampleCheckpoint::install(leakp_writer, false, true);
...
...
src/share/vm/jfr/recorder/service/jfrRecorderService.cpp
...
...
@@ -24,7 +24,9 @@
#include "precompiled.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
...
...
@@ -334,6 +336,7 @@ void JfrRecorderService::prepare_for_vm_error_rotation() {
     open_new_chunk(true);
   }
   _checkpoint_manager.register_service_thread(Thread::current());
+  JfrMetadataEvent::lock();
 }
 void JfrRecorderService::open_new_chunk(bool vm_error) {
...
...
@@ -397,6 +400,11 @@ static void write_stacktrace_checkpoint(JfrStackTraceRepository& stack_trace_rep
   write_stack_trace_checkpoint.process();
 }

+static void write_object_sample_stacktrace(ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repository) {
+  WriteObjectSampleStacktrace object_sample_stacktrace(sampler, stack_trace_repository);
+  object_sample_stacktrace.process();
+}
+
 static void write_stringpool_checkpoint(JfrStringPool& string_pool, JfrChunkWriter& chunkwriter) {
   WriteStringPool write_string_pool(string_pool);
   WriteStringPoolCheckpoint write_string_pool_checkpoint(chunkwriter, TYPE_STRING, write_string_pool);
...
...
@@ -417,8 +425,9 @@ static void write_stringpool_checkpoint_safepoint(JfrStringPool& string_pool, Jf
// write checkpoint epoch transition list->
// write stack trace checkpoint ->
// write string pool checkpoint ->
-// write storage ->
-// release stream lock
+// write object sample stacktraces ->
+// write storage ->
+// release stream lock
//
 void JfrRecorderService::pre_safepoint_write() {
   MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
...
...
@@ -427,6 +436,13 @@ void JfrRecorderService::pre_safepoint_write() {
   _checkpoint_manager.write_epoch_transition_mspace();
   write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, false);
   write_stringpool_checkpoint(_string_pool, _chunkwriter);
+  if (LeakProfiler::is_running()) {
+    // Exclusive access to the object sampler instance.
+    // The sampler is released (unlocked) later in post_safepoint_write.
+    ObjectSampler* const sampler = ObjectSampler::acquire();
+    assert(sampler != NULL, "invariant");
+    write_object_sample_stacktrace(sampler, _stack_trace_repository);
+  }
   _storage.write();
 }
...
...
@@ -435,16 +451,10 @@ void JfrRecorderService::invoke_safepoint_write() {
   VMThread::execute(&safepoint_task);
 }

-static void write_object_sample_stacktrace(JfrStackTraceRepository& stack_trace_repository) {
-  WriteObjectSampleStacktrace object_sample_stacktrace(stack_trace_repository);
-  object_sample_stacktrace.process();
-}
-
 //
 // safepoint write sequence
 //
 // lock stream lock ->
-// write object sample stacktraces ->
 // write stacktrace repository ->
 // write string pool ->
 // write safepoint dependent types ->
...
...
@@ -457,7 +467,6 @@ static void write_object_sample_stacktrace(JfrStackTraceRepository& stack_trace_
 void JfrRecorderService::safepoint_write() {
   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
   MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
-  write_object_sample_stacktrace(_stack_trace_repository);
   write_stacktrace_checkpoint(_stack_trace_repository, _chunkwriter, true);
   write_stringpool_checkpoint_safepoint(_string_pool, _chunkwriter);
   _checkpoint_manager.write_safepoint_types();
...
...
@@ -477,13 +486,14 @@ static jlong write_metadata_event(JfrChunkWriter& chunkwriter) {
//
// post-safepoint write sequence
//
-// lock stream lock ->
-// write type set ->
-// write checkpoints ->
-// write metadata event ->
-// write chunk header ->
-// close chunk fd ->
-// release stream lock
+// write type set ->
+// release object sampler ->
+// lock stream lock ->
+// write checkpoints ->
+// write metadata event ->
+// write chunk header ->
+// close chunk fd ->
+// release stream lock
//
 void JfrRecorderService::post_safepoint_write() {
   assert(_chunkwriter.is_valid(), "invariant");
...
...
@@ -492,7 +502,11 @@ void JfrRecorderService::post_safepoint_write() {
// already tagged artifacts for the previous epoch. We can accomplish this concurrently
// with threads now tagging artifacts in relation to the new, now updated, epoch and remain outside of a safepoint.
   _checkpoint_manager.write_type_set();
+  if (LeakProfiler::is_running()) {
+    // The object sampler instance was exclusively acquired and locked in pre_safepoint_write.
+    // Note: There is a dependency on write_type_set() above, ensure the release is subsequent.
+    ObjectSampler::release();
+  }
   MutexLockerEx stream_lock(JfrStream_lock, Mutex::_no_safepoint_check_flag);
   // serialize any outstanding checkpoint memory
   _checkpoint_manager.write();
// serialize the metadata descriptor event and close out the chunk
...
...
@@ -511,11 +525,9 @@ void JfrRecorderService::vm_error_rotation() {
 void JfrRecorderService::finalize_current_chunk_on_vm_error() {
   assert(_chunkwriter.is_valid(), "invariant");
   pre_safepoint_write();
-  JfrMetadataEvent::lock();
   // Do not attempt safepoint dependent operations during emergency dump.
   // Optimistically write tagged artifacts.
   _checkpoint_manager.shift_epoch();
-  _checkpoint_manager.write_type_set();
   // update time
   _chunkwriter.time_stamp_chunk_now();
   post_safepoint_write();
...
...
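The rotation sequence above now holds the sampler across phases: it is acquired in pre_safepoint_write and only released in post_safepoint_write, after the type set is written, so sample metadata stays stable for the whole rotation. A standalone sketch of that cross-phase ordering (not HotSpot code; names are illustrative):

#include <iostream>

static bool g_sampler_locked = false;

void pre_safepoint_write() {
  g_sampler_locked = true; // models ObjectSampler::acquire()
  std::cout << "pre: sample stacktraces written under sampler lock" << std::endl;
}

void safepoint_write() {
  std::cout << "safepoint: stacktrace repository + safepoint types" << std::endl;
}

void post_safepoint_write() {
  std::cout << "post: type set written" << std::endl;
  g_sampler_locked = false; // models ObjectSampler::release(), after write_type_set
  std::cout << "post: sampler released, checkpoints + chunk close" << std::endl;
}

int main() {
  pre_safepoint_write();
  safepoint_write();
  post_safepoint_write();
  return g_sampler_locked ? 1 : 0; // must be unlocked at the end
}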
src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp
...
...
@@ -164,7 +164,13 @@ traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
}
 traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
-  return instance().add_trace(stacktrace);
+  traceid tid = instance().add_trace(stacktrace);
+  if (tid == 0) {
+    stacktrace.resolve_linenos();
+    tid = instance().add_trace(stacktrace);
+  }
+  assert(tid != 0, "invariant");
+  return tid;
 }
 traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
...
...
@@ -187,54 +193,29 @@ traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
   return instance().record_for((JavaThread*)thread, skip, frames, tl->stackdepth());
 }

-traceid JfrStackTraceRepository::record(Thread* thread, int skip, unsigned int* hash) {
-  assert(thread == Thread::current(), "invariant");
-  JfrThreadLocal* const tl = thread->jfr_thread_local();
-  assert(tl != NULL, "invariant");
-  if (tl->has_cached_stack_trace()) {
-    *hash = tl->cached_stack_trace_hash();
-    return tl->cached_stack_trace_id();
-  }
-  if (!thread->is_Java_thread() || thread->is_hidden_from_external_view()) {
-    return 0;
-  }
-  JfrStackFrame* frames = tl->stackframes();
-  if (frames == NULL) {
-    // pending oom
-    return 0;
-  }
-  assert(frames != NULL, "invariant");
-  assert(tl->stackframes() == frames, "invariant");
-  return instance().record_for((JavaThread*)thread, skip, frames, tl->stackdepth(), hash);
-}
-
 traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames) {
   JfrStackTrace stacktrace(frames, max_frames);
-  if (!stacktrace.record_safe(thread, skip)) {
-    return 0;
-  }
-  traceid tid = add(stacktrace);
-  if (tid == 0) {
-    stacktrace.resolve_linenos();
-    tid = add(stacktrace);
-  }
-  return tid;
+  return stacktrace.record_safe(thread, skip) ? add(stacktrace) : 0;
 }

-traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames, unsigned int* hash) {
-  assert(hash != NULL && *hash == 0, "invariant");
-  JfrStackTrace stacktrace(frames, max_frames);
-  if (!stacktrace.record_safe(thread, skip, true)) {
-    return 0;
-  }
-  traceid tid = add(stacktrace);
-  if (tid == 0) {
-    stacktrace.resolve_linenos();
-    tid = add(stacktrace);
-  }
-  *hash = stacktrace._hash;
-  return tid;
-}
+traceid JfrStackTraceRepository::add(const JfrStackTrace* stacktrace, JavaThread* thread) {
+  assert(stacktrace != NULL, "invariant");
+  assert(thread != NULL, "invariant");
+  assert(stacktrace->hash() != 0, "invariant");
+  return add(*stacktrace);
+}
+
+bool JfrStackTraceRepository::fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip) {
+  assert(thread == Thread::current(), "invariant");
+  assert(stacktrace != NULL, "invariant");
+  JfrThreadLocal* const tl = thread->jfr_thread_local();
+  assert(tl != NULL, "invariant");
+  const unsigned int cached_stacktrace_hash = tl->cached_stack_trace_hash();
+  if (cached_stacktrace_hash != 0) {
+    stacktrace->set_hash(cached_stacktrace_hash);
+    return true;
+  }
+  return stacktrace->record_safe(thread, skip, true);
+}
 size_t JfrStackTraceRepository::write_impl(JfrChunkWriter& sw, bool clear) {
...
...
@@ -363,7 +344,7 @@ const JfrStackTraceRepository::StackTrace* JfrStackTraceRepository::resolve_entr
   return trace;
 }

-void JfrStackFrame::resolve_lineno() {
+void JfrStackFrame::resolve_lineno() const {
   assert(_method, "no method pointer");
   assert(_line == 0, "already have linenumber");
   _line = _method->line_number_from_bci(_bci);
...
...
@@ -375,7 +356,7 @@ void JfrStackTrace::set_frame(u4 frame_pos, JfrStackFrame& frame) {
   _frames[frame_pos] = frame;
 }

-void JfrStackTrace::resolve_linenos() {
+void JfrStackTrace::resolve_linenos() const {
   for (unsigned int i = 0; i < _nr_of_frames; i++) {
     _frames[i].resolve_lineno();
   }
...
...
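The repository changes above centralize a lazy retry: a trace is first added as-is, and only when the insert reports a miss (tid == 0) is the expensive line-number resolution performed before retrying. A standalone sketch of that two-step insert, including the const/mutable trick the same diff applies to resolve_linenos (not HotSpot code; the names and the miss rule are illustrative):

#include <cassert>
#include <iostream>

struct Trace {
  mutable bool linenos_resolved; // mutable so a const trace can be lazily enriched
  Trace() : linenos_resolved(false) {}
  void resolve_linenos() const { linenos_resolved = true; } // the expensive step
};

// Pretend the repository only accepts traces with resolved line numbers.
static unsigned long add_trace(const Trace& t) {
  static unsigned long next_id = 1;
  return t.linenos_resolved ? next_id++ : 0;
}

unsigned long add(const Trace& t) {
  unsigned long tid = add_trace(t);
  if (tid == 0) {
    t.resolve_linenos(); // lazy: only paid on the slow path
    tid = add_trace(t);
  }
  assert(tid != 0);
  return tid;
}

int main() {
  Trace t;
  std::cout << add(t) << std::endl; // resolved lazily, then stored
  return 0;
}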
src/share/vm/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp
...
...
@@ -36,9 +36,9 @@ class Method;
 class JfrStackFrame {
  private:
-  const Method* _method;
+  mutable const Method* _method;
   traceid _methodid;
-  int _line;
+  mutable int _line;
   int _bci;
   u1 _type;
...
...
@@ -58,7 +58,7 @@ class JfrStackFrame {
   bool equals(const JfrStackFrame& rhs) const;
   void write(JfrChunkWriter& cw) const;
   void write(JfrCheckpointWriter& cpw) const;
-  void resolve_lineno();
+  void resolve_lineno() const;
 };

 class JfrStackTrace : public StackObj {
...
...
@@ -70,7 +70,7 @@ class JfrStackTrace : public StackObj {
   unsigned int _hash;
   const u4 _max_frames;
   bool _reached_root;
-  bool _lineno;
+  mutable bool _lineno;

  public:
   JfrStackTrace(JfrStackFrame* frames, u4 max_frames) :
     _frames(frames),
...
@@ -82,9 +82,10 @@ class JfrStackTrace : public StackObj {
     _lineno(false) {}

   bool record_thread(JavaThread& thread, frame& frame);
   bool record_safe(JavaThread* thread, int skip, bool leakp = false);
-  void resolve_linenos();
+  void resolve_linenos() const;
   void set_nr_of_frames(u4 nr_of_frames) { _nr_of_frames = nr_of_frames; }
   void set_hash(unsigned int hash) { _hash = hash; }
+  unsigned int hash() const { return _hash; }
   void set_frame(u4 frame_pos, JfrStackFrame& frame);
   void set_reached_root(bool reached_root) { _reached_root = reached_root; }
   bool full_stacktrace() const { return _reached_root; }
...
...
@@ -128,23 +129,26 @@ class JfrStackTraceRepository : public JfrCHeapObj {
   traceid _next_id;
   u4 _entries;

-  size_t write_impl(JfrChunkWriter& cw, bool clear);
-  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
-  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames, unsigned int* hash);
   traceid add_trace(const JfrStackTrace& stacktrace);
-  const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
+  static traceid add(const JfrStackTrace* stacktrace, JavaThread* thread);
+  traceid record_for(JavaThread* thread, int skip, JfrStackFrame* frames, u4 max_frames);
+  size_t write_impl(JfrChunkWriter& cw, bool clear);
+  const StackTrace* resolve_entry(unsigned int hash, traceid id) const;
   static void write_metadata(JfrCheckpointWriter& cpw);
+  static bool fill_stacktrace_for(JavaThread* thread, JfrStackTrace* stacktrace, int skip);

   JfrStackTraceRepository();
   static JfrStackTraceRepository& instance();
  public:
   static JfrStackTraceRepository* create();
   bool initialize();
   static void destroy();

   static traceid add(const JfrStackTrace& stacktrace);
   static traceid record(Thread* thread, int skip = 0);
-  static traceid record(Thread* thread, int skip, unsigned int* hash);
   traceid write(JfrCheckpointWriter& cpw, traceid id, unsigned int hash);
   size_t write(JfrChunkWriter& cw, bool clear);
   size_t clear();
...
...
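The header change makes _method, _line and _lineno mutable so that resolve_lineno()/resolve_linenos() can be const: the sampled traces are handed around by const reference, and the line number is a lazily filled cache rather than logical state. A short sketch of the idiom, where Frame is a hypothetical stand-in for JfrStackFrame and the line computation is a placeholder for line_number_from_bci:

struct Frame {
  int _bci;
  mutable int _line;               // cache; writable even from const methods
  explicit Frame(int bci) : _bci(bci), _line(0) {}
  void resolve_lineno() const {    // const: callers only hold const references
    if (_line == 0) {
      _line = _bci + 1;            // placeholder for line_number_from_bci(_bci)
    }
  }
};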
src/share/vm/jfr/support/jfrFlush.hpp
View file @ 2a8d5f43
...
@@ -48,10 +48,12 @@ void jfr_clear_stacktrace(Thread* t);
 template <typename Event>
 class JfrConditionalFlush {
+ protected:
+  bool _enabled;
  public:
   typedef JfrBuffer Type;
-  JfrConditionalFlush(Thread* t) {
-    if (jfr_is_event_enabled(Event::eventId)) {
+  JfrConditionalFlush(Thread* t) : _enabled(jfr_is_event_enabled(Event::eventId)) {
+    if (_enabled) {
       jfr_conditional_flush(Event::eventId, sizeof(Event), t);
     }
   }
...
@@ -63,7 +65,7 @@ class JfrConditionalFlushWithStacktrace : public JfrConditionalFlush<Event> {
   bool _owner;
  public:
   JfrConditionalFlushWithStacktrace(Thread* t) : JfrConditionalFlush<Event>(t), _t(t), _owner(false) {
-    if (Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
+    if (this->_enabled && Event::has_stacktrace() && jfr_has_stacktrace_enabled(Event::eventId)) {
       _owner = jfr_save_stacktrace(t);
     }
   }
...
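The point of the jfrFlush.hpp change is that the base class now evaluates the enabled check once, caches it in _enabled, and the stacktrace variant guards on this->_enabled, so no stack walk is performed for a disabled event. A reduced sketch under those assumptions (is_event_enabled, save_stacktrace and DummyEvent are stand-ins, not the JFR API):

static bool is_event_enabled(int /*event_id*/) { return true; }  // stand-in
static bool save_stacktrace() { return true; }                   // stand-in

template <typename Event>
class ConditionalFlush {
 protected:
  bool _enabled;  // evaluated exactly once, in the base constructor
 public:
  ConditionalFlush() : _enabled(is_event_enabled(Event::eventId)) {
    if (_enabled) {
      // flush thread-local buffers for Event here
    }
  }
};

template <typename Event>
class ConditionalFlushWithStacktrace : public ConditionalFlush<Event> {
  bool _owner;
 public:
  ConditionalFlushWithStacktrace() : _owner(false) {
    // this-> is required: _enabled lives in a dependent base class template.
    if (this->_enabled && Event::has_stacktrace()) {
      _owner = save_stacktrace();  // skipped entirely when the event is disabled
    }
  }
};

struct DummyEvent {
  static const int eventId = 42;
  static bool has_stacktrace() { return true; }
};
// usage: ConditionalFlushWithStacktrace<DummyEvent> flush;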
src/share/vm/jfr/support/jfrThreadLocal.cpp
View file @ 2a8d5f43
...
@@ -150,9 +150,7 @@ JfrBuffer* JfrThreadLocal::install_java_buffer() const {
 JfrStackFrame* JfrThreadLocal::install_stackframes() const {
   assert(_stackframes == NULL, "invariant");
-  _stackdepth = (u4)JfrOptionSet::stackdepth();
-  guarantee(_stackdepth > 0, "Stackdepth must be > 0");
-  _stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, _stackdepth, mtTracing);
+  _stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, stackdepth(), mtTracing);
   return _stackframes;
 }
...
@@ -163,3 +161,7 @@ ByteSize JfrThreadLocal::trace_id_offset() {
 ByteSize JfrThreadLocal::java_event_writer_offset() {
   return in_ByteSize(offset_of(JfrThreadLocal, _java_event_writer));
 }
+
+u4 JfrThreadLocal::stackdepth() const {
+  return _stackdepth != 0 ? _stackdepth : (u4)JfrOptionSet::stackdepth();
+}
src/share/vm/jfr/support/jfrThreadLocal.hpp
View file @ 2a8d5f43
...
@@ -113,9 +113,7 @@ class JfrThreadLocal {
     _stackframes = frames;
   }
-  u4 stackdepth() const { return _stackdepth; }
+  u4 stackdepth() const;
   void set_stackdepth(u4 depth) {
     _stackdepth = depth;
...
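Taken together, the jfrThreadLocal changes replace the eager assignment in install_stackframes() with a lazy accessor: a _stackdepth of 0 means "not yet set for this thread", and stackdepth() falls back to the global option. A minimal sketch of that accessor pattern, where global_stackdepth() is a stand-in for JfrOptionSet::stackdepth():

#include <stdint.h>

static uint32_t global_stackdepth() { return 64; }  // stand-in for the option

class ThreadLocalSketch {
  uint32_t _stackdepth;  // 0 means "unset"; the option value is the default
 public:
  ThreadLocalSketch() : _stackdepth(0) {}
  uint32_t stackdepth() const {
    return _stackdepth != 0 ? _stackdepth : global_stackdepth();
  }
  void set_stackdepth(uint32_t depth) { _stackdepth = depth; }
};

This lets a caller (such as the leak profiler) install a deeper per-thread stack depth without touching the global setting.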
src/share/vm/runtime/vm_operations.hpp
View file @ 2a8d5f43
...
@@ -98,6 +98,7 @@
   template(RotateGCLog)                           \
   template(WhiteBoxOperation)                     \
   template(ClassLoaderStatsOperation)             \
+  template(JFROldObject)                          \

 class VM_Operation : public CHeapObj<mtInternal> {
  public:
...