Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
xxadev
tensorflow
提交
cda80a78
T
tensorflow
项目概览
xxadev
/
tensorflow
与 Fork 源项目一致
从无法访问的项目Fork
通知
3
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
T
tensorflow
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
cda80a78
编写于
7月 27, 2017
作者:
E
Eric Liu
提交者:
TensorFlower Gardener
7月 27, 2017
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
[tpu profiler] Dump HLO graphs in profile responses to the log directory.
PiperOrigin-RevId: 163318992
上级
dd1f0cdd
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
35 additions
and
4 deletions
+35
-4
tensorflow/contrib/tpu/profiler/BUILD
tensorflow/contrib/tpu/profiler/BUILD
+2
-0
tensorflow/contrib/tpu/profiler/capture_tpu_profile.cc
tensorflow/contrib/tpu/profiler/capture_tpu_profile.cc
+33
-4
未找到文件。
tensorflow/contrib/tpu/profiler/BUILD
浏览文件 @
cda80a78
...
...
@@ -21,8 +21,10 @@ cc_binary(
visibility
=
[
"//tensorflow/contrib/tpu/profiler:__subpackages__"
],
deps
=
[
":tpu_profiler_proto_cc"
,
"//tensorflow/core:framework"
,
"//tensorflow/core:framework_internal"
,
"//tensorflow/core:lib"
,
"//tensorflow/core:protos_all_cc"
,
"//tensorflow/core/distributed_runtime/rpc:grpc_util"
,
"@grpc//:grpc++_unsecure"
,
],
...
...
tensorflow/contrib/tpu/profiler/capture_tpu_profile.cc
浏览文件 @
cda80a78
...
...
@@ -26,6 +26,7 @@ limitations under the License.
#include "tensorflow/contrib/tpu/profiler/tpu_profiler.grpc.pb.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
...
...
@@ -33,6 +34,7 @@ limitations under the License.
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/core/util/events_writer.h"
namespace
tensorflow
{
namespace
tpu
{
...
...
@@ -47,6 +49,7 @@ using ::tensorflow::WriteStringToFile;
constexpr
char
kProfilePluginDirectory
[]
=
"plugins/profile/"
;
constexpr
char
kTraceFileName
[]
=
"trace"
;
constexpr
char
kGraphRunPrefix
[]
=
"tpu_profiler.hlo_graph."
;
tensorflow
::
string
GetCurrentTimeStampAsString
()
{
char
s
[
128
];
...
...
@@ -55,10 +58,10 @@ tensorflow::string GetCurrentTimeStampAsString() {
return
s
;
}
// The trace will be stored in <logdir>/plugins/profile/<timestamp>/trace.
void
DumpTraceToLogDirectory
(
const
tensorflow
::
string
&
logdir
,
// The trace will be stored in <logdir>/plugins/profile/<run>/trace.
void
DumpTraceToLogDirectory
(
tensorflow
::
StringPiece
logdir
,
tensorflow
::
StringPiece
run
,
tensorflow
::
StringPiece
trace
)
{
tensorflow
::
string
run
=
GetCurrentTimeStampAsString
();
tensorflow
::
string
run_dir
=
JoinPath
(
logdir
,
kProfilePluginDirectory
,
run
);
TF_CHECK_OK
(
Env
::
Default
()
->
RecursivelyCreateDir
(
run_dir
));
tensorflow
::
string
path
=
JoinPath
(
run_dir
,
kTraceFileName
);
...
...
@@ -83,6 +86,18 @@ ProfileResponse Profile(const tensorflow::string& service_addr,
return
response
;
}
// Writes a serialized graph to an events file so it can be rendered by the
// graph plugin. The plugin expects the graph in <logdir>/<run>/<event.file>,
// where the run directory name is kGraphRunPrefix followed by `run`.
//
// Crashes the process (TF_CHECK_OK) if the run directory cannot be created.
void DumpGraph(tensorflow::StringPiece logdir, tensorflow::StringPiece run,
               const tensorflow::string& graph_def) {
  // Build the per-run output directory name from the fixed prefix + run name.
  const tensorflow::string graph_run =
      tensorflow::strings::StrCat(kGraphRunPrefix, run);
  const tensorflow::string graph_dir = JoinPath(logdir, graph_run);
  TF_CHECK_OK(Env::Default()->RecursivelyCreateDir(graph_dir));

  // Wrap the serialized GraphDef in an Event record and append it to an
  // events file inside the run directory.
  tensorflow::Event graph_event;
  graph_event.set_graph_def(graph_def);
  tensorflow::EventsWriter writer(JoinPath(graph_dir, "events"));
  writer.WriteEvent(graph_event);
}
}
// namespace
}
// namespace tpu
}
// namespace tensorflow
...
...
@@ -111,14 +126,28 @@ int main(int argc, char** argv) {
int
duration_ms
=
FLAGS_duration_ms
;
tensorflow
::
ProfileResponse
response
=
tensorflow
::
tpu
::
Profile
(
FLAGS_service_addr
,
duration_ms
);
// Use the current timestamp as the run name.
tensorflow
::
string
run
=
tensorflow
::
tpu
::
GetCurrentTimeStampAsString
();
// Ignore computation_graph for now.
if
(
response
.
encoded_trace
().
empty
())
{
LOG
(
WARNING
)
<<
"No trace event is collected during the "
<<
duration_ms
<<
"ms interval."
;
}
else
{
tensorflow
::
tpu
::
DumpTraceToLogDirectory
(
FLAGS_logdir
,
tensorflow
::
tpu
::
DumpTraceToLogDirectory
(
FLAGS_logdir
,
run
,
response
.
encoded_trace
());
}
int
num_graphs
=
response
.
computation_graph_size
();
if
(
num_graphs
>
0
)
{
// The server might generate multiple graphs for one program; we simply
// pick the first one.
if
(
num_graphs
>
1
)
{
LOG
(
INFO
)
<<
num_graphs
<<
" TPU program variants observed over the profiling period. "
<<
"One computation graph will be chosen arbitrarily."
;
}
tensorflow
::
tpu
::
DumpGraph
(
FLAGS_logdir
,
run
,
response
.
computation_graph
(
0
).
SerializeAsString
());
}
// Print this at the end so that it's not buried in irrelevant LOG messages.
std
::
cout
<<
"NOTE: using the trace duration "
<<
duration_ms
<<
"ms."
<<
std
::
endl
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录