Commit 7f8a7536
Authored on Jul 16, 2020 by mindspore-ci-bot
Committed by Gitee on Jul 16, 2020
!3112 Remove build option -S
Merge pull request !3112 from caifubi/data-dump-build
Parents: 4936fe48, cb8b5dbd
Showing 10 changed files with 18 additions and 57 deletions (+18, -57).
build.sh                                                          +2  -12
cmake/options.cmake                                               +0  -4
mindspore/ccsrc/backend/kernel_compiler/ascend_kernel_mod.h       +1  -9
mindspore/ccsrc/debug/CMakeLists.txt                              +1  -3
mindspore/ccsrc/debug/data_dump_parser.cc                         +1  -1
mindspore/ccsrc/runtime/device/ascend/ascend_kernel_runtime.cc    +9  -20
mindspore/ccsrc/runtime/device/ascend/ascend_kernel_runtime.h     +0  -4
mindspore/ccsrc/runtime/device/ascend/dump/data_dumper.cc         +0  -2
mindspore/ccsrc/runtime/device/ascend/dump/data_dumper.h          +0  -2
tests/ut/cpp/stub/tasksink/task_sink_stub.cc                      +4  -0
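Taken together, the changes drop the compile-time switch: the -S build option used to map to -DENABLE_DATA_DUMP, and every #ifdef ENABLE_DATA_DUMP guard is removed, so the data-dump sources are compiled into the Ascend build unconditionally and the decision to dump is made at run time by DataDumpParser, which reads the ENABLE_DATA_DUMP environment variable (see data_dump_parser.cc below). The following is a minimal, hypothetical C++ sketch of that runtime-gating pattern; the class name, the "1" convention, and the message only loosely mirror the real parser and are assumptions, not the MindSpore implementation.

// Minimal sketch (not the real MindSpore class) of gating a feature on an
// environment variable at run time instead of a compile-time definition.
#include <cstdlib>
#include <iostream>
#include <string>

class DumpConfig {
 public:
  static DumpConfig &GetInstance() {
    static DumpConfig instance;  // singleton, like DataDumpParser::GetInstance()
    return instance;
  }

  // True only when ENABLE_DATA_DUMP is exported and set to "1" (assumed convention).
  bool DumpEnabled() const {
    const char *enable_dump = std::getenv("ENABLE_DATA_DUMP");
    if (enable_dump == nullptr) {
      std::clog << "[DataDump] enable dump is null. Please export ENABLE_DATA_DUMP\n";
      return false;
    }
    return std::string(enable_dump) == "1";
  }

 private:
  DumpConfig() = default;
};

int main() {
  // The dump path is always linked in; only this runtime check decides whether it runs.
  std::cout << std::boolalpha << "dump enabled: " << DumpConfig::GetInstance().DumpEnabled() << '\n';
  return 0;
}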
build.sh

@@ -24,7 +24,7 @@ usage()
 {
   echo "Usage:"
   echo "bash build.sh [-d] [-r] [-v] [-c on|off] [-t on|off] [-g on|off] [-h] [-b ge] [-m infer|train] \\"
-  echo "              [-a on|off] [-Q on|off] [-S on|off] [-p on|off] [-i] [-L] [-R] [-D on|off] [-j[n]] [-e gpu|d|cpu] \\"
+  echo "              [-a on|off] [-Q on|off] [-p on|off] [-i] [-L] [-R] [-D on|off] [-j[n]] [-e gpu|d|cpu] \\"
   echo "              [-P on|off] [-z [on|off]] [-M on|off] [-V 9.2|10.1] [-I] [-K] [-B on|off] [-E] [-l on|off]"
   echo ""
   echo "Options:"
@@ -48,7 +48,6 @@ usage()
   echo "    -P Enable dump anf graph to file in ProtoBuffer format, default on"
   echo "    -Q Enable dump memory, default off"
   echo "    -D Enable dumping of function graph ir, default on"
-  echo "    -S Enable async data dump, default off"
   echo "    -z Compile dataset & mindrecord, default on"
   echo "    -M Enable MPI and NCCL for GPU training, gpu default on"
   echo "    -V Specify the minimum required cuda version, default CUDA 10.1"
@@ -89,7 +88,6 @@ checkopts()
   ENABLE_TIMELINE="off"
   ENABLE_DUMP2PROTO="on"
   ENABLE_DUMPE2E="off"
-  ENABLE_DATA_DUMP="off"
   ENABLE_DUMP_IR="on"
   COMPILE_MINDDATA="on"
   ENABLE_MPI="off"
@@ -104,7 +102,7 @@ checkopts()
   ENABLE_PYTHON="on"
   # Process the options
-  while getopts 'drvj:c:t:hsb:a:g:p:ie:m:l:I:LRP:Q:S:D:zM:V:K:sB:E' opt
+  while getopts 'drvj:c:t:hsb:a:g:p:ie:m:l:I:LRP:Q:D:zM:V:K:sB:E' opt
   do
     OPTARG=$(echo ${OPTARG} | tr '[A-Z]' '[a-z]')
     case "${opt}" in
@@ -220,11 +218,6 @@ checkopts()
        ENABLE_DUMPE2E="$OPTARG"
        echo "enable dump end to end"
        ;;
-      S)
-        check_on_off $OPTARG S
-        ENABLE_DATA_DUMP="$OPTARG"
-        echo "enable data dump"
-        ;;
      D)
        check_on_off $OPTARG D
        ENABLE_DUMP_IR="$OPTARG"
@@ -328,9 +321,6 @@ build_mindspore()
   if [[ "X$ENABLE_DUMPE2E" = "Xon" ]]; then
     CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_DUMP_E2E=ON"
   fi
-  if [[ "X$ENABLE_DATA_DUMP" = "Xon" ]]; then
-    CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_DATA_DUMP=ON"
-  fi
   CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_DUMP_IR=${ENABLE_DUMP_IR}"
   CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_PYTHON=${ENABLE_PYTHON}"
   if [[ "X$ENABLE_MPI" = "Xon" ]]; then
cmake/options.cmake

@@ -116,10 +116,6 @@ if(ENABLE_DUMP_E2E)
     add_compile_definitions(ENABLE_DUMP_E2E)
 endif()

-if(ENABLE_DATA_DUMP)
-    add_compile_definitions(ENABLE_DATA_DUMP)
-endif()
-
 if(ENABLE_DEBUGGER)
     add_compile_definitions(ENABLE_DEBUGGER)
 endif()
mindspore/ccsrc/backend/kernel_compiler/ascend_kernel_mod.h

@@ -21,9 +21,7 @@
 #include <memory>
 #include "framework/ge_runtime/task_info.h"
 #include "backend/kernel_compiler/kernel.h"
-#ifdef ENABLE_DATA_DUMP
 #include "debug/data_dump_parser.h"
-#endif

 using TaskInfoPtr = std::shared_ptr<ge::model_runner::TaskInfo>;

 namespace mindspore {
@@ -34,13 +32,7 @@ class AscendKernelMod : public KernelMod {
                           const std::vector<AddressPtr> &, uint32_t) = 0;
   uint32_t block_dim() { return block_dim_; }
   uint32_t stream_id() { return stream_id_; }
-  virtual bool NeedDump() {
-#ifdef ENABLE_DATA_DUMP
-    return DataDumpParser::GetInstance().NeedDump(kernel_name_);
-#else
-    return false;
-#endif
-  }
+  virtual bool NeedDump() { return DataDumpParser::GetInstance().NeedDump(kernel_name_); }

  protected:
   uint32_t block_dim_{1};
mindspore/ccsrc/debug/CMakeLists.txt

@@ -23,9 +23,7 @@ if (ENABLE_D)
     list(APPEND _DEBUG_SRC_LIST "${CMAKE_CURRENT_SOURCE_DIR}/common.cc")
-    if (ENABLE_DATA_DUMP)
-        list(APPEND _DEBUG_SRC_LIST "${CMAKE_CURRENT_SOURCE_DIR}/data_dump_parser.cc")
-    endif(ENABLE_DATA_DUMP)
+    list(APPEND _DEBUG_SRC_LIST "${CMAKE_CURRENT_SOURCE_DIR}/data_dump_parser.cc")
 endif()

 if (ENABLE_DUMP_E2E)
mindspore/ccsrc/debug/data_dump_parser.cc

@@ -35,7 +35,7 @@ void DataDumpParser::ResetParam() {
 bool DataDumpParser::DumpEnabled() const {
   auto enable_dump = std::getenv(kEnableDataDump);
   if (!enable_dump) {
-    MS_LOG(WARNING) << "[DataDump] enable dump is null. Please export ENABLE_DATA_DUMP";
+    MS_LOG(INFO) << "[DataDump] enable dump is null. Please export ENABLE_DATA_DUMP";
     return false;
   }
mindspore/ccsrc/runtime/device/ascend/ascend_kernel_runtime.cc

@@ -49,6 +49,10 @@ using mindspore::device::ascend::tasksink::TaskGenerator;
 using mindspore::kernel::tbe::TbeUtils;
 using std::vector;

+constexpr uint32_t kTupleTaskId = 0;
+constexpr uint32_t kTupleStreamId = 1;
+constexpr uint32_t kTupleArgs = 2;
+
 namespace mindspore {
 namespace device {
 namespace ascend {
@@ -91,13 +95,11 @@ std::string GetRankId() {
 AscendKernelRuntime::~AscendKernelRuntime() { graph_model_map_.clear(); }

 void AscendKernelRuntime::ClearGraphModelMap() {
-#ifdef ENABLE_DATA_DUMP
   for (auto &iter : graph_data_dumper_) {
     MS_LOG(INFO) << "[DataDump] Unload data dumper:" << iter.first;
     iter.second->UnloadDumpInfo();
   }
   graph_data_dumper_.clear();
-#endif
   for (auto &iter : graph_model_map_) {
     MS_LOG(INFO) << "Ge UnloadModel " << iter.first;
     auto ret = ModelRunner::Instance().UnloadModel(iter.first);
@@ -167,9 +169,7 @@ bool AscendKernelRuntime::Init() {
   }
 #endif

-#ifdef ENABLE_DATA_DUMP
   DataDumpParser::GetInstance().ParseDumpConfig();
-#endif

   // Start up profiling before rtSetDevice
   ret = ProfilingManager::GetInstance().StartupProfiling(device_id_);
@@ -510,9 +510,8 @@ bool AscendKernelRuntime::LoadTask(const session::KernelGraph *graph) {
     ProfilingUtils::ReportProfilingData(task_ids, stream_ids, NOT_NULL(graph));
   }

-#ifdef ENABLE_DATA_DUMP
   LaunchDataDump(NOT_NULL(graph));
-#endif

   if (!ModelRunner::Instance().LoadModelComplete(model_iter->first)) {
     MS_LOG(ERROR) << "Call ge runtime LoadModelComplete failed";
     return false;
@@ -520,7 +519,6 @@ bool AscendKernelRuntime::LoadTask(const session::KernelGraph *graph) {
   return true;
 }

-#ifdef ENABLE_DATA_DUMP
 void AscendKernelRuntime::LaunchDataDump(NotNull<const session::KernelGraph *> graph) {
   if (!DataDumpParser::GetInstance().DumpEnabled()) {
     return;
@@ -534,21 +532,12 @@ void AscendKernelRuntime::LaunchDataDump(NotNull<const session::KernelGraph *> g
     MS_LOG(WARNING) << "[DataDump] Insert graphId:" << graph->graph_id() << " data dumper failed";
   }
 }
-#endif

 void AscendKernelRuntime::DebugTaskIdName(GraphId graph_id) {
-  auto task_ids = ModelRunner::Instance().GetTaskIdList(graph_id);
-  auto graph_task_names = ProfilingUtils::graph_kernel_name();
-  auto iter = graph_task_names.find(graph_id);
-  if (iter != graph_task_names.end()) {
-    const auto &task_names = iter->second;
-    if (task_ids.size() != task_names.size()) {
-      MS_LOG(WARNING) << "Task_ids and task_names size not match";
-      return;
-    }
-    for (size_t i = 0; i < task_ids.size(); ++i) {
-      MS_LOG(INFO) << "Task_id:" << task_ids[i] << " task_name:" << task_names[i];
-    }
-  }
+  auto runtime_info_map = ModelRunner::Instance().GetRuntimeInfoMap(graph_id);
+  for (auto iter : runtime_info_map) {
+    MS_LOG(WARNING) << "Task name:" << iter.first << " task_id:" << std::get<kTupleTaskId>(*iter.second)
+                    << " stream_id:" << std::get<kTupleStreamId>(*iter.second);
+  }
 }
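The new kTupleTaskId, kTupleStreamId, and kTupleArgs constants name the fields of the runtime-info tuples returned by ModelRunner::Instance().GetRuntimeInfoMap(), so DebugTaskIdName can index them with std::get<> instead of magic numbers. Below is a small self-contained sketch of the same pattern; the tuple layout and the map type are illustrative assumptions, not the real ge::model_runner types.

// Sketch of indexing tuple fields through named constexpr indices.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <tuple>

constexpr uint32_t kTupleTaskId = 0;
constexpr uint32_t kTupleStreamId = 1;
constexpr uint32_t kTupleArgs = 2;

// Assumed layout: (task_id, stream_id, args pointer).
using RuntimeInfo = std::tuple<uint32_t, uint32_t, void *>;

int main() {
  std::map<std::string, RuntimeInfo> runtime_info_map{
      {"Conv2D_1", {11, 0, nullptr}},
      {"MatMul_2", {12, 1, nullptr}},
  };
  for (const auto &iter : runtime_info_map) {
    std::cout << "Task name:" << iter.first << " task_id:" << std::get<kTupleTaskId>(iter.second)
              << " stream_id:" << std::get<kTupleStreamId>(iter.second)
              << " has_args:" << (std::get<kTupleArgs>(iter.second) != nullptr) << '\n';
  }
  return 0;
}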
mindspore/ccsrc/runtime/device/ascend/ascend_kernel_runtime.h

@@ -24,10 +24,8 @@
 #include "framework/ge_runtime/davinci_model.h"
 #include "runtime/device/kernel_runtime_manager.h"
 #include "backend/session/session_basic.h"
-#ifdef ENABLE_DATA_DUMP
 #include "debug/data_dump_parser.h"
 #include "runtime/device/ascend/dump/data_dumper.h"
-#endif

 using ge::model_runner::TaskInfo;
 using std::unordered_map;
@@ -70,10 +68,8 @@ class AscendKernelRuntime : public KernelRuntime {
   bool initialized_{false};
   unordered_map<GraphId, vector<std::shared_ptr<TaskInfo>>> task_map_;
   unordered_map<GraphId, std::shared_ptr<ge::model_runner::DavinciModel>> graph_model_map_;
-#ifdef ENABLE_DATA_DUMP
   void LaunchDataDump(NotNull<const session::KernelGraph *> graph);
   unordered_map<GraphId, std::shared_ptr<DataDumper>> graph_data_dumper_;
-#endif
 };

 MS_REG_KERNEL_RUNTIME(kAscendDevice, AscendKernelRuntime);
mindspore/ccsrc/runtime/device/ascend/dump/data_dumper.cc

@@ -13,7 +13,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#ifdef ENABLE_DATA_DUMP
 #include "runtime/device/ascend/dump/data_dumper.h"
 #include <map>
@@ -280,4 +279,3 @@ void DumpKernelInput(const CNodePtr &kernel, void *args, NotNull<aicpu::dump::Ta
 }  // namespace ascend
 }  // namespace device
 }  // namespace mindspore
-#endif
mindspore/ccsrc/runtime/device/ascend/dump/data_dumper.h

@@ -16,7 +16,6 @@
 #ifndef MINDSPORE_MINDSPORE_CCSRC_DEVICE_ASCEND_DUMP_DATADUMP_H_
 #define MINDSPORE_MINDSPORE_CCSRC_DEVICE_ASCEND_DUMP_DATADUMP_H_
-#ifdef ENABLE_DATA_DUMP
 #include <tuple>
 #include <map>
 #include <memory>
@@ -67,5 +66,4 @@ class DataDumper {
 }  // namespace ascend
 }  // namespace device
 }  // namespace mindspore
-#endif
 #endif  // MINDSPORE_MINDSPORE_CCSRC_DEVICE_ASCEND_DUMP_DATADUMP_H_
tests/ut/cpp/stub/tasksink/task_sink_stub.cc

@@ -15,6 +15,7 @@
  */
 #include "runtime/device/ascend/tasksink/task_generator.h"
+#include "runtime/device/ascend/dump/data_dumper.h"

 namespace mindspore {
 namespace device {
@@ -25,6 +26,9 @@ bool TaskGenerator::GenTasks(const std::vector<CNodePtr> &anf_node_list, std::ve
   return true;
 }
 }  // namespace tasksink
+void DataDumper::LoadDumpInfo() {}
+void DataDumper::UnloadDumpInfo() {}
+DataDumper::~DataDumper() {}
 }  // namespace ascend
 }  // namespace device
 }  // namespace mindspore
\ No newline at end of file
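With the #ifdef guards gone, the Ascend runtime references DataDumper unconditionally, so the unit-test stub above supplies empty LoadDumpInfo/UnloadDumpInfo/~DataDumper definitions for the test binary to link. A hypothetical, self-contained illustration of this stub-for-linking pattern follows; the class declaration here is a stand-in for the real data_dumper.h.

// Stub-for-linking sketch: empty definitions satisfy the linker in a test build
// that never exercises the real dump implementation.
namespace mindspore {
namespace device {
namespace ascend {
// Stand-in declaration; normally provided by runtime/device/ascend/dump/data_dumper.h.
class DataDumper {
 public:
  void LoadDumpInfo();
  void UnloadDumpInfo();
  ~DataDumper();
};

void DataDumper::LoadDumpInfo() {}    // no-op in tests
void DataDumper::UnloadDumpInfo() {}  // no-op in tests
DataDumper::~DataDumper() {}
}  // namespace ascend
}  // namespace device
}  // namespace mindspore

int main() { return 0; }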