Commit 30c242d7
Authored on Apr 07, 2020 by mindspore-ci-bot; committed via Gitee on Apr 07, 2020
!146 Fix some typo errors in mindspore ir module
Merge pull request !146 from leonwanghui/typo-fix
Parents: b1e38423, 322ffef3
Showing 19 changed files with 129 additions and 130 deletions.
mindspore/ccsrc/common/trans.cc  +45 -45
mindspore/ccsrc/debug/anf_ir_dump.cc  +8 -8
mindspore/ccsrc/debug/anf_ir_utils.cc  +18 -18
mindspore/ccsrc/debug/draw.cc  +1 -1
mindspore/ccsrc/debug/dump_proto.cc  +1 -1
mindspore/ccsrc/debug/e2e_dump.cc  +12 -12
mindspore/ccsrc/debug/info.cc  +2 -2
mindspore/ccsrc/debug/trace.cc  +5 -5
mindspore/ccsrc/debug/trace_info.cc  +1 -1
mindspore/ccsrc/ir/anf.h  +3 -3
mindspore/ccsrc/ir/dtype/number.cc  +3 -3
mindspore/ccsrc/ir/dtype/type.cc  +3 -3
mindspore/ccsrc/ir/func_graph.h  +1 -1
mindspore/ccsrc/ir/manager.cc  +2 -2
mindspore/ccsrc/ir/manager.h  +1 -1
mindspore/ccsrc/ir/meta_tensor.h  +4 -4
mindspore/ccsrc/ir/visitor.cc  +3 -4
mindspore/ccsrc/pynative/pynative_execute.cc  +7 -7
mindspore/ccsrc/pynative/pynative_execute_ge.cc  +9 -9
mindspore/ccsrc/common/trans.cc
@@ -122,7 +122,7 @@ bool CastKernel(const TypeIdArgs &args, void *dst, const size_t data_size, const
       TransDataSrc2Dst<uint16_t, int32_t>(args, dst, data_size);
       break;
     default:
-      MS_LOG(ERROR) << "unsupported datatype trans";
+      MS_LOG(ERROR) << "Unsupported datatype trans";
       return false;
   }
   return true;
@@ -132,7 +132,7 @@ size_t CubeSizeByType(const TypeId data_type) {
   const size_t default_error = 0;
   auto dt_size = TypeIdSize(data_type);
   if (dt_size < 1) {
-    MS_LOG(ERROR) << "illegal dtype.";
+    MS_LOG(ERROR) << "Illegal dtype.";
     return default_error;
   } else if (dt_size == 1) {
     return kCubeSize * 2;
@@ -146,12 +146,12 @@ size_t ShapeSize(const std::vector<size_t> &shape) {
 }

 size_t TypeIdSize(const TypeId data_type) {
-  const size_t unsupport_type_error = 0;
+  const size_t unsupported_type_error = 0;
   auto iter = type_map.find(data_type);
   if (iter != type_map.end()) {
     return iter->second;
   }
-  return unsupport_type_error;
+  return unsupported_type_error;
 }

 std::vector<size_t> TransShapeTo4d(const std::vector<size_t> &shape) {
@@ -174,7 +174,7 @@ std::vector<size_t> TransShapeTo4d(const std::vector<size_t> &shape) {
       }
       break;
     default:
-      MS_LOG(EXCEPTION) << "Unexpeted shape size = " << shape.size();
+      MS_LOG(EXCEPTION) << "Unexpected shape size = " << shape.size();
   }
   return shape_4d;
 }
@@ -183,7 +183,7 @@ std::vector<size_t> TransShapeToDevice(const std::vector<size_t> &shape, const s
   std::vector<size_t> device_shape;
   if (format == kOpFormat_FRAC_NZ) {
     if (shape.size() < 2) {
-      MS_EXCEPTION(NotSupportError) << "format " << format << " is not support shape " << shape.size();
+      MS_EXCEPTION(NotSupportError) << "Format " << format << " is not support shape " << shape.size();
     }
     if (shape.size() > 2) {
       (void)std::copy(shape.begin(), shape.end() - 2, std::back_inserter(device_shape));
@@ -231,37 +231,37 @@ std::vector<size_t> TransShapeToDevice(const std::vector<size_t> &shape, const s
 }

 bool TransDataType(const TypeIdArgs &args, void *result) {
-  MS_LOG(DEBUG) << "begin trans datatype from " << TypeIdLabel(args.host_data_type) << " to "
+  MS_LOG(DEBUG) << "Begin trans datatype from " << TypeIdLabel(args.host_data_type) << " to "
                 << TypeIdLabel(args.device_data_type);
   MS_EXCEPTION_IF_NULL(result);
   std::pair<TypeId, TypeId> type_info(args.host_data_type, args.device_data_type);
   auto iter = mode_map.find(type_info);
   if (iter == mode_map.end()) {
-    MS_LOG(ERROR) << "unsupported datatype trans. src_type :" << TypeIdLabel(args.host_data_type)
+    MS_LOG(ERROR) << "Unsupported datatype trans. src_type :" << TypeIdLabel(args.host_data_type)
                   << ", dst_type:" << TypeIdLabel(args.device_data_type);
     return false;
   }
   auto trans_mode = iter->second;
   auto type_size = TypeIdSize(args.device_data_type);
   if (type_size < 1) {
-    MS_LOG(ERROR) << "invalid host data type.";
+    MS_LOG(ERROR) << "Invalid host data type.";
     return false;
   }
   if (args.host_shape_size < 1) {
-    MS_LOG(ERROR) << "invalid host data size.";
+    MS_LOG(ERROR) << "Invalid host data size.";
     return false;
   }
   if (!CastKernel(args, result, args.host_shape_size, trans_mode)) {
-    MS_LOG(ERROR) << "failed to trans datatype..";
+    MS_LOG(ERROR) << "Failed to trans datatype..";
     return false;
   }
   return true;
 }

 bool TransFormat(const FormatArgs &args, void *result) {
-  MS_LOG(DEBUG) << "start trans format.";
+  MS_LOG(DEBUG) << "Start trans format.";
   if (TypeIdSize(args.src_data_type) < 1) {
-    MS_LOG(ERROR) << "invalid datatype..";
+    MS_LOG(ERROR) << "Invalid datatype..";
     return false;
   }
   if ((args.host_format == kOpFormat_NCHW || args.host_format == kOpFormat_ND) &&
@@ -276,9 +276,9 @@ bool TransFormat(const FormatArgs &args, void *result) {
 }

 bool TransFormatFromDeviceToHost(const FormatArgs &args, void *result) {
-  MS_LOG(DEBUG) << "start trans format.";
+  MS_LOG(DEBUG) << "Start trans format.";
   if (TypeIdSize(args.src_data_type) < 1) {
-    MS_LOG(ERROR) << "invalid datatype..";
+    MS_LOG(ERROR) << "Invalid datatype..";
     return false;
   }
   if ((args.host_format == kOpFormat_NCHW || args.host_format == kOpFormat_ND) &&
@@ -293,15 +293,15 @@ bool TransFormatFromDeviceToHost(const FormatArgs &args, void *result) {
 }

 bool NchwToFracZ(const FormatArgs &args, void *result) {
-  MS_LOG(DEBUG) << "trans format from nchw to frac_z";
+  MS_LOG(DEBUG) << "Trans format from nchw to frac_z";
   MS_EXCEPTION_IF_NULL(result);
   if (args.host_shape.size() != kNchwDims) {
-    MS_LOG(ERROR) << "invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
+    MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
     return false;
   }
   size_t size = TypeIdSize(args.src_data_type);
   if (size < 1) {
-    MS_LOG(ERROR) << "illegal dtype.";
+    MS_LOG(ERROR) << "Illegal dtype.";
     return false;
   }
   auto n = args.host_shape[0];
@@ -311,7 +311,7 @@ bool NchwToFracZ(const FormatArgs &args, void *result) {
   size_t c0 = CubeSizeByType(args.src_data_type);
   if (c0 < 1) {
-    MS_LOG(ERROR) << "illegal dtype.";
+    MS_LOG(ERROR) << "Illegal dtype.";
     return false;
   }
   size_t c1 = Ceil(c, c0);
@@ -327,7 +327,7 @@ bool NchwToFracZ(const FormatArgs &args, void *result) {
   size_t dst_size = total_ele_cnt * size;
   if (dst_size != args.device_size) {
-    MS_LOG(ERROR) << "illegal total data size."
+    MS_LOG(ERROR) << "Illegal total data size."
                   << "dst size is :" << dst_size << "device size is :" << args.device_size;
     return false;
   }
@@ -369,20 +369,20 @@ bool NchwToFracZ(const FormatArgs &args, void *result) {
 }

 bool FracZToNchw(const FormatArgs &args, void *result) {
-  MS_LOG(DEBUG) << "trans format from frac_z to nchw";
+  MS_LOG(DEBUG) << "Trans format from frac_z to nchw";
   MS_EXCEPTION_IF_NULL(result);
   if (args.host_shape.size() != kNchwDims) {
-    MS_LOG(ERROR) << "invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
+    MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
     return false;
   }
   size_t size = TypeIdSize(args.src_data_type);
   if (size < 1) {
-    MS_LOG(ERROR) << "illegal dtype.";
+    MS_LOG(ERROR) << "Illegal dtype.";
     return false;
   }
   size_t total_size = ShapeSize(args.device_shape) * size;
   if (total_size != args.device_size) {
-    MS_LOG(ERROR) << "illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size;
+    MS_LOG(ERROR) << "Illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size;
     return false;
   }
@@ -435,7 +435,7 @@ bool FracZToNchw(const FormatArgs &args, void *result) {
 bool TransShapeToNz(const std::vector<size_t> &host_shape, std::vector<size_t> *hw_shape) {
   MS_EXCEPTION_IF_NULL(hw_shape);
   if (host_shape.empty()) {
-    MS_LOG(ERROR) << "size of vector is 0.";
+    MS_LOG(ERROR) << "Size of vector is 0.";
     return false;
   }
   switch (host_shape.size()) {
@@ -447,7 +447,7 @@ bool TransShapeToNz(const std::vector<size_t> &host_shape, std::vector<size_t> *
     default:
       auto size = host_shape.size();
       if (size < 2) {
-        MS_LOG(ERROR) << "illegal size.";
+        MS_LOG(ERROR) << "Illegal size.";
         return false;
       }
       size_t times = 1;
@@ -462,26 +462,26 @@ bool TransShapeToNz(const std::vector<size_t> &host_shape, std::vector<size_t> *
 }

 bool NchwToFracNz(const FormatArgs &args, void *result) {
-  MS_LOG(DEBUG) << "trans format from nchw to frac_nz.";
+  MS_LOG(DEBUG) << "Trans format from nchw to frac_nz.";
   MS_EXCEPTION_IF_NULL(result);
   std::vector<size_t> hw_shape;
   if (!TransShapeToNz(args.host_shape, &hw_shape)) {
-    MS_LOG(ERROR) << "trans shape failed..";
+    MS_LOG(ERROR) << "Trans shape failed..";
     return false;
   }
   if (hw_shape.size() < 3 || args.device_shape.size() < 4) {
-    MS_LOG(ERROR) << "invalid shape size.";
+    MS_LOG(ERROR) << "Invalid shape size.";
     return false;
   }
   auto size = TypeIdSize(args.src_data_type);
   if (size < 1) {
-    MS_LOG(ERROR) << "illegal dtype";
+    MS_LOG(ERROR) << "Illegal dtype";
     return false;
   }
   auto dst_size = ShapeSize(args.device_shape) * size;
   if (dst_size != args.device_size) {
-    MS_LOG(ERROR) << "illegal total data size, total_size:" << dst_size << ", device_size:" << args.device_size;
+    MS_LOG(ERROR) << "Illegal total data size, total_size:" << dst_size << ", device_size:" << args.device_size;
     return false;
   }
   auto times = hw_shape.at(0);
@@ -538,26 +538,26 @@ bool NchwToFracNz(const FormatArgs &args, void *result) {
 }

 bool FracNzToNchw(const FormatArgs &args, void *result) {
-  MS_LOG(DEBUG) << "trans format from frac_nz to nchw";
+  MS_LOG(DEBUG) << "Trans format from frac_nz to nchw";
   MS_EXCEPTION_IF_NULL(result);
   std::vector<size_t> hw_shape;
   if (!TransShapeToNz(args.host_shape, &hw_shape)) {
-    MS_LOG(ERROR) << "trans shape failed..";
+    MS_LOG(ERROR) << "Trans shape failed..";
     return false;
   }
   if (hw_shape.size() < 3 || args.device_shape.size() < 4) {
-    MS_LOG(ERROR) << "invalid shape size.";
+    MS_LOG(ERROR) << "Invalid shape size.";
     return false;
   }
   auto size = TypeIdSize(args.src_data_type);
   if (size < 1) {
-    MS_LOG(ERROR) << "illegal dtype";
+    MS_LOG(ERROR) << "Illegal dtype";
     return false;
   }
   auto dst_size = ShapeSize(args.device_shape) * size;
   if (dst_size != args.device_size) {
-    MS_LOG(ERROR) << "illegal total data size, total_size:" << dst_size << ", device_size:" << args.device_size;
+    MS_LOG(ERROR) << "Illegal total data size, total_size:" << dst_size << ", device_size:" << args.device_size;
     return false;
   }
   auto times = hw_shape.at(0);
@@ -614,20 +614,20 @@ bool FracNzToNchw(const FormatArgs &args, void *result) {
 }

 bool NchwToNc1hwc0(const FormatArgs &args, void *result) {
-  MS_LOG(DEBUG) << "trans format from nchw to Nc1h1wc0";
+  MS_LOG(DEBUG) << "Trans format from nchw to Nc1h1wc0";
   MS_EXCEPTION_IF_NULL(result);
   if (args.host_shape.size() != kNchwDims) {
-    MS_LOG(ERROR) << "invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
+    MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
     return false;
   }
   size_t size = TypeIdSize(args.src_data_type);
   if (size < 1) {
-    MS_LOG(ERROR) << "illegal dtype.";
+    MS_LOG(ERROR) << "Illegal dtype.";
     return false;
   }
   auto total_size = ShapeSize(args.device_shape) * size;
   if (total_size != args.device_size) {
-    MS_LOG(ERROR) << "illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size;
+    MS_LOG(ERROR) << "Illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size;
     return false;
   }
@@ -637,7 +637,7 @@ bool NchwToNc1hwc0(const FormatArgs &args, void *result) {
   auto w = args.host_shape[3];
   size_t c0 = CubeSizeByType(args.src_data_type);
   if (c0 < 1) {
-    MS_LOG(ERROR) << "illegal dtype.";
+    MS_LOG(ERROR) << "Illegal dtype.";
     return false;
   }
   size_t c1 = Ceil(c, c0);
@@ -687,20 +687,20 @@ bool NchwToNc1hwc0(const FormatArgs &args, void *result) {
 }

 bool Nc1hwc0ToNchw(const FormatArgs &args, void *result) {
-  MS_LOG(DEBUG) << "trans format from nc1h1wc0 to nchw";
+  MS_LOG(DEBUG) << "Trans format from nc1h1wc0 to nchw";
   MS_EXCEPTION_IF_NULL(result);
   if (args.host_shape.size() != kNchwDims) {
-    MS_LOG(ERROR) << "invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
+    MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims;
     return false;
   }
   size_t size = TypeIdSize(args.src_data_type);
   if (size < 1) {
-    MS_LOG(ERROR) << "illegal dtype.";
+    MS_LOG(ERROR) << "Illegal dtype.";
     return false;
   }
   size_t total_size = ShapeSize(args.device_shape) * size;
   if (total_size != args.device_size) {
-    MS_LOG(ERROR) << "illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size;
+    MS_LOG(ERROR) << "Illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size;
     return false;
   }
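The TypeIdSize hunk above also shows the error-handling convention used throughout trans.cc: an unknown type maps to a sentinel size of 0, and every caller guards with a "< 1" check before using the value. What follows is a minimal, self-contained sketch of that pattern, not MindSpore code; the type table here is illustrative and far smaller than the real type_map in trans.cc.

#include <cstddef>
#include <cstdio>
#include <map>

enum TypeId { kNumberTypeFloat16, kNumberTypeFloat32, kNumberTypeInt32 };

// Illustrative size table; trans.cc's real type_map covers many more types.
static const std::map<TypeId, std::size_t> type_map = {
    {kNumberTypeFloat16, 2}, {kNumberTypeFloat32, 4}, {kNumberTypeInt32, 4}};

// Mirrors the shape of TypeIdSize after this commit: unknown types return the
// renamed sentinel unsupported_type_error (0) rather than throwing.
std::size_t TypeIdSize(const TypeId data_type) {
  const std::size_t unsupported_type_error = 0;
  auto iter = type_map.find(data_type);
  if (iter != type_map.end()) {
    return iter->second;
  }
  return unsupported_type_error;
}

int main() {
  // Callers such as CubeSizeByType treat any size below 1 as an illegal dtype.
  std::size_t dt_size = TypeIdSize(kNumberTypeFloat16);
  if (dt_size < 1) {
    std::puts("Illegal dtype.");
    return 1;
  }
  std::printf("element size: %zu bytes\n", dt_size);
  return 0;
}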
mindspore/ccsrc/debug/anf_ir_dump.cc
@@ -141,7 +141,7 @@ void DumpKernelInfo(const CNodePtr &node, const std::shared_ptr<SubGraphIRInfo>
 void DumpParams(const FuncGraphPtr &graph, std::ostringstream &buffer, OrderedMap<AnfNodePtr, int32_t> *para_map) {
   if (graph == nullptr) {
-    MS_LOG(INFO) << "param graph is nullptr.";
+    MS_LOG(INFO) << "Param graph is nullptr.";
     return;
   }
   std::vector<AnfNodePtr> parameters = graph->parameters();
@@ -175,17 +175,17 @@ void DumpParams(const FuncGraphPtr &graph, std::ostringstream &buffer, OrderedMa
     if (para_map != nullptr) {
       (*para_map)[p] = para++;
     }
-    MS_LOG(DEBUG) << "record param: " << p->ToString() << " graph belong : " << p->func_graph()->ToString();
+    MS_LOG(DEBUG) << "Record param: " << p->ToString() << " graph belong : " << p->func_graph()->ToString();
   }
 }

 void DumpOperator(const AnfNodePtr &op, const std::shared_ptr<SubGraphIRInfo> &gsub) {
   if (op == nullptr) {
-    MS_LOG(INFO) << "param op is nullptr";
+    MS_LOG(INFO) << "Param op is nullptr";
     return;
   }
   if (gsub == nullptr) {
-    MS_LOG(INFO) << "param gsub is nullptr";
+    MS_LOG(INFO) << "Param gsub is nullptr";
     return;
   }
@@ -338,7 +338,7 @@ void DumpCNode(const CNodePtr &nd, const FuncGraphPtr &sub_graph, OrderedMap<Anf
   }
   if (nd->inputs().empty()) {
-    MS_LOG(EXCEPTION) << "input of apply node is empty";
+    MS_LOG(EXCEPTION) << "Input of apply node is empty";
   }
   // print operator
@@ -376,7 +376,7 @@ void DumpIRInSubgraph(const std::vector<AnfNodePtr> &nodes, OrderedMap<AnfNodePt
     MS_EXCEPTION_IF_NULL(nd);
     FuncGraphPtr sub_graph = nd->func_graph();
     if (sub_graph == nullptr) {
-      MS_LOG(DEBUG) << "node[" << nd->ToString() << "] belongs to no graph!";
+      MS_LOG(DEBUG) << "Node[" << nd->ToString() << "] belongs to no graph!";
       continue;
     }
     std::shared_ptr<SubGraphIRInfo> gsub = (*sub_graphs)[sub_graph];
@@ -430,12 +430,12 @@ void DumpIR(const std::string &filename, const FuncGraphPtr &graph, bool dump_fu
     return;
   }
   if (filename.size() > PATH_MAX) {
-    MS_LOG(ERROR) << "file path " << filename << " is too long.";
+    MS_LOG(ERROR) << "File path " << filename << " is too long.";
     return;
   }
   char real_path[PATH_MAX] = {0};
   if (nullptr == realpath(filename.c_str(), real_path)) {
-    MS_LOG(DEBUG) << "dir " << filename << " does not exit.";
+    MS_LOG(DEBUG) << "Dir " << filename << " does not exit.";
   }
   OrderedMap<AnfNodePtr, int32_t> para_map;
mindspore/ccsrc/debug/anf_ir_utils.cc
@@ -49,7 +49,7 @@ std::string GetMsIrPath(void) {
     path = path_ptr;
     char real_path[PATH_MAX] = {0};
     if (path.size() > PATH_MAX || nullptr == realpath(path.c_str(), real_path)) {
-      MS_LOG(EXCEPTION) << "MS IR Path error, " << path_ptr;
+      MS_LOG(EXCEPTION) << "MS IR path error, " << path_ptr;
     }
     path = real_path;
   }
@@ -144,8 +144,8 @@ std::string AnfExporter::GetValueNodeText(const FuncGraphPtr& fg, const ValueNod
 }

 std::string AnfExporter::GetMultitypeFuncGraphText(const prim::MultitypeFuncGraphPtr& mt_func_graph) {
-  auto py_funs = mt_func_graph->GetPyFunctions();
-  if (py_funs.empty()) {
+  auto py_funcs = mt_func_graph->GetPyFunctions();
+  if (py_funcs.empty()) {
     return "";
   }
@@ -153,7 +153,7 @@ std::string AnfExporter::GetMultitypeFuncGraphText(const prim::MultitypeFuncGrap
   oss << "{";
   bool is_first = true;
-  for (const auto& py_func : py_funs) {
+  for (const auto& py_func : py_funcs) {
     if (is_first) {
       is_first = false;
     } else {
@@ -626,7 +626,7 @@ void AnfExporter::ExportFuncGraph(const std::string& filename, const FuncGraphPt
     ofs << "\n\n";
     (void)func_graph_set.erase(fg);
   }
-  ofs << "# num of total funcgraphs: " << exported.size();
+  ofs << "# num of total function graphs: " << exported.size();
   ofs.close();
 }
@@ -651,7 +651,7 @@ void AnfExporter::ExportFuncGraph(const std::string& filename, const std::vector
     ofs << "\n\n";
   }
-  ofs << "# num of total funcgraphs: " << graphs.size();
+  ofs << "# num of total function graphs: " << graphs.size();
   ofs.close();
 }
@@ -763,7 +763,7 @@ class Lexer {
       fin.close();
     }
   } catch (const std::exception& e) {
-    MS_LOG(ERROR) << "exception when closing file";
+    MS_LOG(ERROR) << "Exception when closing file";
   } catch (...) {
     std::string exName(abi::__cxa_current_exception_type()->name());
     MS_LOG(ERROR) << "Error occurred when closing file. Exception name: " << exName;
@@ -802,7 +802,7 @@ class Lexer {
     Token token = GetNextTokenInner();
     const char* str = token_text[token];
     std::string text = (str == nullptr ? GetTokenText() : str);
-    MS_LOG(DEBUG) << "------parse token] " << text;
+    MS_LOG(DEBUG) << "------Parse token] " << text;
     return token;
   }
@@ -1642,7 +1642,7 @@ class IrParser {
      MS_LOG(EXCEPTION) << "Expect @file at line " << lexer_.GetLineNo();
    }
-    // load prameter default value from serialized file
+    // load parameter default value from serialized file
    py::object default_obj = LoadObject(lexer_.GetTokenText());
    param->set_default_param(default_obj);
@@ -1950,7 +1950,7 @@ class IrParser {
      return TOK_ERROR;
    }
-    // restore python funciton of PrimitivePy from serialized file
+    // restore python function of PrimitivePy from serialized file
    py::object py_obj = LoadObject(lexer_.GetTokenText());
    PrimitivePyPtr ptr = nullptr;
    if (py::hasattr(py_obj, "__setattr_flag__") && py::hasattr(py_obj, "_clone")) {
@@ -1958,7 +1958,7 @@ class IrParser {
      py::object new_obj = clone_fn();
      ptr = new_obj.cast<PrimitivePyPtr>();
      if (ptr == nullptr) {
-        MS_LOG(EXCEPTION) << "cast to type 'PrimitivePyPtr' error";
+        MS_LOG(EXCEPTION) << "Cast to type 'PrimitivePyPtr' error";
      }
    } else {
      ptr = std::make_shared<PrimitivePy>(id.substr(strlen("PrimitivePy::")), py_obj);
@@ -2221,15 +2221,15 @@
 };

 std::vector<FuncGraphPtr> ImportIR(const std::string& filename) {
-  IrParser paser(filename.c_str());
-  paser.ParseFile();
-  return paser.GetFuncGraphs();
+  IrParser parser(filename.c_str());
+  parser.ParseFile();
+  return parser.GetFuncGraphs();
 }

 #ifdef ENABLE_DUMP_IR
 void DumpIRProto(const FuncGraphPtr& func_graph, const std::string& suffix) {
   if (func_graph == nullptr) {
-    MS_LOG(ERROR) << "func graph is nullptr";
+    MS_LOG(ERROR) << "Func graph is nullptr";
     return;
   }
   auto ms_context = MsContext::GetInstance();
@@ -2243,16 +2243,16 @@ void DumpIRProto(const FuncGraphPtr& func_graph, const std::string& suffix) {
   }
   std::string file_path = save_graphs_path + "/" + "ms_output_" + suffix + ".pb";
   if (file_path.size() > PATH_MAX) {
-    MS_LOG(ERROR) << "file path " << file_path << " is too long.";
+    MS_LOG(ERROR) << "File path " << file_path << " is too long.";
     return;
   }
   char real_path[PATH_MAX] = {0};
   if (nullptr == realpath(file_path.c_str(), real_path)) {
-    MS_LOG(DEBUG) << "dir " << file_path << " does not exit.";
+    MS_LOG(DEBUG) << "Dir " << file_path << " does not exit.";
   } else {
     std::string path_string = real_path;
     if (chmod(common::SafeCStr(path_string), S_IRUSR | S_IWUSR) == -1) {
-      MS_LOG(ERROR) << "modify file:" << real_path << " to rw fail.";
+      MS_LOG(ERROR) << "Modify file:" << real_path << " to rw fail.";
       return;
     }
   }
mindspore/ccsrc/debug/draw.cc
@@ -362,7 +362,7 @@ Digraph::~Digraph() {
       fout_.close();
     }
   } catch (const std::exception& e) {
-    MS_LOG(ERROR) << "exception when closing file " << filename_;
+    MS_LOG(ERROR) << "Exception when closing file " << filename_;
   }
 }
mindspore/ccsrc/debug/dump_proto.cc
@@ -208,7 +208,7 @@ void ProtoExporter::SetValueToProto(const ValuePtr& val, irpb::ValueProto* value
     TypePtr elem_type = dyn_cast<TensorType>(val)->element();
     type_proto->mutable_tensor_type()->set_elem_type(GetNumberDataType(elem_type));
   } else {
-    MS_LOG(WARNING) << "Not supported type " << val->type_name();
+    MS_LOG(WARNING) << "Unsupported type " << val->type_name();
   }
 }
mindspore/ccsrc/debug/e2e_dump.cc
@@ -101,7 +101,7 @@ bool Dump::IsConfigValid(const nlohmann::json& dumpSettings) {
   auto kernels = dumpSettings.at("kernels");
   if (!(enable.is_boolean() && trans_flag.is_boolean() && mode.is_number() && path.is_string() &&
         net_name.is_string() && iteration.is_number() && kernels.is_array())) {
-    MS_LOG(ERROR) << "element's type in Dump config json is invalid.";
+    MS_LOG(ERROR) << "Element's type in Dump config json is invalid.";
     dump_enable_ = false;
     return false;
   }
@@ -121,7 +121,7 @@ bool Dump::IsConfigValid(const nlohmann::json& dumpSettings) {
 bool Dump::SetDumpConfFromJsonFile() {
   const char* config_path_str = std::getenv("MINDSPORE_CONFIG_PATH");
   if (config_path_str != nullptr) {
-    MS_LOG(INFO) << "getenv MINDSPORE_CONFIG_PATH :" << config_path_str;
+    MS_LOG(INFO) << "Getenv MINDSPORE_CONFIG_PATH :" << config_path_str;
   } else {
     MS_LOG(INFO) << "No need E2E Dump. please export MINDSPORE_CONFIG_PATH eg: MINDSPORE_CONFIG_PATH=/etc";
     dump_enable_ = false;
@@ -132,7 +132,7 @@ bool Dump::SetDumpConfFromJsonFile() {
   auto id = context_ptr->device_id();
   char real_path[PATH_MAX] = {0};
   if (nullptr == realpath(config_path_str, real_path)) {
-    MS_LOG(ERROR) << "env e2e dump path error, " << config_path_str;
+    MS_LOG(ERROR) << "Env e2e dump path error, " << config_path_str;
     dump_enable_ = false;
     return false;
   }
@@ -150,20 +150,20 @@ bool Dump::SetDumpConfFromJsonFile() {
 bool Dump::DumpToFile(const std::string& filename, const void* data, size_t len) {
   if (filename.empty() || data == nullptr || len == 0) {
-    MS_LOG(ERROR) << "incorrect parameter.";
+    MS_LOG(ERROR) << "Incorrect parameter.";
     return false;
   }

   std::string realpath;
   bool ret = GetRealPath(filename, &realpath);
   if (!ret) {
-    MS_LOG(ERROR) << "get real path failed.";
+    MS_LOG(ERROR) << "Get real path failed.";
     return false;
   }
   std::ofstream fd;
   fd.open(realpath, std::ios::binary | std::ios::out);
   if (!fd.is_open()) {
-    MS_LOG(ERROR) << "open file " << realpath << " fail.";
+    MS_LOG(ERROR) << "Open file " << realpath << " fail.";
     return false;
   }
   (void)fd.write(reinterpret_cast<const char*>(data), SizeToLong(len));
@@ -182,7 +182,7 @@ bool Dump::GetRealPath(const std::string& inpath, std::string* outpath) {
   if (path_split_pos != std::string::npos) {
     std::string prefix_path = inpath.substr(0, path_split_pos);
     if (prefix_path.length() >= PATH_MAX) {
-      MS_LOG(ERROR) << "prefix path is too longer!";
+      MS_LOG(ERROR) << "Prefix path is too longer!";
       return false;
     }
     std::string last_path = inpath.substr(path_split_pos, inpath.length() - path_split_pos);
@@ -201,11 +201,11 @@ bool Dump::GetRealPath(const std::string& inpath, std::string* outpath) {
   if (path_split_pos == std::string::npos) {
     if (inpath.length() >= PATH_MAX) {
-      MS_LOG(ERROR) << "prefix path is too longer!";
+      MS_LOG(ERROR) << "Prefix path is too longer!";
       return false;
     }
     if (nullptr == realpath(inpath.c_str(), real_path)) {
-      MS_LOG(ERROR) << "file " << inpath << " does not exit, it will be created.";
+      MS_LOG(ERROR) << "File " << inpath << " does not exit, it will be created.";
     }
     *outpath = std::string(real_path);
   }
@@ -218,7 +218,7 @@ bool Dump::CreateNotExistDirs(const std::string& path) {
   MS_EXCEPTION_IF_NULL(fs);
   char temp_path[PATH_MAX] = {0};
   if (path.length() > PATH_MAX) {
-    MS_LOG(ERROR) << "path lens is max than " << PATH_MAX;
+    MS_LOG(ERROR) << "Path lens is max than " << PATH_MAX;
     return false;
   }
   for (uint32_t i = 0; i < path.length(); i++) {
@@ -229,7 +229,7 @@ bool Dump::CreateNotExistDirs(const std::string& path) {
       temp_path[i] = '\0';
       std::string path_handle(temp_path);
       if (!fs->FileExist(temp_path)) {
-        MS_LOG(INFO) << "dir " << path_handle << " does not exit, creating...";
+        MS_LOG(INFO) << "Dir " << path_handle << " does not exit, creating...";
         if (!fs->CreateDir(temp_path)) {
           MS_LOG(ERROR) << "Create " << path_handle << " dir error";
           return false;
@@ -241,7 +241,7 @@ bool Dump::CreateNotExistDirs(const std::string& path) {
   }
   if (!fs->FileExist(path)) {
-    MS_LOG(INFO) << "dir " << path << " does not exit, creating...";
+    MS_LOG(INFO) << "Dir " << path << " does not exit, creating...";
     if (!fs->CreateDir(path)) {
       MS_LOG(ERROR) << "Create " << path << " dir error";
       return false;
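Several of the e2e_dump.cc hunks touch the same path-validation idiom: bound the input length against PATH_MAX, then call realpath() and treat a null result as a not-yet-existing path. Below is a self-contained POSIX sketch of that idiom under the assumption of a glibc-style realpath(); the helper name ResolvePath and its fallback behavior are illustrative, not MindSpore's actual Dump::GetRealPath.

#include <iostream>
#include <limits.h>  // PATH_MAX (POSIX)
#include <stdlib.h>  // realpath (POSIX)
#include <string>

// Illustrative helper mirroring the guard pattern above: reject over-long
// inputs first, then let a null realpath() mean "does not exist yet".
bool ResolvePath(const std::string &inpath, std::string *outpath) {
  if (inpath.length() >= PATH_MAX) {
    std::cerr << "Prefix path is too long!\n";
    return false;
  }
  char real_path[PATH_MAX] = {0};
  if (realpath(inpath.c_str(), real_path) == nullptr) {
    // e2e_dump.cc logs this case and later creates the file.
    std::cerr << "File " << inpath << " does not exist, it will be created.\n";
    *outpath = inpath;
    return true;
  }
  *outpath = real_path;
  return true;
}

int main() {
  std::string resolved;
  if (ResolvePath("/tmp", &resolved)) {
    std::cout << "resolved: " << resolved << '\n';
  }
  return 0;
}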
mindspore/ccsrc/debug/info.cc
@@ -193,7 +193,7 @@ void TraceManager::DebugTrace(const TraceInfoPtr& trace_info) {
   }
   TraceContextPtr context = std::make_shared<TraceContext>(trace_info);
   if (trace_info->debug_info() == nullptr) {
-    MS_LOG(EXCEPTION) << "trace debug info is null";
+    MS_LOG(EXCEPTION) << "Trace debug info is null";
   }
   TraceManager::trace_context_stack_.push(context);
 }
@@ -205,7 +205,7 @@ void TraceManager::DebugTrace(const DebugInfoPtr& debug_info, const TraceInfoPtr
   auto cloned_info = trace_info->clone();
   cloned_info->set_debug_info(debug_info);
   if (cloned_info->debug_info() == nullptr) {
-    MS_LOG(EXCEPTION) << "trace debug info is null with cloned trace";
+    MS_LOG(EXCEPTION) << "Trace debug info is null with cloned trace";
   }
   TraceContextPtr context = std::make_shared<TraceContext>(cloned_info);
   TraceManager::trace_context_stack_.push(context);
mindspore/ccsrc/debug/trace.cc
@@ -89,7 +89,7 @@ std::string GetDebugInfo(const DebugInfoPtr& info, SourceLineTip tip) {
   return "";
 }

-// a trace info identifys a node transform, so we can trace the node transform through
+// a trace info identifies a node transform, so we can trace the node transform through
 // a link of trace info and debug info
 std::string GetInfoWithAction(const std::vector<DebugInfoPtr>& info_vec, SourceLineTip tip) {
   if (info_vec.size() < 1) {
@@ -173,7 +173,7 @@ void DumpInferStack(std::ostringstream& oss) {
     }
     auto graph_context = graph_infer->graph_context();
     if (graph_context == nullptr) {
-      MS_LOG(INFO) << "null context continue";
+      MS_LOG(INFO) << "Null context continue";
       continue;
     }
     auto graph = graph_context->func_graph();
@@ -264,7 +264,7 @@ void AnalyzedFuncGraphExporter::ExportFuncGraph(const std::string& filename,
   param_index = 1;
   auto tagged_func_graphs = CalcTaggedFuncGraphs();

-  // first output grapn on the analysis stack
+  // first output graph on the analysis stack
   for (const auto& node_cfg : node_cfgs) {
     auto fg = node_cfg->context()->func_graph();
     // the graph is already output, skip it
@@ -291,7 +291,7 @@ void AnalyzedFuncGraphExporter::ExportFuncGraph(const std::string& filename,
     ofs << "\n\n";
     (void)func_graph_set.erase(fg);
   }
-  ofs << "# num of total funcgraphs: " << exported.size();
+  ofs << "# num of total function graphs: " << exported.size();
   ofs.close();
 }
@@ -332,7 +332,7 @@ void GetInferStackInfo(std::ostringstream& oss) {
   MS_LOG(INFO) << "Get graph analysis information *end*";
 }

-// trace the graph evaluator statck
+// trace the graph evaluator stack
 static std::stack<std::pair<abstract::EvaluatorPtr, abstract::AnfNodeConfigPtr>> graph_infer_stack;
 // trace the cnode infer debug info
 static std::vector<abstract::AnfNodeConfigPtr> cnode_debug_stack{};
mindspore/ccsrc/debug/trace_info.cc
@@ -36,6 +36,6 @@ std::string TraceInfo::GetActionBetweenNode(const DebugInfoPtr& info) {
   } else if (debug_info()->trace_info() != nullptr) {
     return act_name + debug_info()->trace_info()->GetActionBetweenNode(info);
   }
-  return "not in the traced info";
+  return "Not in the traced info";
 }
 }  // namespace mindspore
mindspore/ccsrc/ir/anf.h
@@ -83,7 +83,7 @@ class AnfVisitor;
 // Methods:
 // func_graph: return FuncGraph that this AnfNode belongs to.
 // scope: return the scope namespace of this AnfNode. Set it using set_scope.
-// abstract: return the cached inferred abstract value. It cantains type, shape
+// abstract: return the cached inferred abstract value. It contains type, shape
 // value. Set New cache using set_abstract.
 // intermediate_abstract: return the cached inferring abstract value.
 // Type/Shape: return the related info of this AnfNode. When this AnfNode is an
@@ -284,7 +284,7 @@ class Parameter : public ANode {
 };
 using ParameterPtr = std::shared_ptr<Parameter>;

-// Value is used to represent the atomic expression metioned in BNF.
+// Value is used to represent the atomic expression mentioned in BNF.
 // It mainly be stored in ValueNode. Value and ValueNode is related definition.
 class Value : public Base {
  public:
@@ -313,7 +313,7 @@ using ValuePtr = std::shared_ptr<Value>;
 using ValuePtrList = std::vector<ValuePtr>;

 // ValueNode is used to hold value. Unlike CNode and Parameter, ValueNode
-// do not belong to any particular function graph.
+// does not belong to any particular function graph.
 class ValueNode : public ANode {
  public:
   explicit ValueNode(const ValuePtr& value) : value_(value) {}
mindspore/ccsrc/ir/dtype/number.cc
@@ -34,19 +34,19 @@ bool Number::operator==(const Type& other) const {
 Int::Int(const int nbits) : Number(IntBitsToTypeId(nbits), nbits, false) {
   if (nbits != 8 && nbits != 16 && nbits != 32 && nbits != 64) {
-    MS_LOG(EXCEPTION) << "wrong number of bits.";
+    MS_LOG(EXCEPTION) << "Wrong number of bits.";
   }
 }

 UInt::UInt(const int nbits) : Number(UIntBitsToTypeId(nbits), nbits, false) {
   if (nbits != 8 && nbits != 16 && nbits != 32 && nbits != 64) {
-    MS_LOG(EXCEPTION) << "wrong number of bits.";
+    MS_LOG(EXCEPTION) << "Wrong number of bits.";
   }
 }

 Float::Float(const int nbits) : Number(FloatBitsToTypeId(nbits), nbits, false) {
   if (nbits != 16 && nbits != 32 && nbits != 64) {
-    MS_LOG(EXCEPTION) << "wrong number of bits.";
+    MS_LOG(EXCEPTION) << "Wrong number of bits.";
   }
 }
mindspore/ccsrc/ir/dtype/type.cc
@@ -37,7 +37,7 @@ TypeId IntBitsToTypeId(const int nbits) {
     case 64:
       return kNumberTypeInt64;
     default:
-      MS_LOG(EXCEPTION) << "wrong number of bits.";
+      MS_LOG(EXCEPTION) << "Wrong number of bits.";
   }
 }
@@ -52,7 +52,7 @@ TypeId UIntBitsToTypeId(const int nbits) {
     case 64:
       return kNumberTypeUInt64;
     default:
-      MS_LOG(EXCEPTION) << "wrong number of bits.";
+      MS_LOG(EXCEPTION) << "Wrong number of bits.";
   }
 }
@@ -65,7 +65,7 @@ TypeId FloatBitsToTypeId(const int nbits) {
     case 64:
       return kNumberTypeFloat64;
     default:
-      MS_LOG(EXCEPTION) << "wrong number of bits.";
+      MS_LOG(EXCEPTION) << "Wrong number of bits.";
   }
 }
mindspore/ccsrc/ir/func_graph.h
@@ -174,7 +174,7 @@ class FuncGraph : public FuncGraphBase {
   GraphDebugInfoPtr debug_info();
   void set_debug_info(const GraphDebugInfoPtr& info) {
     if (info == nullptr) {
-      MS_LOG(EXCEPTION) << "graph set null debug info";
+      MS_LOG(EXCEPTION) << "Graph set null debug info";
     }
     this->debug_info_ = info;
   }
mindspore/ccsrc/ir/manager.cc
@@ -817,7 +817,7 @@ void FuncGraphChildDirect::OnMoveAllCNode(FuncGraphPtr src, FuncGraphPtr dst) {
 void FuncGraphParentsDirectCollector::OnModEdge(AnfNodePtr node, int, AnfNodePtr inp, EdgeProcessDirection direction) {
   MS_EXCEPTION_IF_NULL(node);
   FuncGraphPtr fg1 = node->func_graph();
-  // possible chirld parent
+  // possible child parent
   if (IsValueNode<FuncGraph>(inp)) {
     FuncGraphPtr fg2 = GetValueNode<FuncGraphPtr>(inp);
     if (Mod(fg1, ParentProxy(fg2), direction)) {
@@ -1181,7 +1181,7 @@ bool FuncGraphJTotalComputer::SeekJ(const FuncGraphPtr& fg, const FuncGraphSetPt
   }
   path->add(fg);

-  // check g if func graphs used contains J(func_graph);
+  // check if func graphs used contains J(func_graph);
   auto& used = this->manager_->func_graphs_used();
   for (auto& item : used[fg]) {
     auto used_g = item.first;
mindspore/ccsrc/ir/manager.h
@@ -650,7 +650,7 @@ class FuncGraphTransaction {
   explicit FuncGraphTransaction(FuncGraphManager* manager) : manager_(manager), changes_() {
     MS_EXCEPTION_IF_NULL(manager_);
     if (!manager_->IsManaged()) {
-      MS_LOG(DEBUG) << "the manager is not managed yet";
+      MS_LOG(DEBUG) << "The manager is not managed yet";
     }
   }
mindspore/ccsrc/ir/meta_tensor.h
@@ -148,7 +148,7 @@ class MetaTensor : public Value {
   //
   // The constructed MetaTensor object has the same type and shape with meta_tensor.
   //
-  // param meta_tensor An exisiting MetaTensor object.
+  // param meta_tensor An existing MetaTensor object.
   virtual MetaTensor& operator=(const MetaTensor& meta_tensor);

   // brief Compares two MetaTensor objects.
@@ -166,7 +166,7 @@ class MetaTensor : public Value {
   TypeId data_type() const { return data_type_; }
   std::string ToString() const override;
   std::string DumpText() const override;
-  // bried Sets the data type of a tensor in its MetaTensor.
+  // brief Sets the data type of a tensor in its MetaTensor.
   //
   // param data_type The data type of the tensor to be set.
   virtual TypeId set_data_type(const TypeId data_type) {
@@ -314,7 +314,7 @@ class Tensor : public MetaTensor {
   //
   // The constructed Tensor object has the same type and shape with tensor.
   //
-  // param tensor An exisiting Tensor object.
+  // param tensor An existing Tensor object.
   Tensor& operator=(const Tensor& tensor);

   // brief Compares two Tensor objects.
@@ -383,7 +383,7 @@ class Tensor : public MetaTensor {
   // return The [TypeId] of the tensor data.
   TypeId GetDataType(const py::buffer_info& buf) const;

-  // bried Sets the data type of a tensor.
+  // brief Sets the data type of a tensor.
   //
   // param data_type The data type of the tensor to be set.
   //
mindspore/ccsrc/ir/visitor.cc
@@ -43,14 +43,13 @@ VisitFuncType AnfVisitor::Match(const PrimitivePtr &prim, const std::vector<opt:
   }

   auto &inputs = node->cast<CNodePtr>()->inputs();
-  // infact, funcs_size == inps_size - 1
   auto funcs_size = funcs.size();
-  auto inps_size = inputs.size();
+  auto inputs_size = inputs.size();

   // check the inputs are matched with the predicate functions
   if (funcs_size > 0) {
     // use the predicate function list to check the number of inputs
-    if (funcs_size != (inps_size - 1)) {
+    if (funcs_size != (inputs_size - 1)) {
       return;
     }
@@ -63,7 +62,7 @@ VisitFuncType AnfVisitor::Match(const PrimitivePtr &prim, const std::vector<opt:
   }

   // visit the inputs
-  for (size_t i = 1; i < inps_size; i++) {
+  for (size_t i = 1; i < inputs_size; i++) {
     this->Visit(inputs[i]);
   }
 };
mindspore/ccsrc/pynative/pynative_execute.cc
@@ -36,7 +36,7 @@
 #endif

 const char SINGLE_OP_GRAPH[] = "single_op_graph";
-// primitive unable to infer value for constant input in pynative mode
+// primitive unable to infer value for constant input in PyNative mode
 const std::unordered_set<std::string> vm_operators = {"partial", "depend"};

 namespace mindspore {
@@ -45,7 +45,7 @@ inline ValuePtr PyAttrValue(const py::object& obj) {
   ValuePtr converted_ret = nullptr;
   bool converted = parse::ConvertData(obj, &converted_ret);
   if (!converted) {
-    MS_LOG(EXCEPTION) << "attribute convert error with type:" << std::string(py::str(obj));
+    MS_LOG(EXCEPTION) << "Attribute convert error with type:" << std::string(py::str(obj));
   }
   return converted_ret;
 }
@@ -67,7 +67,7 @@ void PynativeInfer(const PrimitivePyPtr& prim, const py::tuple& py_args, OpExecI
 OpExecInfoPtr GenerateOpExecInfo(const py::args& args) {
   if (args.size() != PY_ARGS_NUM) {
-    MS_LOG(ERROR) << "four args are needed by RunOp";
+    MS_LOG(ERROR) << "Four args are needed by RunOp";
     return nullptr;
   }
   auto op_exec_info = std::make_shared<OpExecInfo>();
@@ -145,13 +145,13 @@ py::object RunOpInVM(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* stat
 py::object RunOpInMs(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* status) {
   MS_EXCEPTION_IF_NULL(op_exec_info);
-  MS_LOG(INFO) << "start run op[" << op_exec_info->op_name << "] with backend policy ms";
+  MS_LOG(INFO) << "Start run op[" << op_exec_info->op_name << "] with backend policy ms";
   auto ms_context = MsContext::GetInstance();
   MS_EXCEPTION_IF_NULL(ms_context);
   ms_context->set_enable_pynative_infer(true);
   std::string device_target = ms_context->device_target();
   if (device_target != kAscendDevice && device_target != kGPUDevice) {
-    MS_EXCEPTION(ArgumentError) << "device target [" << device_target << "] is not supported in Pynative mode";
+    MS_EXCEPTION(ArgumentError) << "Device target [" << device_target << "] is not supported in Pynative mode";
   }
   std::shared_ptr<session::SessionBasic> session = session::SessionFactory::Get().Create(device_target);
   MS_EXCEPTION_IF_NULL(session);
@@ -197,7 +197,7 @@ py::object RunOpWithBackendPolicy(MsBackendPolicy backend_policy, const OpExecIn
       break;
     }
     default:
-      MS_LOG(ERROR) << "No backend configed for run op";
+      MS_LOG(ERROR) << "No backend configured for run op";
   }
   return result;
 }
@@ -240,7 +240,7 @@ py::tuple RunOp(const py::args& args) {
   }

   result = RunOpWithBackendPolicy(backend_policy, op_exec_info, &status);
   if (status != PYNATIVE_SUCCESS) {
-    MS_LOG(ERROR) << "Fail to run " << op_exec_info->op_name;
+    MS_LOG(ERROR) << "Failed to run " << op_exec_info->op_name;
     return err_ret;
   }
mindspore/ccsrc/pynative/pynative_execute_ge.cc
@@ -47,7 +47,7 @@ inline ValuePtr PyAttrValue(const py::object& obj) {
   ValuePtr converted_ret = nullptr;
   bool converted = parse::ConvertData(obj, &converted_ret);
   if (!converted) {
-    MS_LOG(EXCEPTION) << "attribute convert error with type:" << std::string(py::str(obj));
+    MS_LOG(EXCEPTION) << "Attribute convert error with type:" << std::string(py::str(obj));
   }
   return converted_ret;
 }
@@ -67,7 +67,7 @@ MeTensorPtr ConvertPyObjToTensor(const py::object& obj) {
   } else if (py::isinstance<py::array>(obj)) {
     me_tensor_ptr = std::make_shared<MeTensor>(py::cast<py::array>(obj), nullptr);
   } else {
-    MS_LOG(EXCEPTION) << "run op inputs type is invalid!";
+    MS_LOG(EXCEPTION) << "Run op inputs type is invalid!";
   }
   return me_tensor_ptr;
 }
@@ -97,7 +97,7 @@ bool SetInputsForSingleOpGraph(const OpExecInfoPtr& op_exec_info, const std::vec
     auto const_op_desc =
         transform::TransformUtil::GetGeTensorDesc(me_tensor_ptr->shape_c(), me_tensor_ptr->data_type(), kOpFormat_NCHW);
     if (const_op_desc == nullptr) {
-      MS_LOG(ERROR) << "Create variable " << op_name << " ouptut descriptor failed!";
+      MS_LOG(ERROR) << "Create variable " << op_name << " output descriptor failed!";
       return false;
     }
     auto pointer_cast_const_op = std::static_pointer_cast<transform::Constant>(const_op);
@@ -108,7 +108,7 @@ bool SetInputsForSingleOpGraph(const OpExecInfoPtr& op_exec_info, const std::vec
       continue;
     }
     if (adapter->setInput(op, op_input_idx++, const_op)) {
-      MS_LOG(ERROR) << "fail to set params, index is " << op_input_idx;
+      MS_LOG(ERROR) << "Failed to set params, index is " << op_input_idx;
       return false;
     }
     graph_input_nodes->push_back(*const_op);
@@ -178,7 +178,7 @@ void ToTensorPtr(const OpExecInfoPtr op_exec_info, std::vector<GeTensorPtr>* con
     MeTensorPtr me_tensor_ptr = ConvertPyObjToTensor(op_inputs[i]);
     auto ge_tensor_ptr = transform::TransformUtil::ConvertTensor(me_tensor_ptr, kOpFormat_NCHW);
     if (ge_tensor_ptr == nullptr) {
-      MS_LOG(EXCEPTION) << "convert inputs to GE tensor failed in op " << op_exec_info->op_name << ".";
+      MS_LOG(EXCEPTION) << "Convert inputs to GE tensor failed in op " << op_exec_info->op_name << ".";
     }
     // set inputs for operator to build single node graph
     inputs->push_back(ge_tensor_ptr);
@@ -192,7 +192,7 @@ PynativeStatusCode ConvertAttributes(const OpExecInfoPtr& op_exec_info, const st
   for (auto& item : op_attrs) {
     if (!py::isinstance<py::str>(item.first)) {
-      MS_LOG(ERROR) << "type error in py dict convert";
+      MS_LOG(ERROR) << "Type error in py dict convert";
       return PYNATIVE_OP_ATTRS_ERR;
     }
     std::string name = py::cast<std::string>(item.first);
@@ -203,7 +203,7 @@ PynativeStatusCode ConvertAttributes(const OpExecInfoPtr& op_exec_info, const st
   // build graph
   GeGraphPtr graph = std::make_shared<GeGraph>(op_exec_info->op_name);
   if (BuildSingleOpGraph(op_exec_info, inputs, attrs, graph) == false) {
-    MS_LOG(ERROR) << "Fail to BuildSingleOpGraph";
+    MS_LOG(ERROR) << "Failed to BuildSingleOpGraph";
     return PYNATIVE_GRAPH_GE_BUILD_ERR;
   }
@@ -211,7 +211,7 @@ PynativeStatusCode ConvertAttributes(const OpExecInfoPtr& op_exec_info, const st
   transform::Status ret =
       transform::DfGraphManager::GetInstance().AddGraph(SINGLE_OP_GRAPH, std::shared_ptr<transform::DfGraph>(graph));
   if (ret != transform::SUCCESS) {
-    MS_LOG(ERROR) << "Fail to AddGraph into graph manager";
+    MS_LOG(ERROR) << "Failed to AddGraph into graph manager";
     return PYNATIVE_GRAPH_MANAGER_ERR;
   }
@@ -289,7 +289,7 @@ py::object RunOpInGE(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* stat
     run_ret = graph_runner->RunGraph(run_options, ge_inputs, &ge_outputs);
   }
   if (run_ret != transform::Status::SUCCESS) {
-    MS_LOG(ERROR) << "GraphRunner Fails to Run Graph";
+    MS_LOG(ERROR) << "GraphRunner fails to run graph";
     *status = PYNATIVE_GRAPH_GE_RUN_ERR;
     return std::move(err_ret);
   }
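Taken together, almost every hunk in this merge applies one of two mechanical rules: capitalize the first word of an MS_LOG or MS_EXCEPTION message, or fix a spelling slip in an identifier or comment. A sketch of how the first rule could be checked automatically is shown below; this checker is an illustration only and is not a tool from the MindSpore repository.

#include <cctype>
#include <fstream>
#include <iostream>
#include <string>

// Illustrative lint pass: flag MS_LOG lines whose quoted message starts with
// a lowercase letter, the main pattern corrected by this commit.
int main(int argc, char **argv) {
  if (argc != 2) {
    std::cerr << "usage: " << argv[0] << " <source file>\n";
    return 2;
  }
  std::ifstream fin(argv[1]);
  std::string line;
  int lineno = 0, hits = 0;
  while (std::getline(fin, line)) {
    ++lineno;
    auto log_pos = line.find("MS_LOG(");
    if (log_pos == std::string::npos) continue;
    auto quote = line.find("<< \"", log_pos);
    if (quote == std::string::npos || quote + 4 >= line.size()) continue;
    char first = line[quote + 4];
    if (std::islower(static_cast<unsigned char>(first))) {
      std::cout << lineno << ": " << line << '\n';
      ++hits;
    }
  }
  return hits == 0 ? 0 : 1;
}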