magicwindyyd / mindspore (forked from MindSpore / mindspore)
Commit 4df1edf5
Authored Jul 09, 2020 by buxue
Improving implicit type conversion
Parent: 7b65c548
Showing 5 changed files with 151 additions and 42 deletions (+151 / -42).
mindspore/ccsrc/ir/dtype/type_id.h  (+3 / -3)
mindspore/ccsrc/operator/composite/do_signature.cc  (+10 / -5)
mindspore/ccsrc/operator/composite/do_signature.h  (+3 / -0)
mindspore/ccsrc/pynative/pynative_execute.cc  (+11 / -33)
tests/ut/python/pynative_mode/test_implicit_conversion.py  (+124 / -1)
mindspore/ccsrc/ir/dtype/type_id.h

@@ -86,8 +86,8 @@ enum TypeId : int {
 // TypeId name map
 //
 const std::unordered_map<TypeId, std::string> type_name_map = {
-  {kNumberTypeBool, "Bool"},       {kNumberTypeInt8, "Int8"},       {kNumberTypeUInt8, "UInt8"},
-  {kNumberTypeInt16, "Int16"},     {kNumberTypeInt32, "Int32"},     {kNumberTypeInt64, "Int64"},
-  {kNumberTypeFloat16, "Float16"}, {kNumberTypeFloat32, "Float32"}, {kNumberTypeFloat64, "Float64"}};
+  {kNumberTypeBool, "bool_"},      {kNumberTypeInt8, "int8"},       {kNumberTypeUInt8, "uint8"},
+  {kNumberTypeInt16, "int16"},     {kNumberTypeInt32, "int32"},     {kNumberTypeInt64, "int64"},
+  {kNumberTypeFloat16, "float16"}, {kNumberTypeFloat32, "float32"}, {kNumberTypeFloat64, "float64"}};
 }  // namespace mindspore
 #endif  // MINDSPORE_CCSRC_IR_DTYPE_TYPE_ID_H_
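
The renamed entries replace the old capitalized spellings ("Bool", "Int8", ...) with the names MindSpore's Python dtypes go by, so C++ error messages now quote the same identifiers users write in Python. A minimal alignment check (a sketch; it assumes mindspore.common.dtype exposes the dtypes under these attribute names, as 2020-era MindSpore did):

from mindspore.common import dtype as mstype

# Each value in the updated type_name_map should resolve to a Python dtype.
for name in ["bool_", "int8", "uint8", "int16", "int32",
             "int64", "float16", "float32", "float64"]:
    assert hasattr(mstype, name), name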
mindspore/ccsrc/operator/composite/do_signature.cc

@@ -223,11 +223,7 @@ void DoAutoCast(const std::string &func_name, const std::vector<Signature> &sign
       if (it_name_map == type_name_map.end()) {
         continue;
       }
-      MS_LOG(EXCEPTION) << "In op '" << func_name << "', \n"
-                        << "the type of writable argument is '" << it_map->second << "', "
-                        << "but the largest type in the same SignatureEumDtype is '" << it_name_map->second
-                        << "'. The writable arg type is not equal to the largest type, "
-                        << "so can not cast automatically.";
+      RaiseExceptionForConvertRefDtype(func_name, it_map->second, it_name_map->second);
     }
     continue;
   }

@@ -311,5 +307,14 @@ FuncGraphPtr DoSignatureMetaFuncGraph::GenerateFuncGraph(const AbstractBasePtrLi
   func_graph->set_flag(FUNC_GRAPH_FLAG_CORE, true);
   return func_graph;
 }
+
+void RaiseExceptionForConvertRefDtype(const std::string &func_name, const std::string &ref_type,
+                                      const std::string &target_type) {
+  MS_LOG(EXCEPTION) << "In op '" << func_name << "', \n"
+                    << "the type of writable argument is '" << ref_type << "', "
+                    << "but the largest type in the same SignatureEumDtype is '" << target_type
+                    << "'. The writable arg type is not equal to the largest type, "
+                    << "so can not cast automatically.";
+}
 }  // namespace prim
 }  // namespace mindspore
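
This factors the duplicated "can not cast automatically" message into one helper shared by graph mode (here) and PyNative mode (pynative_execute.cc below). The error fires when a writable (RW-write) argument, typically a Parameter updated in place, has a lower-priority dtype than the promotion target of the other inputs: widening it would change the buffer it aliases, so the implicit cast is refused. A hedged sketch of how user code could reach this path (the op choice and the surfaced exception class are illustrative assumptions, not taken from this commit):

import numpy as np
from mindspore import Parameter, Tensor
from mindspore.ops import operations as P

# AssignAdd writes back into its first input, making that input RW-write.
# An int32 writable parameter combined with a float32 increment would need
# the writable side widened to float32, which implicit conversion refuses.
variable = Parameter(Tensor(np.array([1, 2, 3], dtype=np.int32)), name="var")
increment = Tensor(np.array([0.5, 0.5, 0.5], dtype=np.float32))
try:
    P.AssignAdd()(variable, increment)
except (TypeError, RuntimeError) as err:
    print(err)  # expected to end with "... so can not cast automatically."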
mindspore/ccsrc/operator/composite/do_signature.h

@@ -58,6 +58,9 @@ using RWSignaturePtr = std::shared_ptr<DoSignatureMetaFuncGraph>;
 extern const std::map<TypeId, size_t> type_map;

+void RaiseExceptionForConvertRefDtype(const std::string &func_name, const std::string &ref_type,
+                                      const std::string &target_type);
+
 AnfNodePtr GenerateCNode(const FuncGraphPtr &func_graph, const std::string &func_name, const ValuePtr &function,
                          const AbstractBasePtrList &args_spec_list, const AnfNodePtrList &old_node_inputs);
 }  // namespace prim
mindspore/ccsrc/pynative/pynative_execute.cc

@@ -184,6 +184,9 @@ std::map<SignatureEnumDType, TypeId> GetDstType(const py::tuple &py_args,
       auto arg = py::cast<tensor::TensorPtr>(py_args[index]);
       TypeId arg_type_id = arg->data_type();
       auto type_priority = prim::type_map.find(arg_type_id);
+      if (type_priority == prim::type_map.end()) {
+        continue;
+      }
       if (type_priority->second > priority) {
         max_type = type_priority->first;
         priority = type_priority->second;
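
The added end() check mirrors the guard already used in DoAutoCast: a dtype missing from the priority table is now skipped instead of dereferencing an invalid iterator. In Python terms, the destination-type selection behaves roughly like this (a sketch; the priority values are illustrative assumptions, not the actual contents of prim::type_map):

# Pick the highest-priority dtype among the arguments, skipping unknowns.
TYPE_PRIORITY = {"bool_": 0, "uint8": 1, "int8": 2, "int16": 3, "int32": 4,
                 "int64": 5, "float16": 6, "float32": 7, "float64": 8}

def dst_type(arg_dtypes):
    max_type, priority = None, -1
    for dtype in arg_dtypes:
        rank = TYPE_PRIORITY.get(dtype)  # mirrors prim::type_map.find(...)
        if rank is None:
            continue                     # the newly added end() guard
        if rank > priority:
            max_type, priority = dtype, rank
    return max_type

assert dst_type(["int32", "float32"]) == "float32"
assert dst_type(["complex64"]) is None  # unknown dtype no longer crashes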
@@ -204,36 +207,14 @@
 }

 std::string TypeIdToMsTypeStr(const TypeId &type_id) {
-  switch (type_id) {
-    case kNumberTypeFloat16:
-      return "float16";
-    case kNumberTypeFloat32:
-      return "float32";
-    case kNumberTypeFloat64:
-      return "float64";
-    case kNumberTypeInt8:
-      return "int8";
-    case kNumberTypeInt16:
-      return "int16";
-    case kNumberTypeInt32:
-      return "int32";
-    case kNumberTypeInt64:
-      return "int64";
-    case kNumberTypeUInt8:
-      return "uint8";
-    case kNumberTypeUInt16:
-      return "uint16";
-    case kNumberTypeUInt32:
-      return "uint32";
-    case kNumberTypeUInt64:
-      return "uint64";
-    case kNumberTypeBool:
-      return "bool_";
-    default:
-      MS_LOG(EXCEPTION) << "For implicit type conversion, not support the type: " << TypeIdToType(type_id);
-  }
+  auto type_name = type_name_map.find(type_id);
+  if (type_name == type_name_map.end()) {
+    MS_LOG(EXCEPTION) << "For implicit type conversion, not support convert to the type: " << TypeIdToType(type_id);
+  }
+  return type_name->second;
 }

-py::object DoAutoCast(const py::object arg, const TypeId &type_id) {
+py::object DoAutoCast(const py::object &arg, const TypeId &type_id) {
   py::tuple args(3);
   std::string module_name = "mindspore.ops.functional";
   std::string op_name = "cast";
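
Two cleanups land here: TypeIdToMsTypeStr now reads the shared type_name_map from type_id.h instead of maintaining its own switch (note the shared map has no uint16/uint32/uint64 entries, so those ids now take the exception path the old switch handled explicitly), and DoAutoCast takes its py::object by const reference rather than by value. The cast itself is still dispatched to the "cast" op in "mindspore.ops.functional"; from the Python side, one implicit-conversion step is equivalent to the explicit call below (a sketch assuming the usual functional/dtype imports):

import numpy as np
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.ops import functional as F

# DoAutoCast resolves "cast" from "mindspore.ops.functional" and applies it.
x = Tensor(np.array([1, 2, 3], dtype=np.int32))
print(F.cast(x, mstype.float32).dtype)  # Float32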
@@ -283,11 +264,8 @@ py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &args, py::tu
         continue;
       }
       if (signature[i].rw == SignatureEnumRW::kRWWrite) {
-        MS_LOG(EXCEPTION) << "In op '" << prim->name() << "', \n"
-                          << "the type of writable argument is '" << TypeIdToMsTypeStr(arg->data_type()) << "', "
-                          << "but the largest type in the same SignatureEumDtype is '" << TypeIdToMsTypeStr(it->second)
-                          << "'. The writable arg type is not equal to the largest type, "
-                          << "so can not cast automatically.";
+        prim::RaiseExceptionForConvertRefDtype(prim->name(), TypeIdToMsTypeStr(arg->data_type()),
+                                               TypeIdToMsTypeStr(it->second));
       }
     }
     py::object cast_output = DoAutoCast(py_args[i], it->second);
tests/ut/python/pynative_mode/test_implicit_conversion.py

@@ -15,7 +15,8 @@
 """ test implicit conversion """
 import numpy as np

-from mindspore import Tensor
+from mindspore import Tensor, nn
+from mindspore.ops import composite as C


 def test_float_tensor_and_int_add():

@@ -23,6 +24,7 @@ def test_float_tensor_and_int_add():
     y = 2
     ret_actual = x + y
     ret_expect = Tensor(np.array([[2.1, 2.2, 2.3], [2.4, 2.5, 2.6]], dtype=np.float32))
+    assert ret_actual.dtype == ret_expect.dtype
     assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()

@@ -31,6 +33,7 @@ def test_bool_tensor_and_float_add():
     y = 3.3
     ret_actual = x + y
     ret_expect = Tensor(np.array([[4.3, 3.3], [3.3, 4.3]], dtype=np.float32))
+    assert ret_actual.dtype == ret_expect.dtype
     assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()

@@ -39,6 +42,7 @@ def test_bool_tensor_and_int_add():
     y = 3
     ret_actual = x + y
     ret_expect = Tensor(np.array([[4, 3], [3, 4]], dtype=np.int32))
+    assert ret_actual.dtype == ret_expect.dtype
     assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()

@@ -47,13 +51,16 @@ def test_bool_and_int_tensor_add():
     y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
     ret_actual = x + y
     ret_expect = Tensor(np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32))
+    assert ret_actual.dtype == ret_expect.dtype
     assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()


 def test_float_tensor_and_int_tensor_add():
     x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
     y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
     ret_actual = x + y
     ret_expect = Tensor(np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]], dtype=np.float32))
+    assert ret_actual.dtype == ret_expect.dtype
     assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()

@@ -62,6 +69,7 @@ def test_float_tensor_and_float_tensor_add():
     y = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float16))
     ret_actual = x + y
     ret_expect = Tensor(np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]], dtype=np.float32))
+    assert ret_actual.dtype == ret_expect.dtype
     assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()

@@ -70,6 +78,7 @@ def test_int_tensor_and_int_tensor_add():
     y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
     ret_actual = x + y
     ret_expect = Tensor(np.array([[2, 4, 6], [8, 10, 12]], dtype=np.int32))
+    assert ret_actual.dtype == ret_expect.dtype
     assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()
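
Each hunk above adds the same one-line strengthening: the tests now assert the result dtype as well as the values, pinning down the promotion rules (float32 wins over an int scalar and over int32/float16 tensors; bool is promoted to the other operand's type). A quick spot-check of one such rule (assumes a runnable MindSpore install):

import numpy as np
from mindspore import Tensor

x = Tensor(np.array([0.1, 0.2], dtype=np.float32))
y = Tensor(np.array([1, 2], dtype=np.int32))
print((x + y).dtype)  # expected Float32: float32 outranks int32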
@@ -79,3 +88,117 @@ def test_float_tensor_and_bool_tensors_add():
     ret_actual = x + y
     ret_expect = Tensor(np.array([[1.1, 1.2, 1.3], [0.4, 0.5, 0.6]], dtype=np.float32))
     assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()
+
+
+def test_float_tensor_and_bool_tensors_add_grad():
+    class Net(nn.Cell):
+        def __init__(self):
+            super(Net, self).__init__()
+
+        def construct(self, x, y):
+            return x + y
+
+    class GradNet(nn.Cell):
+        def __init__(self, net):
+            super(GradNet, self).__init__()
+            self.net = net
+
+        def construct(self, x, y, sens):
+            return C.grad_all_with_sens(self.net)(x, y, sens)
+
+    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
+    y = Tensor(np.array([[True, True, True], [False, False, False]], dtype=np.bool_))
+    sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))
+    net = Net()
+    grad_net = GradNet(net)
+    ret = grad_net(x, y, sens)
+    assert ret[0].dtype == x.dtype
+    assert ret[1].dtype == y.dtype
+    assert (ret[0].asnumpy() == sens.asnumpy()).all()
+    assert (ret[1].asnumpy() == sens.asnumpy().astype(np.bool_)).all()
+
+
+def test_float_tensor_and_int_tensors_sub_grad():
+    class Net(nn.Cell):
+        def __init__(self):
+            super(Net, self).__init__()
+
+        def construct(self, x, y):
+            return x - y
+
+    class GradNet(nn.Cell):
+        def __init__(self, net):
+            super(GradNet, self).__init__()
+            self.net = net
+
+        def construct(self, x, y, sens):
+            return C.grad_all_with_sens(self.net)(x, y, sens)
+
+    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
+    y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
+    sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))
+    net = Net()
+    grad_net = GradNet(net)
+    ret = grad_net(x, y, sens)
+    print(ret)
+    assert ret[0].dtype == x.dtype
+    assert ret[1].dtype == y.dtype
+    assert (ret[0].asnumpy() == sens.asnumpy()).all()
+    assert (ret[1].asnumpy() == sens.asnumpy() * -1).all()
+
+
+def test_float16_tensor_and_float32_tensors_sub_grad():
+    class Net(nn.Cell):
+        def __init__(self):
+            super(Net, self).__init__()
+
+        def construct(self, x, y):
+            return x - y
+
+    class GradNet(nn.Cell):
+        def __init__(self, net):
+            super(GradNet, self).__init__()
+            self.net = net
+
+        def construct(self, x, y, sens):
+            return C.grad_all_with_sens(self.net)(x, y, sens)
+
+    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.int32))
+    y = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32))
+    sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))
+    net = Net()
+    grad_net = GradNet(net)
+    ret = grad_net(x, y, sens)
+    print(ret)
+    assert ret[0].dtype == x.dtype
+    assert ret[1].dtype == y.dtype
+    assert (ret[0].asnumpy() == sens.asnumpy()).all()
+    assert (ret[1].asnumpy() == sens.asnumpy() * -1).all()
+
+
+def test_float_tensor_and_int_add_grad():
+    class Net(nn.Cell):
+        def __init__(self):
+            super(Net, self).__init__()
+
+        def construct(self, x):
+            return x + 2
+
+    class GradNet(nn.Cell):
+        def __init__(self, net):
+            super(GradNet, self).__init__()
+            self.net = net
+
+        def construct(self, x, sens):
+            return C.grad_all_with_sens(self.net)(x, sens)
+
+    x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
+    sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))
+    net = Net()
+    grad_net = GradNet(net)
+    ret = grad_net(x, sens)
+    assert ret[0].dtype == x.dtype
+    assert (ret[0].asnumpy() == sens.asnumpy()).all()
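
These are PyNative-mode unit tests; in a standard MindSpore development checkout they would typically be run with pytest:

pytest tests/ut/python/pynative_mode/test_implicit_conversion.py -v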