Commit 19ce0c37 — MindSpore / mindspore

Authored on May 28, 2020 by mindspore-ci-bot; committed via Gitee on May 28, 2020.

!1257 Implicit type conversion

Merge pull request !1257 from candanzg/implicit_type_conversion2

Parents: 9c858444, 2429da19
Showing 10 changed files with 501 additions and 136 deletions (+501, -136):
mindspore/ccsrc/operator/composite/do_signature.cc   +141  -40
mindspore/common/initializer.py                         +2   -2
mindspore/common/parameter.py                           +5   -0
mindspore/ops/operations/math_ops.py                    +4   -4
mindspore/ops/operations/nn_ops.py                      +8   -5
mindspore/ops/operations/other_ops.py                   +3   -2
tests/st/ops/ascend/test_autocast.py                  +244   -0
tests/ut/python/nn/test_cell_wrapper.py                 +1   -1
tests/ut/python/ops/test_math_ops.py                    +1   -1
tests/ut/python/ops/test_math_ops_check.py             +92  -81
mindspore/ccsrc/operator/composite/do_signature.cc  (+141 -40)

```diff
@@ -33,6 +33,9 @@ namespace mindspore {
 namespace prim {
 namespace {
 using PatternListType = std::initializer_list<BaseRef>;
+const std::map<TypeId, size_t> type_map = {{kNumberTypeBool, 1},    {kNumberTypeInt8, 2},    {kNumberTypeUInt8, 3},
+                                           {kNumberTypeInt16, 4},   {kNumberTypeInt32, 5},   {kNumberTypeInt64, 6},
+                                           {kNumberTypeFloat16, 7}, {kNumberTypeFloat32, 8}, {kNumberTypeFloat64, 9}};
 const std::vector<Signature> &GetSignature(const ValuePtr &function) {
   static const auto empty = std::vector<Signature>();
@@ -44,6 +47,16 @@ const std::vector<Signature> &GetSignature(const ValuePtr &function) {
   return empty;
 }
 
+const std::string GetOpName(const ValuePtr &function) {
+  std::string name = "";
+  if (function->isa<Primitive>()) {
+    name = function->cast<PrimitivePyPtr>()->name();
+  } else if (function->isa<MetaFuncGraph>()) {
+    name = function->cast<MetaFuncGraphPtr>()->name();
+  }
+  return name;
+}
+
 void ProcessDefault(const std::string &func_name, const AbstractBasePtrList &args_spec_list,
                     const std::vector<Signature> &signature, bool has_var, std::vector<AnfNodePtr> *op_inputs) {
   std::size_t sig_size = signature.size();
@@ -62,10 +75,89 @@ void ProcessDefault(const std::string &func_name, const AbstractBasePtrList &args_spec_list,
     }
   }
 }
+
+bool CompareTensorScalarType(const TypeId &tensor_type, const size_t &t_type_number, const TypeId &scalar_type,
+                             const size_t &s_type_number) {
+  if (scalar_type == kNumberTypeFloat16 || scalar_type == kNumberTypeFloat32 || scalar_type == kNumberTypeFloat64) {
+    if (tensor_type == kNumberTypeFloat16 || tensor_type == kNumberTypeFloat32 || tensor_type == kNumberTypeFloat64) {
+      return t_type_number >= s_type_number;
+    }
+    return false;
+  }
+  return true;
+}
+
+void setMaxType(TypeId *max_type_id, TypeId *max_type, size_t *max_type_number, const TypeId type_id,
+                const TypeId type, const size_t type_number) {
+  *max_type_id = type_id;
+  *max_type = type;
+  *max_type_number = type_number;
+}
+
+TypeId GetMaxTypeId(const abstract::AbstractBasePtrList &args_spec_list, std::vector<size_t> indexs) {
+  TypeId max_type_id = kTypeUnknown;
+  TypeId max_type = kTypeUnknown;
+  size_t max_type_number = 0;
+  bool has_int8 = false;
+  for (const auto &index : indexs) {
+    TypeId arg_type_id = kTypeUnknown;
+    TypeId arg_type = kTypeUnknown;
+    AbstractBasePtr arg_value = args_spec_list[index];
+    if (arg_value->isa<abstract::AbstractRef>()) {
+      arg_value = arg_value->cast<abstract::AbstractRefPtr>()->ref();
+    }
+    if (arg_value->isa<abstract::AbstractTensor>()) {
+      auto tensor = arg_value->cast<abstract::AbstractTensorPtr>();
+      auto tensor_type = tensor->element()->BuildType();
+      MS_EXCEPTION_IF_NULL(tensor_type);
+      arg_type_id = tensor_type->type_id();
+      arg_type = kObjectTypeTensorType;
+    } else if (arg_value->isa<abstract::AbstractScalar>()) {
+      auto scalar = arg_value->cast<abstract::AbstractScalarPtr>();
+      auto scalar_type = scalar->BuildType();
+      MS_EXCEPTION_IF_NULL(scalar_type);
+      arg_type_id = scalar_type->type_id();
+      arg_type = kObjectTypeNumber;
+    } else {
+      continue;
+    }
+    auto it = type_map.find(arg_type_id);
+    if (it == type_map.end()) {
+      continue;
+    }
+    if (arg_type_id == kNumberTypeInt8) {
+      has_int8 = true;
+    }
+    if (max_type_id == kTypeUnknown) {
+      setMaxType(&max_type_id, &max_type, &max_type_number, arg_type_id, arg_type, it->second);
+      continue;
+    }
+    if (max_type == arg_type) {
+      if (it->second > max_type_number) {
+        setMaxType(&max_type_id, &max_type, &max_type_number, arg_type_id, arg_type, it->second);
+      }
+    } else {
+      if (arg_type == kObjectTypeTensorType) {
+        if (CompareTensorScalarType(arg_type_id, it->second, max_type_id, max_type_number)) {
+          setMaxType(&max_type_id, &max_type, &max_type_number, arg_type_id, arg_type, it->second);
+        }
+      } else {
+        if (!CompareTensorScalarType(max_type_id, max_type_number, arg_type_id, it->second)) {
+          setMaxType(&max_type_id, &max_type, &max_type_number, arg_type_id, arg_type, it->second);
+        }
+      }
+    }
+  }
+  if (max_type_id == kNumberTypeUInt8 && has_int8 == true) {
+    max_type_id = kNumberTypeInt16;
+  }
+  return max_type_id;
+}
+
 // Get the largest type of index in the same SignatureEnumDType of arguments.
-std::map<SignatureEnumDType, size_t> GetMaxDtypeIndex(const std::vector<SignatureEnumDType> &dtypes,
-                                                      const abstract::AbstractBasePtrList &args_spec_list) {
+std::map<SignatureEnumDType, TypeId> GetMaxDtype(const std::vector<SignatureEnumDType> &dtypes,
+                                                 const abstract::AbstractBasePtrList &args_spec_list) {
   // record index for signature.dtypes of the same type
   // eg. [T, T1, T, T2, T, T1, T3] -> {{T:(0,2,4)}, {T1:(1,5)}, {T2:(3)}, {T3:(6)}}
   std::map<SignatureEnumDType, std::vector<size_t>> type_indexs;
@@ -77,10 +169,7 @@ std::map<SignatureEnumDType, size_t> GetMaxDtypeIndex(const std::vector<SignatureEnumDType> &dtypes,
       it->second.push_back(i);
     }
   }
-  // example:sig_dtype:[T, T1, T, T2, T, T1, T3, T4, T4]
-  // and args type: [int, Tensor, Tensor, float, Tensor, int, Tensor, int, float]
-  // result:{{T:2},{T1:1}}
-  std::map<SignatureEnumDType, size_t> dst_type;
+  std::map<SignatureEnumDType, TypeId> dst_type;
   for (auto it = type_indexs.begin(); it != type_indexs.end(); (void)++it) {
     auto type = it->first;
     auto indexs = it->second;
@@ -88,36 +177,36 @@ std::map<SignatureEnumDType, size_t> GetMaxDtypeIndex(const std::vector<SignatureEnumDType> &dtypes,
     if (indexs.size() < 2) {
       continue;
     }
     bool has_tensor = false;
     for (const auto &index : indexs) {
       AbstractBasePtr arg_value = args_spec_list[index];
       if (arg_value->isa<abstract::AbstractRef>()) {
         arg_value = arg_value->cast<abstract::AbstractRefPtr>()->ref();
       }
       if (arg_value->isa<abstract::AbstractTensor>()) {
-        (void)dst_type.insert(std::make_pair(type, index));
         has_tensor = true;
         break;
       }
     }
     if (!has_tensor) {
       (void)dst_type.insert(std::make_pair(type, kTypeUnknown));
       continue;
     }
+    (void)dst_type.insert(std::make_pair(type, GetMaxTypeId(args_spec_list, indexs)));
   }
   return dst_type;
 }
 
-AnfNodePtr DoCast(const AnfNodePtr &param, const AnfNodePtr &source_param, const FuncGraphPtr &graph) {
+AnfNodePtr DoCast(const AnfNodePtr &param, const TypeId &type_id, const FuncGraphPtr &graph) {
   // op and module import path
-  auto prim_dtype = prim::GetPythonOps("dtype", "mindspore.ops.functional");
-  MS_EXCEPTION_IF_NULL(prim_dtype);
   auto prim_cast_class = prim::GetPythonOps("Cast", "mindspore.ops.operations");
   MS_EXCEPTION_IF_NULL(prim_cast_class);
-  auto dtype_node = NewCNode({NewValueNode(prim_dtype), source_param}, graph);
+  auto dtype_node = NewValueNode(TypeIdToType(type_id));
   auto cast_node = NewCNode({NewValueNode(prim_cast_class)}, graph);
   return NewCNode({cast_node, param, dtype_node}, graph);
 }
 
 void DoAutoCast(const std::vector<Signature> &signature, const abstract::AbstractBasePtrList &args_spec_list,
-                const FuncGraphPtr &graph, std::vector<AnfNodePtr> *op_inputs) {
+                const FuncGraphPtr &graph, std::vector<AnfNodePtr> *op_inputs, const std::set<size_t> &write_indexs) {
   std::vector<SignatureEnumDType> dtypes;
   (void)std::transform(signature.begin(), signature.end(), std::back_inserter(dtypes),
                        [](const Signature &sig) { return sig.dtype; });
@@ -126,33 +215,49 @@ void DoAutoCast(const std::vector<Signature> &signature, const abstract::AbstractBasePtrList &args_spec_list,
     return;
   }
-  // Stat the index of the arguments with the largest type in the same SignatureEnumDType.
-  std::map<SignatureEnumDType, size_t> dst_type = GetMaxDtypeIndex(dtypes, args_spec_list);
+  std::map<SignatureEnumDType, TypeId> dst_type = GetMaxDtype(dtypes, args_spec_list);
   // Identify which arg requires auto cast
   for (size_t i = 0; i < args_spec_list.size(); ++i) {
+    auto it = dst_type.find(dtypes[i]);
+    if (it == dst_type.end() || it->second == kTypeUnknown) {
+      continue;
+    }
     AbstractBasePtr arg_value = args_spec_list[i];
     if (arg_value->isa<abstract::AbstractRef>()) {
      arg_value = arg_value->cast<abstract::AbstractRefPtr>()->ref();
     }
-    auto it = dst_type.find(dtypes[i]);
-    if (it == dst_type.end() || it->second == i || !arg_value->isa<abstract::AbstractScalar>()) {
-      continue;
-    }
-    // When scalar is of bool type, the type of tensor must also be of bool type,
-    // otherwise the cast operator will not be added.
-    auto scalar = arg_value->cast<abstract::AbstractScalarPtr>();
-    auto scalar_type = scalar->BuildType();
-    MS_EXCEPTION_IF_NULL(scalar_type);
-    if (scalar_type->type_id() == kNumberTypeBool) {
-      auto tensor = args_spec_list[it->second]->cast<abstract::AbstractTensorPtr>();
-      auto tensor_type = tensor->element()->BuildType();
-      MS_EXCEPTION_IF_NULL(tensor_type);
-      if (tensor_type->type_id() != kNumberTypeBool) {
-        continue;
-      }
-    }
-    // get source node for cast
-    AnfNodePtr source_node = (*op_inputs)[it->second + 1];
-    (*op_inputs)[i + 1] = DoCast((*op_inputs)[i + 1], source_node, graph);
+    TypeId arg_type_id = kTypeUnknown;
+    if (arg_value->isa<abstract::AbstractTensor>()) {
+      auto tensor = arg_value->cast<abstract::AbstractTensorPtr>();
+      auto tensor_type = tensor->element()->BuildType();
+      MS_EXCEPTION_IF_NULL(tensor_type);
+      arg_type_id = tensor_type->type_id();
+    } else if (arg_value->isa<abstract::AbstractScalar>()) {
+      auto scalar = arg_value->cast<abstract::AbstractScalarPtr>();
+      auto scalar_type = scalar->BuildType();
+      MS_EXCEPTION_IF_NULL(scalar_type);
+      arg_type_id = scalar_type->type_id();
+    }
+    auto it_map = type_map.find(arg_type_id);
+    if (it_map == type_map.end()) {
+      continue;
+    }
+    auto rw_it = write_indexs.find(i);
+    if (rw_it != write_indexs.end()) {
+      if (arg_type_id != it->second) {
+        MS_LOG(EXCEPTION) << "In op '" << GetOpName(graph) << "', argument '" << args_spec_list[i]
+                          << "' can not cast type from '" << TypeIdLabel(arg_type_id) << "' to '"
+                          << TypeIdLabel(it->second) << "' automatically.";
+      }
+      continue;
+    }
+    if (arg_value->isa<abstract::AbstractTensor>() && arg_type_id == it->second) {
+      continue;
+    }
+    if ((arg_type_id == kNumberTypeBool || it->second == kNumberTypeBool) && arg_type_id != it->second) {
+      continue;
+    }
+    (*op_inputs)[i + 1] = DoCast((*op_inputs)[i + 1], it->second, graph);
   }
 }
@@ -173,10 +278,10 @@ AnfNodePtr BuildNewCNode(const FuncGraphPtr &func_graph, const std::string &func_name,
     }
   }
   std::vector<AnfNodePtr> op_inputs;
+  std::set<size_t> write_indexs;
   op_inputs.push_back(NewValueNode(function));
-  // Assume, the write input of op is always the first input. We check if any write op,
-  // and add cast op on other inputs to keep the same type with assigned parameter.
-  AnfNodePtr assign_source = nullptr;
   for (size_t i = 0; i < args_spec_list.size(); ++i) {
     AnfNodePtr param = params_list[i];
     SignatureEnumRW sig = SignatureEnumRW::kRWDefault;
@@ -191,22 +296,18 @@ AnfNodePtr BuildNewCNode(const FuncGraphPtr &func_graph, const std::string &func_name,
       if (sig == SignatureEnumRW::kRWRead) {
         param = func_graph->NewCNode({NewValueNode(prim::kPrimGetRefValue), param});
       } else if (sig == SignatureEnumRW::kRWWrite) {
-        assign_source = func_graph->NewCNode({NewValueNode(prim::kPrimGetRefOrigin), param});
+        write_indexs.insert(i);
         param = func_graph->NewCNode({NewValueNode(prim::kPrimGetRefKey), param});
       }
       // If sig is SignatureEnumRW::kRWRef, not do anything.
     } else if (sig == SignatureEnumRW::kRWWrite && type->type_id() != kObjectTypeRefKey) {
      MS_EXCEPTION(TypeError) << "Function " << func_name << "'s input " << i << " should be a Parameter.";
    }
-    // add cast op here
-    if (assign_source != nullptr && sig != SignatureEnumRW::kRWWrite) {
-      param = DoCast(param, assign_source, func_graph);
-    }
     op_inputs.push_back(param);
   }
   // process default
   ProcessDefault(func_name, args_spec_list, signature, has_var, &op_inputs);
-  DoAutoCast(signature, args_spec_list, func_graph, &op_inputs);
+  DoAutoCast(signature, args_spec_list, func_graph, &op_inputs, write_indexs);
   return func_graph->NewCNode(op_inputs);
 }
 }  // namespace
```
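Taken together, the hunks above replace the old "cast the other inputs to match the assigned parameter" scheme with a ranked promotion table: within each signature dtype group, every tensor or scalar argument is looked up in `type_map`, the highest-ranked participant wins (a tensor outranks a scalar unless the scalar is floating point and the tensor is not), groups containing no tensor are left at `kTypeUnknown` and skipped, and a uint8/int8 mix is widened to int16. Below is a minimal Python sketch of that rule as we read it from this hunk; `RANK`, `max_type`, and the other names are ours for illustration, not MindSpore API.

```python
# Minimal sketch of the promotion rule implemented by GetMaxTypeId above.
# RANK mirrors the C++ type_map; all other names here are illustrative only.
RANK = {'bool': 1, 'int8': 2, 'uint8': 3, 'int16': 4, 'int32': 5,
        'int64': 6, 'float16': 7, 'float32': 8, 'float64': 9}
FLOATS = {'float16', 'float32', 'float64'}


def tensor_beats_scalar(tensor_t, scalar_t):
    """Mirrors CompareTensorScalarType: a tensor keeps priority over a scalar
    unless the scalar is floating point and the tensor is not."""
    if scalar_t in FLOATS:
        return tensor_t in FLOATS and RANK[tensor_t] >= RANK[scalar_t]
    return True


def max_type(args):
    """args: (dtype_name, is_tensor) pairs that share one signature dtype tag."""
    best = best_is_tensor = None
    has_int8 = any(t == 'int8' for t, _ in args)
    for t, is_tensor in args:
        if best is None:
            best, best_is_tensor = t, is_tensor
        elif is_tensor == best_is_tensor:
            if RANK[t] > RANK[best]:
                best, best_is_tensor = t, is_tensor
        elif is_tensor:
            if tensor_beats_scalar(t, best):
                best, best_is_tensor = t, is_tensor
        elif not tensor_beats_scalar(best, t):
            best, best_is_tensor = t, is_tensor
    if best == 'uint8' and has_int8:
        best = 'int16'  # neither uint8 nor int8 covers the other's range
    return best


assert max_type([('uint8', True), ('int8', True)]) == 'int16'
assert max_type([('float16', True), ('int64', True)]) == 'float16'
assert max_type([('int32', True), ('float32', False)]) == 'float32'
```

The three assertions correspond to cases exercised by tests/st/ops/ascend/test_autocast.py later in this commit.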
mindspore/common/initializer.py  (+2 -2)

```diff
@@ -321,8 +321,8 @@ def initializer(init, shape=None, dtype=mstype.float32):
         dtype (:class:`mindspore.dtype`): The type of data in initialized tensor. Default: mindspore.float32.
 
     Returns:
-        Union[Tensor, Initialized], When `init` is Tensor, the return is Tensor object,
-        otherwise the return is Initialize object.
+        Union[Tensor, Initializer], When `init` is Tensor, the return is Tensor object,
+        otherwise the return is Initialize object.
 
     Examples:
         >>> tensor = initializer('ones', [1, 2, 3], mindspore.float32)
```
mindspore/common/parameter.py  (+5 -0)

```diff
@@ -16,6 +16,7 @@
 """Parameter for cell."""
 import numbers
 from copy import copy, deepcopy
+from . import dtype as mstype
 from .initializer import initializer, Initializer
 from .tensor import Tensor, MetaTensor
 from .._checkparam import _check_str_by_regular
@@ -199,6 +200,10 @@ class Parameter:
         elif isinstance(data, Initializer):
             self.init_mode = data
             data = MetaTensor(self.init_mode.dtype, self.init_mode.shape)
+        elif isinstance(data, int):
+            data = Tensor(data, dtype=mstype.int32)
+        elif isinstance(data, float):
+            data = Tensor(data, dtype=mstype.float32)
         else:
             data = Tensor(data)
             data.init_flag = False
```
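A small usage sketch of the new coercion rules, assuming the Parameter constructor of this era; the comments are our reading of the hunk above:

```python
# Hypothetical example: bare Python scalars passed to Parameter now become
# typed Tensors (int -> int32, float -> float32) instead of plain Tensor(data).
from mindspore import Parameter

step = Parameter(1000, name='global_step')    # stored as a mstype.int32 Tensor
rate = Parameter(0.01, name='learning_rate')  # stored as a mstype.float32 Tensor
```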
mindspore/ops/operations/math_ops.py  (+4 -4)

```diff
@@ -166,8 +166,8 @@ class AssignAdd(PrimitiveWithInfer):
     >>> net(value)
     """
     __mindspore_signature__ = (
-        ('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD),
-        ('value', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD)
+        ('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE, sig_dtype.T),
+        ('value', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE, sig_dtype.T)
     )
 
     @prim_attr_register
@@ -210,8 +210,8 @@ class AssignSub(PrimitiveWithInfer):
     """
     __mindspore_signature__ = (
-        ('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD),
-        ('value', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD)
+        ('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE, sig_dtype.T),
+        ('value', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE, sig_dtype.T)
     )
 
     @prim_attr_register
```
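Each signature entry grows from a 3-tuple to a 5-tuple: (name, read/write kind, positional kind, default-value marker, dtype group). Tagging both inputs with the same `sig_dtype.T` is what lets `DoAutoCast` group them and insert a `Cast` on the lower-priority read input. A hedged usage sketch follows; the `AddNet` cell is ours, and the expected behaviour is our reading of the new rules, not a claim verified against this commit:

```python
import numpy as np
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor, Parameter
from mindspore import dtype as mstype
from mindspore.ops import operations as P


class AddNet(nn.Cell):
    def __init__(self):
        super(AddNet, self).__init__()
        self.assign_add = P.AssignAdd()
        # int64 accumulator: the write input fixes the target dtype of group T
        self.var = Parameter(Tensor(np.zeros([1]), mstype.int64), name='var')

    def construct(self, value):
        return self.assign_add(self.var, value)


context.set_context(mode=context.GRAPH_MODE)
net = AddNet()
# int32 ranks below int64, so 'value' should be cast up to int64 automatically;
# before this commit the caller had to match the dtypes by hand.
net(Tensor(np.ones([1]), mstype.int32))
```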
mindspore/ops/operations/nn_ops.py  (+8 -5)

```diff
@@ -24,6 +24,7 @@ import numpy as np
 from ... import context
 from ..._c_expression import signature_rw as sig_rw
 from ..._c_expression import signature_kind as sig_kind
+from ..._c_expression import signature_dtype as sig_dtype
 from ..._checkparam import Validator as validator
 from ..._checkparam import Rel
 from ...common import dtype as mstype
@@ -1495,11 +1496,13 @@ class ApplyMomentum(PrimitiveWithInfer):
     Please refer to the usage in nn.ApplyMomentum.
     """
     __mindspore_signature__ = (
-        ('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD),
-        ('accumulation', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD),
-        ('learning_rate', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD),
-        ('gradient', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD),
-        ('momentum', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD)
+        ('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE, sig_dtype.T),
+        ('accumulation', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE,
+         sig_dtype.T),
+        ('learning_rate', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE,
+         sig_dtype.T),
+        ('gradient', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE, sig_dtype.T),
+        ('momentum', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE, sig_dtype.T)
     )
 
     @prim_attr_register
```
mindspore/ops/operations/other_ops.py  (+3 -2)

```diff
@@ -16,6 +16,7 @@
 """Other operators."""
 from ..._c_expression import signature_rw as sig_rw
 from ..._c_expression import signature_kind as sig_kind
+from ..._c_expression import signature_dtype as sig_dtype
 from ..._checkparam import Validator as validator, Rel
 from ...common import dtype as mstype
 from ..primitive import Primitive, PrimitiveWithInfer, prim_attr_register
@@ -46,8 +47,8 @@ class Assign(PrimitiveWithInfer):
     >>> net(x)
     """
     __mindspore_signature__ = (
-        ('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD),
-        ('value', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD)
+        ('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE, sig_dtype.T),
+        ('value', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD, sig_kind.KIND_EMPTY_DEFAULT_VALUE, sig_dtype.T)
     )
 
     @prim_attr_register
     def __init__(self):
```
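The flip side of putting `variable` and `value` in one dtype group is the write-protection added in do_signature.cc: a write input is never cast, so when the promoted type of the group outranks the Parameter's own dtype, compilation should now fail with the new "can not cast type ... automatically" error instead of silently rewriting the Parameter. A hypothetical sketch (`AssignNet` is ours):

```python
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore import dtype as mstype
from mindspore.ops import operations as P


class AssignNet(nn.Cell):
    def __init__(self):
        super(AssignNet, self).__init__()
        self.assign = P.Assign()
        self.var = Parameter(Tensor(np.zeros([1]), mstype.int32), name='var')

    def construct(self, value):
        return self.assign(self.var, value)


net = AssignNet()
net(Tensor(np.ones([1]), mstype.int32))      # fine: dtypes already agree
# net(Tensor(np.ones([1]), mstype.float32))  # expected to fail: float32 outranks
#                                            # int32, but a write input is never cast
```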
tests/st/ops/ascend/test_autocast.py  (new file, +244 -0)

```python
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""multitype_ops directory test case"""
import numpy as np
from functools import partial, reduce
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import dtype as mstype
from mindspore.ops import functional as F, composite as C
import mindspore.context as context
import pytest


class TensorIntAutoCast(nn.Cell):
    def __init__(self,):
        super(TensorIntAutoCast, self).__init__()
        self.i = 2

    def construct(self, t):
        z = F.tensor_mul(t, self.i)
        return z


class TensorFPAutoCast(nn.Cell):
    def __init__(self,):
        super(TensorFPAutoCast, self).__init__()
        self.f = 1.2

    def construct(self, t):
        z = F.tensor_mul(t, self.f)
        return z


class TensorBoolAutoCast(nn.Cell):
    def __init__(self,):
        super(TensorBoolAutoCast, self).__init__()
        self.f = True

    def construct(self, t):
        z = F.tensor_mul(t, self.f)
        return z


class TensorAutoCast(nn.Cell):
    def __init__(self,):
        super(TensorAutoCast, self).__init__()

    def construct(self, t1, t2):
        z = F.tensor_mul(t1, t2)
        return z


def test_tensor_auto_cast():
    context.set_context(mode=context.GRAPH_MODE)
    t0 = Tensor([True, False], mstype.bool_)
    t_uint8 = Tensor(np.ones([2, 1, 2, 2]), mstype.uint8)
    t_int8 = Tensor(np.ones([2, 1, 2, 2]), mstype.int8)
    t_int16 = Tensor(np.ones([2, 1, 2, 2]), mstype.int16)
    t_int32 = Tensor(np.ones([2, 1, 2, 2]), mstype.int32)
    t_int64 = Tensor(np.ones([2, 1, 2, 2]), mstype.int64)
    t_fp16 = Tensor(np.ones([2, 1, 2, 2]), mstype.float16)
    t_fp32 = Tensor(np.ones([2, 1, 2, 2]), mstype.float32)
    t_fp64 = Tensor(np.ones([2, 1, 2, 2]), mstype.float64)
    net = TensorAutoCast()
    rs = net(t_uint8, t_int8)
    assert rs.dtype() == mstype.int16
    rs = net(t_uint8, t_int16)
    assert rs.dtype() == mstype.int16
    rs = net(t_uint8, t_int32)
    assert rs.dtype() == mstype.int32
    rs = net(t_uint8, t_int64)
    assert rs.dtype() == mstype.int64
    rs = net(t_int8, t_int16)
    assert rs.dtype() == mstype.int16
    rs = net(t_int8, t_int32)
    assert rs.dtype() == mstype.int32
    rs = net(t_int8, t_int64)
    assert rs.dtype() == mstype.int64
    rs = net(t_int16, t_int32)
    assert rs.dtype() == mstype.int32
    rs = net(t_int16, t_int64)
    assert rs.dtype() == mstype.int64
    rs = net(t_int32, t_int64)
    assert rs.dtype() == mstype.int64
    rs = net(t_fp16, t_fp32)
    assert rs.dtype() == mstype.float32
    rs = net(t_fp16, t_fp64)
    assert rs.dtype() == mstype.float64
    rs = net(t_fp32, t_fp64)
    assert rs.dtype() == mstype.float64
    rs = net(t_uint8, t_fp16)
    assert rs.dtype() == mstype.float16
    rs = net(t_uint8, t_fp32)
    assert rs.dtype() == mstype.float32
    rs = net(t_uint8, t_fp64)
    assert rs.dtype() == mstype.float64
    rs = net(t_int8, t_fp64)
    assert rs.dtype() == mstype.float64
    rs = net(t_int16, t_fp64)
    assert rs.dtype() == mstype.float64
    rs = net(t_int32, t_fp64)
    assert rs.dtype() == mstype.float64
    rs = net(t_int64, t_fp64)
    assert rs.dtype() == mstype.float64
    rs = net(t_fp16, t_int8)
    assert rs.dtype() == mstype.float16
    rs = net(t_fp16, t_uint8)
    assert rs.dtype() == mstype.float16
    rs = net(t_fp16, t_int16)
    assert rs.dtype() == mstype.float16
    rs = net(t_fp16, t_int32)
    assert rs.dtype() == mstype.float16
    rs = net(t_fp16, t_int64)
    assert rs.dtype() == mstype.float16
    tint = TensorIntAutoCast()
    rs = tint(t_uint8)
    assert rs.dtype() == mstype.uint8
    rs = tint(t_int8)
    assert rs.dtype() == mstype.int8
    rs = tint(t_int16)
    assert rs.dtype() == mstype.int16
    rs = tint(t_int32)
    assert rs.dtype() == mstype.int32
    rs = tint(t_int64)
    assert rs.dtype() == mstype.int64
    rs = tint(t_fp16)
    assert rs.dtype() == mstype.float16
    rs = tint(t_fp32)
    assert rs.dtype() == mstype.float32
    rs = tint(t_fp64)
    assert rs.dtype() == mstype.float64
    tfp = TensorFPAutoCast()
    rs = tfp(t_uint8)
    assert rs.dtype() == mstype.float32
    rs = tfp(t_int8)
    assert rs.dtype() == mstype.float32
    rs = tfp(t_int16)
    assert rs.dtype() == mstype.float32
    rs = tfp(t_int32)
    assert rs.dtype() == mstype.float32
    rs = tfp(t_int64)
    assert rs.dtype() == mstype.float32
    rs = tfp(t_fp16)
    assert rs.dtype() == mstype.float32
    rs = tfp(t_fp32)
    assert rs.dtype() == mstype.float32
    rs = tfp(t_fp64)
    assert rs.dtype() == mstype.float64
    t_uint16 = Tensor(np.ones([2, 1, 2, 2]), mstype.uint16)
    t_uint32 = Tensor(np.ones([2, 1, 2, 2]), mstype.uint32)
    t_uint64 = Tensor(np.ones([2, 1, 2, 2]), mstype.uint64)
    with pytest.raises(TypeError):
        net(t_uint16, t_uint8)
    with pytest.raises(TypeError):
        net(t_uint16, t_int8)
    with pytest.raises(TypeError):
        net(t_uint16, t_int16)
    with pytest.raises(TypeError):
        net(t_uint16, t_int32)
    with pytest.raises(TypeError):
        net(t_uint16, t_int64)
    with pytest.raises(TypeError):
        net(t_uint32, t_uint8)
    with pytest.raises(TypeError):
        net(t_uint32, t_int8)
    with pytest.raises(TypeError):
        net(t_uint32, t_int16)
    with pytest.raises(TypeError):
        net(t_uint32, t_int32)
    with pytest.raises(TypeError):
        net(t_uint32, t_int64)
    with pytest.raises(TypeError):
        net(t_uint64, t_uint8)
    with pytest.raises(TypeError):
        net(t_uint64, t_int8)
    with pytest.raises(TypeError):
        net(t_uint64, t_int16)
    with pytest.raises(TypeError):
        net(t_uint64, t_int32)
    with pytest.raises(TypeError):
        net(t_uint64, t_int64)
    with pytest.raises(TypeError):
        net(t_uint16, t_fp16)
    with pytest.raises(TypeError):
        net(t_uint16, t_fp32)
    with pytest.raises(TypeError):
        net(t_uint16, t_fp64)
    with pytest.raises(TypeError):
        net(t_uint32, t_fp16)
    with pytest.raises(TypeError):
        net(t_uint32, t_fp32)
    with pytest.raises(TypeError):
        net(t_uint32, t_fp64)
    with pytest.raises(TypeError):
        net(t_uint64, t_fp16)
    with pytest.raises(TypeError):
        net(t_uint64, t_fp32)
    with pytest.raises(TypeError):
        net(t_uint64, t_fp64)
    with pytest.raises(TypeError):
        tfp(t_uint16)
    with pytest.raises(TypeError):
        tfp(t_uint32)
    with pytest.raises(TypeError):
        tfp(t_uint64)
    with pytest.raises(TypeError):
        tint(t_uint16)
    with pytest.raises(TypeError):
        tint(t_uint32)
    with pytest.raises(TypeError):
        tint(t_uint64)
    bnet = TensorBoolAutoCast()
    with pytest.raises(TypeError):
        bnet(t_uint8)
    with pytest.raises(TypeError):
        bnet(t_int8)
    with pytest.raises(TypeError):
        bnet(t_int16)
    with pytest.raises(TypeError):
        bnet(t_int32)
    with pytest.raises(TypeError):
        bnet(t_int64)
    with pytest.raises(TypeError):
        bnet(t_fp16)
    with pytest.raises(TypeError):
        bnet(t_fp32)
    with pytest.raises(TypeError):
        bnet(t_fp64)
```
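The uint8 x int8 -> int16 expectations in this test come from the special case at the end of `GetMaxTypeId`, and they match NumPy's standard promotion rule, which can serve as a sanity check:

```python
import numpy as np

# Neither int8 nor uint8 can represent the other's full range, so the
# promotion table widens the pair to int16 -- the same rule NumPy applies.
assert np.result_type(np.uint8, np.int8) == np.int16
```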
tests/ut/python/nn/test_cell_wrapper.py  (+1 -1)

```diff
@@ -64,7 +64,7 @@ def test_parameter_update_int32_and_tensor():
     param_step = train_network.parameters_dict()['global_step']
     update_global_step = ParameterUpdate(param_step)
 
-    input_step = Tensor(np.array([1000]), mstype.float32)
+    input_step = Tensor(np.array([1000]), mstype.int32)
     _executor.compile(update_global_step, input_step)
```
tests/ut/python/ops/test_math_ops.py  (+1 -1)

```diff
@@ -463,7 +463,7 @@ raise_set = [
         'block': (lambda x: P.StridedSlice(new_axis_mask="1.1"), {'exception': TypeError}),
         'desc_inputs': [0]}),
     ('AssignAdd_Error', {
-        'block': (P.AssignAdd(), {'exception': TypeError}),
+        'block': (P.AssignAdd(), {'exception': IndexError}),
         'desc_inputs': [[1]]}),
 ]
```
tests/ut/python/ops/test_math_ops_check.py  (+92 -81)

```diff
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """ test ops """
+import functools
 import numpy as np
 import mindspore.nn as nn
@@ -22,7 +23,8 @@ from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P
 from ....mindspore_test_framework.mindspore_test import mindspore_test
 from ....mindspore_test_framework.pipeline.forward.compile_forward \
-    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception
+    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception, \
+    pipeline_for_compile_forward_ge_graph_for_case_by_case_config
 
 class AssignAddNet(nn.Cell):
@@ -77,11 +79,6 @@ class CumSumNet(nn.Cell):
 raise_set = [
-    # input two tensors, but element types are not same
-    ('TensorAdd1', {
-        'block': (P.TensorAdd(), {'exception': TypeError, 'error_keywords': ['TensorAdd']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # input two tensors, their shapes do not match
     ('TensorAdd2', {
         'block': (P.TensorAdd(), {'exception': ValueError, 'error_keywords': ['TensorAdd']}),
@@ -256,22 +253,12 @@ raise_set = [
         'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))],
         'skip': ['backward']}),
-    # input two tensors, but element types are not same
-    ('Sub1', {
-        'block': (P.Sub(), {'exception': TypeError, 'error_keywords': ['Sub']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # input two tensors, their shapes do not match
     ('Sub2', {
         'block': (P.Sub(), {'exception': ValueError, 'error_keywords': ['Sub']}),
         'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))],
         'skip': ['backward']}),
-    # input two tensors, but element types are not same
-    ('Mul1', {
-        'block': (P.Mul(), {'exception': TypeError, 'error_keywords': ['Mul']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # input two tensors, their shapes do not match
     ('Mul2', {
         'block': (P.Mul(), {'exception': ValueError, 'error_keywords': ['Mul']}),
@@ -327,55 +314,30 @@ raise_set = [
         'desc_inputs': [5.0],
         'skip': ['backward']}),
-    # input two tensors, but element types are not same
-    ('Minimum1', {
-        'block': (P.Minimum(), {'exception': TypeError, 'error_keywords': ['Minimum']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # input two tensors, their shapes do not match
     ('Minimum2', {
         'block': (P.Minimum(), {'exception': ValueError, 'error_keywords': ['Minimum']}),
         'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))],
         'skip': ['backward']}),
-    # input two tensors, but element types are not same
-    ('Maximum1', {
-        'block': (P.Maximum(), {'exception': TypeError, 'error_keywords': ['Maximum']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # input two tensors, their shapes do not match
     ('Maximum2', {
         'block': (P.Maximum(), {'exception': ValueError, 'error_keywords': ['Maximum']}),
         'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))],
         'skip': ['backward']}),
-    # input two tensors, but element types are not same
-    ('RealDiv1', {
-        'block': (P.RealDiv(), {'exception': TypeError, 'error_keywords': ['RealDiv']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # input two tensors, their shapes do not match
     ('RealDiv2', {
         'block': (P.RealDiv(), {'exception': ValueError, 'error_keywords': ['RealDiv']}),
         'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))],
         'skip': ['backward']}),
-    # input two tensors, but element types are not same
-    ('Div1', {
-        'block': (P.Div(), {'exception': TypeError, 'error_keywords': ['Div']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # input two tensors, their shapes do not match
     ('Div2', {
         'block': (P.Div(), {'exception': ValueError, 'error_keywords': ['Div']}),
         'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))],
         'skip': ['backward']}),
-    # input two tensors, but element types are not same
-    ('FloorDiv1', {
-        'block': (P.FloorDiv(), {'exception': TypeError, 'error_keywords': ['FloorDiv']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # input two tensors, their shapes do not match
     ('FloorDiv2', {
         'block': (P.FloorDiv(), {'exception': ValueError, 'error_keywords': ['FloorDiv']}),
@@ -389,11 +351,6 @@ raise_set = [
         'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.int32))],
         'skip': ['backward']}),
-    # input two tensors, but element types are not same
-    ('FloorMod1', {
-        'block': (P.FloorMod(), {'exception': TypeError, 'error_keywords': ['FloorMod']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # input two tensors, their shapes do not match
     ('FFloorMod2', {
         'block': (P.FloorMod(), {'exception': ValueError, 'error_keywords': ['FloorMod']}),
@@ -407,11 +364,6 @@ raise_set = [
         'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))],
         'skip': ['backward']}),
-    # type of x and y not match
-    ('Equal1', {
-        'block': (P.Equal(), {'exception': TypeError, 'error_keywords': ['Equal']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # shape of x and y not match
     ('Equal2', {
         'block': (P.Equal(), {'exception': ValueError, 'error_keywords': ['Equal']}),
@@ -430,55 +382,30 @@ raise_set = [
         'skip': ['backward']}),
-    # type of x and y not match
-    ('NotEqual1', {
-        'block': (P.NotEqual(), {'exception': TypeError, 'error_keywords': ['NotEqual']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # shape of x and y not match
     ('NotEqual2', {
         'block': (P.NotEqual(), {'exception': ValueError, 'error_keywords': ['NotEqual']}),
         'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32))],
         'skip': ['backward']}),
-    # type of x and y not match
-    ('Greater1', {
-        'block': (P.Greater(), {'exception': TypeError, 'error_keywords': ['Greater']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # shape of x and y not match
     ('Greater2', {
         'block': (P.Greater(), {'exception': ValueError, 'error_keywords': ['Greater']}),
         'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32))],
         'skip': ['backward']}),
-    # type of x and y not match
-    ('GreaterEqual1', {
-        'block': (P.GreaterEqual(), {'exception': TypeError, 'error_keywords': ['GreaterEqual']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # shape of x and y not match
     ('GreaterEqual2', {
         'block': (P.GreaterEqual(), {'exception': ValueError, 'error_keywords': ['GreaterEqual']}),
         'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32))],
         'skip': ['backward']}),
-    # type of x and y not match
-    ('Less1', {
-        'block': (P.Less(), {'exception': TypeError, 'error_keywords': ['Less']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # shape of x and y not match
     ('Less2', {
         'block': (P.Less(), {'exception': ValueError, 'error_keywords': ['Less']}),
         'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32))],
         'skip': ['backward']}),
-    # type of x and y not match
-    ('LessEqual1', {
-        'block': (P.LessEqual(), {'exception': TypeError, 'error_keywords': ['LessEqual']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # shape of x and y not match
     ('LessEqual2', {
         'block': (P.LessEqual(), {'exception': ValueError, 'error_keywords': ['LessEqual']}),
@@ -643,11 +570,6 @@ raise_set = [
         'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))],
         'skip': ['backward']}),
-    # input two tensors, but element types are not same
-    ('Atan21', {
-        'block': (P.Atan2(), {'exception': TypeError, 'error_keywords': ['Atan2']}),
-        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
-        'skip': ['backward']}),
     # input two tensors, their shapes do not match
     ('Atan22', {
         'block': (P.Atan2(), {'exception': ValueError, 'error_keywords': ['Atan2']}),
@@ -655,7 +577,96 @@ raise_set = [
         'skip': ['backward']}),
 ]
 
+test_case_math_ops = [
+    # input two tensors, but element types are not same
+    ('TensorAdd1', {
+        'block': P.TensorAdd(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # input two tensors, but element types are not same
+    ('Sub1', {
+        'block': P.Sub(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # input two tensors, but element types are not same
+    ('Mul1', {
+        'block': P.Mul(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # input two tensors, but element types are not same
+    ('Minimum1', {
+        'block': P.Minimum(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # input two tensors, but element types are not same
+    ('Maximum1', {
+        'block': P.Maximum(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # input two tensors, but element types are not same
+    ('RealDiv1', {
+        'block': P.RealDiv(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # input two tensors, but element types are not same
+    ('Div1', {
+        'block': P.Div(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # input two tensors, but element types are not same
+    ('FloorDiv1', {
+        'block': P.FloorDiv(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # input two tensors, but element types are not same
+    ('FloorMod1', {
+        'block': P.FloorMod(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # type of x and y not match
+    ('Equal1', {
+        'block': P.Equal(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # type of x and y not match
+    ('NotEqual1', {
+        'block': P.NotEqual(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # type of x and y not match
+    ('Greater1', {
+        'block': P.Greater(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # type of x and y not match
+    ('GreaterEqual1', {
+        'block': P.GreaterEqual(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # type of x and y not match
+    ('Less1', {
+        'block': P.Less(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # type of x and y not match
+    ('LessEqual1', {
+        'block': P.LessEqual(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+    # input two tensors, but element types are not same
+    ('Atan21', {
+        'block': P.Atan2(),
+        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
+        'skip': ['backward']}),
+]
+
 
 @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)
 def test_check_exception():
     return raise_set
+
+
+@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
+def test_exec():
+    import mindspore.context as context
+    context.set_context(mode=context.GRAPH_MODE)
+    return functools.reduce(lambda x, y: x + y, [test_case_math_ops])
```