Project: magicwindyyd/mindspore (forked from MindSpore / mindspore)

Commit 86d197df
Author: jinyaohui
Authored on: May 29, 2020
Parent: 85e686e0

    clean pylint

Showing 48 changed files with 211 additions and 185 deletions (+211 −185)
Changed files:

mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py         +2  −2
mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py        +1  −0
model_zoo/Transformer/src/transformer_for_train.py                       +9  −4
model_zoo/bert/src/bert_for_pre_training.py                              +5  −0
tests/mindspore_test_framework/apps/test_bert_parts.py                   +4  −3
tests/mindspore_test_framework/components/executor/check_exceptions.py   +2  −1
tests/mindspore_test_framework/utils/check_gradient.py                   +2  −1
tests/mindspore_test_framework/utils/dataset_util.py                     +2  −1
tests/mindspore_test_framework/utils/debug_util.py                       +1  −2
tests/mindspore_test_framework/utils/other_util.py                       +1  −2
tests/st/networks/models/bert/src/bert_for_pre_training.py               +5  −0
tests/st/ops/ascend/test_autocast.py                                     +12 −7
tests/st/ops/ascend/test_ops_infer.py                                    +5  −1
tests/st/ops/cpu/test_transpose_op.py                                    +3  −1
tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py              +1  −0
tests/ut/python/dtype/test_dictionary.py                                 +2  −2
tests/ut/python/dtype/test_hypermap.py                                   +0  −28
tests/ut/python/dtype/test_list.py                                       +2  −2
tests/ut/python/dtype/test_tuple.py                                      +1  −1
tests/ut/python/exec/test_AssignAdd.py                                   +1  −1
tests/ut/python/ir/test_tensor.py                                        +3  −3
tests/ut/python/keep_order/test_keep_order.py                            +4  −4
tests/ut/python/model/test_mix_precision.py                              +0  −3
tests/ut/python/nn/test_pooling.py                                       +3  −2
tests/ut/python/nn/test_psnr.py                                          +3  −3
tests/ut/python/nn/test_ssim.py                                          +15 −15
tests/ut/python/onnx/test_onnx.py                                        +3  −2
tests/ut/python/ops/test_math_ops_check.py                               +4  −3
tests/ut/python/ops/test_ops_attr_infer.py                               +48 −3
tests/ut/python/ops/test_ops_check.py                                    +1  −1
tests/ut/python/optimizer/test_debug_location.py                         +5  −5
tests/ut/python/optimizer/test_optimize_with_loss_scale.py               +8  −8
tests/ut/python/parallel/test_alltoall.py                                +2  −2
tests/ut/python/parallel/test_two_matmul.py                              +1  −1
tests/ut/python/parameter_feature/test_parameter.py                      +1  −1
tests/ut/python/parameter_feature/test_var_grad.py                       +11 −11
tests/ut/python/pipeline/parse/test_cont_break.py                        +1  −1
tests/ut/python/pynative_mode/test_framstruct.py                         +2  −0
tests/ut/python/pynative_mode/test_hook.py                               +15 −14
tests/ut/python/pynative_mode/test_stop_gradient.py                      +1  −2
tests/ut/python/train/quant/mobilenetv2.py                               +1  −0
tests/ut/python/train/quant/mobilenetv2_combined.py                      +1  −0
tests/ut/python/train/quant/test_quant.py                                +0  −30
tests/ut/python/train/summary/summary_reader.py                          +2  −1
tests/ut/python/train/test_amp.py                                        +9  −8
tests/ut/python/train/test_training.py                                   +1  −1
tests/ut/python/utils/test_callback.py                                   +3  −2
tests/vm_impl/nn_ops_vm_impl.py                                          +2  −0
mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py

@@ -244,8 +244,8 @@ def check_supported(input_x1, input_x2, bias=None, output_y={}, trans_a=False, t
     return True


-# pylint: disable=locally-disabled,too-many-arguments, too-many-locals, too-many-statements, inconsistent-return-statements
+# pylint: disable=locally-disabled,too-many-arguments, too-many-locals, too-many-statements
+# pylint: disable=inconsistent-return-statements
 # @util.check_input_type(dict, dict, (dict, NoneType), dict, bool, bool, str)
 @op_info_register(matmul_cube_dense_left_op_info)
 def CusMatMulCubeDenseLeft(input_x1, input_x2, bias=None, output_y={}, trans_a=False, trans_b=False,
mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py

@@ -40,6 +40,7 @@ matmul_cube_dense_right_op_info = TBERegOp("CusMatMulCubeDenseRight") \
     .get_op_info()


+# pylint: disable=inconsistent-return-statements
 @op_info_register(matmul_cube_dense_right_op_info)
 def CusMatMulCubeDenseRight(input_x1, input_x2, input_x3, bias=None, output_y={}, trans_a=False, trans_b=False,
                             kernel_name="matmulcube"):
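For reference, R1710 (inconsistent-return-statements) fires when some paths of a function return a value and others fall off the end. A minimal, self-contained sketch of the shape these TBE kernel helpers have (the function below is illustrative, not taken from the repository):

# Illustrative only: mirrors why the two kernels above carry the pragma.
# pylint: disable=inconsistent-return-statements
def check_supported(shape):
    """Return True for supported shapes; implicitly returns None otherwise."""
    if len(shape) == 2:
        return True
    # Falling off the end returns None, which R1710 flags; the pragma
    # keeps the original control flow unchanged instead of adding a return.

Silencing the message rather than adding an explicit `return None` preserves the kernels' existing behavior byte-for-byte, which matters in a cleanup-only commit.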
model_zoo/Transformer/src/transformer_for_train.py

@@ -31,6 +31,8 @@ from .transformer_model import TransformerModel
 GRADIENT_CLIP_TYPE = 1
 GRADIENT_CLIP_VALUE = 5.0

+
+# pylint: disable=consider-using-in
 class ClipGradients(nn.Cell):
     """
     Clip gradients.

@@ -48,11 +50,12 @@ class ClipGradients(nn.Cell):
         self.clip_by_norm = nn.ClipByNorm()
         self.cast = P.Cast()
         self.dtype = P.DType()

+
     def construct(self,
                   grads,
                   clip_type,
                   clip_value):
-        #return grads
+        # return grads
         if clip_type != 0 and clip_type != 1:
             return grads

@@ -83,8 +86,8 @@ class TransformerTrainingLoss(nn.Cell):
         super(TransformerTrainingLoss, self).__init__(auto_prefix=False)
         self.vocab_size = config.vocab_size
         self.onehot = P.OneHot()
         self.on_value = Tensor(float(1 - config.label_smoothing), mstype.float32)
         self.off_value = Tensor(config.label_smoothing / float(self.vocab_size - 1), mstype.float32)
         self.reduce_sum = P.ReduceSum()
         self.reduce_mean = P.ReduceMean()
         self.reshape = P.Reshape()

@@ -92,7 +95,7 @@ class TransformerTrainingLoss(nn.Cell):
         self.flatten = P.Flatten()
         self.neg = P.Neg()
         self.cast = P.Cast()
         self.flat_shape = (config.batch_size * config.seq_length,)

     def construct(self, prediction_scores, label_ids, label_weights):
         """Defines the computation performed."""

@@ -217,10 +220,12 @@ class TransformerTrainOneStepCell(nn.Cell):
 grad_scale = C.MultitypeFuncGraph("grad_scale")
 reciprocal = P.Reciprocal()

+
 @grad_scale.register("Tensor", "Tensor")
 def tensor_grad_scale(scale, grad):
     return grad * F.cast(reciprocal(scale), F.dtype(grad))

+
 class TransformerTrainOneStepWithLossScaleCell(nn.Cell):
     """
     Encapsulation class of Transformer network training.
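The pragma added above suppresses R1714 (consider-using-in), which would rewrite the chained comparison in ClipGradients.construct as a membership test. A plausible reason for keeping the chained form, offered here as an assumption: construct is compiled by MindSpore's graph mode, where `clip_type not in (0, 1)` may not have been supported at the time. The two equivalent forms, in plain Python:

clip_type = 2

# The chained form kept in ClipGradients.construct; R1714 would flag it.
if clip_type != 0 and clip_type != 1:  # pylint: disable=consider-using-in
    print("unsupported clip type, gradients returned unclipped")

# The form pylint suggests instead (fine outside graph-compiled code):
if clip_type not in (0, 1):
    print("unsupported clip type, gradients returned unclipped")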
model_zoo/bert/src/bert_for_pre_training.py

@@ -34,6 +34,9 @@ GRADIENT_CLIP_VALUE = 1.0
 _nn_clip_by_norm = nn.ClipByNorm()
 clip_grad = C.MultitypeFuncGraph("clip_grad")

+
+
+# pylint: disable=consider-using-in
 @clip_grad.register("Number", "Number", "Tensor")
 def _clip_grad(clip_type, clip_value, grad):
     """

@@ -57,6 +60,7 @@ def _clip_grad(clip_type, clip_value, grad):
         new_grad = _nn_clip_by_norm(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
     return new_grad

+
 class GetMaskedLMOutput(nn.Cell):
     """
     Get masked lm output.

@@ -377,6 +381,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell):
             self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                         name="loss_scale")
         self.add_flags(has_effect=True)
+
     def construct(self,
                   input_ids,
                   input_mask,
tests/mindspore_test_framework/apps/test_bert_parts.py

@@ -15,14 +15,15 @@
 """Test bert submodules."""

-import numpy as np
 import os
-from mindspore import Tensor
-from mindspore import nn, context
+
+import numpy as np
 from mindspore.model_zoo.Bert_NEZHA import EmbeddingLookup, GetMaskedLMOutput, \
     BertConfig, BertPreTraining, BertNetworkWithLoss
 from mindspore.model_zoo.Bert_NEZHA.bert_model import BertModel
+from mindspore import Tensor
+from mindspore import nn, context
 from ..mindspore_test import mindspore_test
 from ..pipeline.forward.compile_forward import pipeline_for_compile_forward_anf_graph_for_case_by_case_config, \
     pipeline_for_compile_forward_ge_graph_for_case_by_case_config
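This reordering, like the similar hunks in the next few files, follows pylint C0411 (wrong-import-order): standard-library imports come before third-party ones, with a blank line between groups. A minimal before/after sketch of the convention (module names here are only for illustration):

# Before: C0411 — third-party numpy listed ahead of stdlib os.
#   import numpy as np
#   import os

# After: stdlib group first, then third-party, separated by a blank line.
import os

import numpy as np

print(os.name, np.__version__)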
tests/mindspore_test_framework/components/executor/check_exceptions.py

@@ -15,9 +15,10 @@
 """Component that Check if the function raises the expected Exception."""

-import pytest
 import sys
+
+import pytest
 from ...components.icomponent import IExectorComponent
 from ...utils import keyword
tests/mindspore_test_framework/utils/check_gradient.py

@@ -16,9 +16,10 @@
 """Implementation of Numerical gradients checking."""
 # pylint: disable=missing-docstring

-from typing import Callable, List, Any
 import mindspore._c_expression as _c_expression
 import numpy as np
+
+from typing import Callable, List, Any
 from mindspore import ParameterTuple
 from mindspore import Tensor
tests/mindspore_test_framework/utils/dataset_util.py

@@ -15,9 +15,10 @@
 """Dataset utils."""

-import numpy as np
 import random
+
+import numpy as np
 from mindspore import Tensor
tests/mindspore_test_framework/utils/debug_util.py

@@ -24,8 +24,7 @@ from mindspore.ops import operations as P
 from mindspore.ops._grad.grad_base import bprop_getters
 from mindspore.ops.primitive import prim_attr_register, PrimitiveWithInfer

-logging.basicConfig(level=logging.DEBUG, format=
-                    '[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s')
+logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] %(asctime)s %(pathname)s:%(lineno)d %(message)s')
 logger = logging.getLogger(__name__)
tests/mindspore_test_framework/utils/other_util.py

@@ -14,9 +14,8 @@
 # ============================================================================
 """Other utils."""

-import mindspore._c_expression as _c_expression
 import numpy as np
-
+import mindspore._c_expression as _c_expression
 from mindspore.common.tensor import Tensor
tests/st/networks/models/bert/src/bert_for_pre_training.py

@@ -34,6 +34,9 @@ GRADIENT_CLIP_VALUE = 1.0
 _nn_clip_by_norm = nn.ClipByNorm()
 clip_grad = C.MultitypeFuncGraph("clip_grad")

+
+
+# pylint: disable=consider-using-in
 @clip_grad.register("Number", "Number", "Tensor")
 def _clip_grad(clip_type, clip_value, grad):
     """

@@ -57,6 +60,7 @@ def _clip_grad(clip_type, clip_value, grad):
         new_grad = _nn_clip_by_norm(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
     return new_grad

+
 class GetMaskedLMOutput(nn.Cell):
     """
     Get masked lm output.

@@ -377,6 +381,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell):
             self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                         name="loss_scale")
         self.add_flags(has_effect=True)
+
     def construct(self,
                   input_ids,
                   input_mask,
tests/st/ops/ascend/test_autocast.py

@@ -23,35 +23,41 @@ from mindspore.ops import functional as F, composite as C
 import mindspore.context as context
 import pytest

+
 class TensorIntAutoCast(nn.Cell):
-    def __init__(self,):
+    def __init__(self):
         super(TensorIntAutoCast, self).__init__()
         self.i = 2

     def construct(self, t):
         z = F.tensor_mul(t, self.i)
         return z

+
 class TensorFPAutoCast(nn.Cell):
-    def __init__(self,):
+    def __init__(self):
         super(TensorFPAutoCast, self).__init__()
         self.f = 1.2

     def construct(self, t):
         z = F.tensor_mul(t, self.f)
         return z

+
 class TensorBoolAutoCast(nn.Cell):
-    def __init__(self,):
+    def __init__(self):
         super(TensorBoolAutoCast, self).__init__()
         self.f = True

     def construct(self, t):
         z = F.tensor_mul(t, self.f)
         return z

+
 class TensorAutoCast(nn.Cell):
-    def __init__(self,):
+    def __init__(self):
         super(TensorAutoCast, self).__init__()

     def construct(self, t1, t2):
         z = F.tensor_mul(t1, t2)
         return z

@@ -68,7 +74,7 @@ def test_tensor_auto_cast():
     t_fp16 = Tensor(np.ones([2, 1, 2, 2]), mstype.float16)
     t_fp32 = Tensor(np.ones([2, 1, 2, 2]), mstype.float32)
     t_fp64 = Tensor(np.ones([2, 1, 2, 2]), mstype.float64)
     net = TensorAutoCast()
     rs = net(t_uint8, t_int8)
     assert rs.dtype() == mstype.int16
     rs = net(t_uint8, t_int16)

@@ -96,7 +102,7 @@ def test_tensor_auto_cast():
     assert rs.dtype() == mstype.float64
     rs = net(t_fp32, t_fp64)
     assert rs.dtype() == mstype.float64
     rs = net(t_uint8, t_fp16)
     assert rs.dtype() == mstype.float16
     rs = net(t_uint8, t_fp32)

@@ -210,7 +216,6 @@ def test_tensor_auto_cast():
     with pytest.raises(TypeError):
         net(t_uint64, t_fp64)
-
     with pytest.raises(TypeError):
         tfp(t_uint16)
     with pytest.raises(TypeError):
tests/st/ops/ascend/test_ops_infer.py

@@ -21,6 +21,7 @@ import mindspore.common.dtype as mstype
 from mindspore import Tensor
 from mindspore.ops import operations as P
 from mindspore import context

+
 context.set_context(mode=context.GRAPH_MODE, save_graphs=True)

@@ -29,14 +30,16 @@ def test_cast_op_attr():
     class CastNet(nn.Cell):
         def __init__(self):
             super(CastNet, self).__init__()
             self.cast = P.Cast()

         def construct(self, x, t):
             return self.cast(x, t)

+
     class CastTypeTest(nn.Cell):
         def __init__(self, net):
             super(CastTypeTest, self).__init__()
             self.net = net
             self.cast = P.Cast()

+
         def construct(self, x, y, z):
             cast_op = self.cast
             t1 = cast_op(x, mstype.float32)

@@ -46,6 +49,7 @@ def test_cast_op_attr():
         t4 = cast_net(y, mstype.int32)
         t5 = cast_net(z, mstype.float16)
         return (t1, t2, t3, t4, t5)

+
     net = CastTypeTest(CastNet())
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.int32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
tests/st/ops/cpu/test_transpose_op.py

@@ -142,4 +142,6 @@ def test_transpose():
     assert (output[1].asnumpy() == expect1).all()
     assert (output[2].asnumpy() == expect2).all()
     assert (output[3].asnumpy() == expect3).all()
-test_transpose()
\ No newline at end of file
+
+
+test_transpose()
tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py

@@ -1043,6 +1043,7 @@ def test_print_tuple_wrapper(tag):
     return fns[tag]


+# pylint: disable=unnecessary-semicolon
 def test_constant_duplicate_mul(tag):
    fns = FnDict()
    Mul = Primitive('Mul');
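W0301 (unnecessary-semicolon) flags trailing semicolons such as the Primitive('Mul'); line above; the commit silences the checker for this generated-looking test input rather than editing every line. The pattern in isolation, as a minimal sketch:

# pylint: disable=unnecessary-semicolon
x = 1;  # trailing semicolon is legal Python but flagged as W0301
y = x + 1;
print(y)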
tests/ut/python/dtype/test_dictionary.py

@@ -152,7 +152,7 @@ def test_dict_set_item():
     x = Tensor(np.ones([2, 2, 3], np.float32))
     net = DictSetNet()
-    out = net(x)
+    _ = net(x)


 # if the dictionary item does not exist, create a new one

@@ -168,4 +168,4 @@ def test_dict_set_item_create_new():
         return my_dict
     x = Tensor(np.ones([2, 2, 3], np.float32))
     net = DictSetNet()
-    out = net(x)
+    _ = net(x)
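Assigning a call's result to `_` is the conventional fix for W0612 (unused-variable) when the call runs only for its side effects, as in these tests, which exercise compilation without inspecting the output. In isolation, with a hypothetical stand-in for the network:

def build_net():
    """Hypothetical stand-in for DictSetNet(); only the call pattern matters."""
    return object()

# Before: `out = build_net()` left `out` unused, triggering W0612.
_ = build_net()  # `_` marks the result as deliberately discarded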
tests/ut/python/dtype/test_hypermap.py

@@ -81,31 +81,3 @@ def test_hypermap_func_const():
     net = NetMap()
     assert net() == (8, 12, 16)

-
-"""
-def test_hypermap_func_variable():
-    class NetMap(Cell):
-        def __init__(self):
-            super(NetMap, self).__init__()
-
-        def double(self, x):
-            return 2 * x
-
-        def triple(self, x):
-            return 3 * x
-
-        def square(self, x):
-            return x * x
-
-        def construct(self, x):
-            _list = [self.double, self.triple, self.square]
-            return map(lambda f: f(x), _list)
-
-    x = Tensor(np.ones([3, 2, 3], np.float32))
-    net = NetMap()
-    with pytest.raises(RuntimeError) as ex:
-        net(x)
-    assert "HyperMap don't support Closure with free variable yet" in str(ex.value)
-"""
tests/ut/python/dtype/test_list.py

@@ -133,7 +133,7 @@ def test_list_append_2():

 class ListOperate(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(ListOperate, self).__init__()

     def construct(self, t, l):

@@ -153,7 +153,7 @@ class ListOperate(nn.Cell):

 class InListNet(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(InListNet, self).__init__()
         self.list_ = [1, 2, 3, 4, 5, "ok"]
tests/ut/python/dtype/test_tuple.py

@@ -53,7 +53,7 @@ class NestTupleGraphNet(nn.Cell):

 class InTupleNet(nn.Cell):
-    def __init__(self, ):
+    def __init__(self,):
         super(InTupleNet, self).__init__()
         self.tuple_ = (1, 2, 3, 4, 5, "ok")
tests/ut/python/exec/test_AssignAdd.py

@@ -99,4 +99,4 @@ def test_assignadd_scalar_cast():
     net = AssignAddNet()
     x = Tensor(np.ones([1]).astype(np.int64) * 102)
     # _executor.compile(net, 1)
-    result = net(x)
+    _ = net(x)
tests/ut/python/ir/test_tensor.py

@@ -429,9 +429,9 @@ def test_tensor_dtype_np_int64():

 def test_tensor_dtype_fp32_to_bool():
     with pytest.raises(RuntimeError):
-        input = np.random.randn(2, 3, 4, 5).astype(np.float32)
-        input = ms.Tensor(input)
-        input_me = ms.Tensor(input, dtype=ms.bool_)
+        input_ = np.random.randn(2, 3, 4, 5).astype(np.float32)
+        input_ = ms.Tensor(input_)
+        _ = ms.Tensor(input_, dtype=ms.bool_)


 def test_tensor_operation():
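Renaming input to input_ here and in several files below addresses W0622 (redefined-builtin): `input` shadows the built-in function of the same name, and a trailing underscore is the PEP 8 convention for avoiding such collisions. A minimal sketch:

import numpy as np

# `input = ...` would shadow the builtin input(); the trailing underscore avoids it.
input_ = np.random.randn(2, 3).astype(np.float32)
print(input_.shape)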
tests/ut/python/keep_order/test_keep_order.py

@@ -41,10 +41,10 @@ class Func(nn.Cell):

     def construct(self, x, y):
         init = self.alloc_status()
-        sum = add(x, y)
+        sum_ = add(x, y)
         product = mul1(x, y)
         flag = self.get_status(init)
-        out = add2(sum, product)
+        out = add2(sum_, product)
         clear = self.clear_status(flag)
         out = F.depend(out, clear)
         return out

@@ -88,7 +88,7 @@ def test_sens():
     sens = Tensor(np.ones([3, 3]).astype(np.float32))
     net = Net()
     net.add_flags(has_effect=True)
-    out = net(x, y, sens)
+    _ = net(x, y, sens)


 class Net_hyper(nn.Cell):

@@ -119,7 +119,7 @@ def test_hyper_add():
     sens = Tensor(np.ones([3, 3]).astype(np.float32))
     net = Net_hyper()
     net.add_flags(has_effect=True)
-    out = net(x, y, sens)
+    _ = net(x, y, sens)


 def test_keep_order_io_effect_exception_return_dtype():
tests/ut/python/model/test_mix_precision.py

@@ -148,9 +148,6 @@ def test_cast():
     _executor.compile(net, x)


-
-
-"""test grad of PReLU, which cause AddN(generated by grad) fail"""
 class IRBlockZ(nn.Cell):
     def __init__(self, inplanes, planes):
         super(IRBlockZ, self).__init__()
tests/ut/python/nn/test_pooling.py

@@ -46,6 +46,7 @@ class MaxNet(nn.Cell):
                  kernel_size,
                  stride=None,
                  padding=0):
+        _ = padding
         super(MaxNet, self).__init__()
         self.maxpool = nn.MaxPool2d(kernel_size,
                                     stride)

@@ -73,5 +74,5 @@ class Avg1dNet(nn.Cell):

 def test_avg1d():
     net = Avg1dNet(6, 1)
-    input = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32))
-    _executor.compile(net, input)
+    input_ = Tensor(np.random.randint(0, 255, [1, 3, 6]).astype(np.float32))
+    _executor.compile(net, input_)
浏览文件 @
86d197df
...
@@ -52,19 +52,19 @@ def test_compile_psnr_grayscale():
...
@@ -52,19 +52,19 @@ def test_compile_psnr_grayscale():
def
test_psnr_max_val_negative
():
def
test_psnr_max_val_negative
():
max_val
=
-
1
max_val
=
-
1
with
pytest
.
raises
(
ValueError
):
with
pytest
.
raises
(
ValueError
):
net
=
PSNRNet
(
max_val
)
_
=
PSNRNet
(
max_val
)
def
test_psnr_max_val_bool
():
def
test_psnr_max_val_bool
():
max_val
=
True
max_val
=
True
with
pytest
.
raises
(
TypeError
):
with
pytest
.
raises
(
TypeError
):
net
=
PSNRNet
(
max_val
)
_
=
PSNRNet
(
max_val
)
def
test_psnr_max_val_zero
():
def
test_psnr_max_val_zero
():
max_val
=
0
max_val
=
0
with
pytest
.
raises
(
ValueError
):
with
pytest
.
raises
(
ValueError
):
net
=
PSNRNet
(
max_val
)
_
=
PSNRNet
(
max_val
)
def
test_psnr_different_shape
():
def
test_psnr_different_shape
():
...
...
tests/ut/python/nn/test_ssim.py

@@ -51,59 +51,59 @@ def test_compile_grayscale():
 def test_ssim_max_val_negative():
     max_val = -1
     with pytest.raises(ValueError):
-        net = SSIMNet(max_val)
+        _ = SSIMNet(max_val)


 def test_ssim_max_val_bool():
     max_val = True
     with pytest.raises(TypeError):
-        net = SSIMNet(max_val)
+        _ = SSIMNet(max_val)


 def test_ssim_max_val_zero():
     max_val = 0
     with pytest.raises(ValueError):
-        net = SSIMNet(max_val)
+        _ = SSIMNet(max_val)


 def test_ssim_filter_size_float():
     with pytest.raises(TypeError):
-        net = SSIMNet(filter_size=1.1)
+        _ = SSIMNet(filter_size=1.1)


 def test_ssim_filter_size_zero():
     with pytest.raises(ValueError):
-        net = SSIMNet(filter_size=0)
+        _ = SSIMNet(filter_size=0)


 def test_ssim_filter_sigma_zero():
     with pytest.raises(ValueError):
-        net = SSIMNet(filter_sigma=0.0)
+        _ = SSIMNet(filter_sigma=0.0)


 def test_ssim_filter_sigma_negative():
     with pytest.raises(ValueError):
-        net = SSIMNet(filter_sigma=-0.1)
+        _ = SSIMNet(filter_sigma=-0.1)


 def test_ssim_k1_k2_wrong_value():
     with pytest.raises(ValueError):
-        net = SSIMNet(k1=1.1)
+        _ = SSIMNet(k1=1.1)
     with pytest.raises(ValueError):
-        net = SSIMNet(k1=1.0)
+        _ = SSIMNet(k1=1.0)
     with pytest.raises(ValueError):
-        net = SSIMNet(k1=0.0)
+        _ = SSIMNet(k1=0.0)
     with pytest.raises(ValueError):
-        net = SSIMNet(k1=-1.0)
+        _ = SSIMNet(k1=-1.0)
     with pytest.raises(ValueError):
-        net = SSIMNet(k2=1.1)
+        _ = SSIMNet(k2=1.1)
     with pytest.raises(ValueError):
-        net = SSIMNet(k2=1.0)
+        _ = SSIMNet(k2=1.0)
     with pytest.raises(ValueError):
-        net = SSIMNet(k2=0.0)
+        _ = SSIMNet(k2=0.0)
     with pytest.raises(ValueError):
-        net = SSIMNet(k2=-1.0)
+        _ = SSIMNet(k2=-1.0)


 def test_ssim_different_shape():
tests/ut/python/onnx/test_onnx.py

@@ -64,13 +64,13 @@ class BatchNormTester(nn.Cell):

 def test_batchnorm_train_onnx_export():
     "test onnx export interface does not modify trainable flag of a network"
-    input = Tensor(np.ones([1, 3, 32, 32]).astype(np.float32) * 0.01)
+    input_ = Tensor(np.ones([1, 3, 32, 32]).astype(np.float32) * 0.01)
     net = BatchNormTester(3)
     net.set_train()
     if not net.training:
         raise ValueError('netowrk is not in training mode')
     onnx_file = 'batch_norm.onnx'
-    export(net, input, file_name=onnx_file, file_format='ONNX')
+    export(net, input_, file_name=onnx_file, file_format='ONNX')
     if not net.training:
         raise ValueError('netowrk is not in training mode')

@@ -172,6 +172,7 @@ net_cfgs = [

 def get_id(cfg):
+    _ = cfg
     return list(map(lambda x: x[0], net_cfgs))
tests/ut/python/ops/test_math_ops_check.py

@@ -28,7 +28,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \

 class AssignAddNet(nn.Cell):
-    def __init__(self,):
+    def __init__(self):
         super(AssignAddNet, self).__init__()
         self.op = P.AssignAdd()
         self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_add1")

@@ -39,7 +39,7 @@ class AssignAddNet(nn.Cell):

 class AssignSubNet(nn.Cell):
-    def __init__(self,):
+    def __init__(self):
         super(AssignSubNet, self).__init__()
         self.op = P.AssignSub()
         self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_sub1")

@@ -635,7 +635,7 @@ test_case_math_ops = [
      'skip': ['backward']}),
     # type of x and y not match
     ('Greater1', {
         'block': P.Greater(),
         'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))],
         'skip': ['backward']}),
     # type of x and y not match

@@ -660,6 +660,7 @@ test_case_math_ops = [
      'skip': ['backward']}),
 ]

+
 @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception)
 def test_check_exception():
     return raise_set
tests/ut/python/ops/test_ops_attr_infer.py

@@ -21,21 +21,25 @@ import mindspore.context as context
 from mindspore import Tensor
 from mindspore.ops import functional as F
 from mindspore.ops import prim_attr_register, PrimitiveWithInfer

 context.set_context(mode=context.GRAPH_MODE, save_graphs=True)

+
 class FakeOp(PrimitiveWithInfer):
     @prim_attr_register
     def __init__(self):
         """"""

     def infer_shape(self, x, y):
         self.second_shape = y
         self.add_prim_attr("second_shape", y)
         return x

     def infer_dtype(self, x, y):
         return x

+
 # test the normal case that should generate independent primitive because of different
 # generated attributes after inference
 def test_conv2d_same_primitive():
     class Conv2DSameNet(nn.Cell):

@@ -43,15 +47,18 @@ def test_conv2d_same_primitive():
             super(Conv2DSameNet, self).__init__()
             self.conv1 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)
             self.conv2 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)

         def construct(self, x, y):
             r1 = self.conv1(x)
             r2 = self.conv2(y)
             return (r1, r2)

+
     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = Conv2DSameNet()
     net(t1, t2)

+
 # test cell as high order argument
 # The graph with free variables used as argument is not supported yet
 # because of the limit of inference specialize system

@@ -59,18 +66,22 @@ def Xtest_conv2d_op_with_arg():
     class Conv2dNet(nn.Cell):
         def __init__(self):
             super(Conv2dNet, self).__init__()

         def construct(self, op, x):
             return op(x)

+
     class OpsNet(nn.Cell):
         def __init__(self, net):
             super(OpsNet, self).__init__()
             self.opnet = net
             self.conv2 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)

         def construct(self, x, y):
             conv_op = self.conv2
             a = self.opnet(conv_op, x)
             b = self.opnet(conv_op, y)
             return (a, b)

     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = OpsNet(Conv2dNet())

@@ -82,23 +93,29 @@ def test_conv2d_op_with_arg():
     class FackOpNet(nn.Cell):
         def __init__(self):
             super(FackOpNet, self).__init__()
             self.op = FakeOp()

         def construct(self, x, y):
             return self.op(x, y)

+
     class OpNet(nn.Cell):
         def __init__(self):
             super(OpNet, self).__init__()

         def construct(self, op, x, y):
             return op(x, y)

+
     class OpsNet(nn.Cell):
         def __init__(self, net):
             super(OpsNet, self).__init__()
             self.opnet = net
             self.op = FackOpNet()

         def construct(self, x, y):
             op = self.op
             a = self.opnet(op, x, y)
             b = self.opnet(op, y, x)
             return (a, b)

     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = OpsNet(OpNet())

@@ -110,63 +127,77 @@ def test_conv2d_op_with_arg_same_input():
     class FackOpNet(nn.Cell):
         def __init__(self):
             super(FackOpNet, self).__init__()
             self.op = FakeOp()

         def construct(self, x, y):
             return self.op(x, y)

+
     class OpNet(nn.Cell):
         def __init__(self):
             super(OpNet, self).__init__()

         def construct(self, op, x, y):
             return op(x, y)

+
     class OpsNet(nn.Cell):
         def __init__(self, net):
             super(OpsNet, self).__init__()
             self.opnet = net
             self.op = FackOpNet()

         def construct(self, x, y):
             op = self.op
             a = self.opnet(op, x, x)
             b = self.opnet(op, y, x)
             return (a, b)

     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     net = OpsNet(OpNet())
     net(t1, t2)

+
 # test op with partial
 def test_op_as_partial():
     class OpAsPartial(nn.Cell):
         def __init__(self):
             super(OpAsPartial, self).__init__()
             self.op = FakeOp()

         def construct(self, x, y, z):
             partial_op = F.partial(self.op, x)
             a = partial_op(y)
             b = partial_op(z)
             return a, b

     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OpAsPartial()
     net(t1, t2, t3)

+
 # test op with partial
 def test_op_as_partial_inside():
     class OpAsPartial(nn.Cell):
         def __init__(self):
             super(OpAsPartial, self).__init__()
             self.op = FakeOp()

         def construct(self, x, y, z):
             partial_op = F.partial(self.op, x)
             a = partial_op(y)
             b = partial_op(z)
             return a, b

+
     class OuterNet(nn.Cell):
         def __init__(self):
             super(OuterNet, self).__init__()
             self.net = OpAsPartial()

         def construct(self, x, y, z):
             a, b = self.net(x, y, z)
             return a, b

     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))

@@ -180,12 +211,14 @@ def test_op_as_partial_independent():
         def __init__(self):
             super(OpAsPartial, self).__init__()
             self.op = FakeOp()

         def construct(self, x, y, z):
             partial_op1 = F.partial(self.op, x)
             a = partial_op1(y)
             partial_op2 = F.partial(self.op, x)
             b = partial_op2(z)
             return a, b

     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))

@@ -198,6 +231,7 @@ def test_nest_partial():
         def __init__(self):
             super(NestPartial, self).__init__()
             self.op = FakeOp()

         def construct(self, x, y, z):
             partial_op1 = F.partial(self.op)
             partial_op2 = F.partial(partial_op1, x)

@@ -206,54 +240,65 @@ def test_nest_partial():
             partial_op4 = F.partial(partial_op3, x)
             b = partial_op4(z)
             return a, b

     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = NestPartial()
     net(t1, t2, t3)

+
 # high order argument
 # op and op args as network arguments
 def test_op_with_arg_as_input():
     class WithOpArgNet(nn.Cell):
         def __init__(self):
             super(WithOpArgNet, self).__init__()

         def construct(self, op, x, y):
             return op(x, y)

+
     class OpsNet(nn.Cell):
         def __init__(self, net):
             super(OpsNet, self).__init__()
             self.opnet = net
             self.op = FakeOp()

         def construct(self, x, y, z):
             op = self.op
             a = self.opnet(op, x, z)
             b = self.opnet(op, x, y)
             return (a, b)

     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
     net = OpsNet(WithOpArgNet())
     net(t1, t2, t3)

+
 # The partial application used as argument is not supported yet
 # because of the limit of inference specialize system
 def Xtest_partial_as_arg():
     class PartialArgNet(nn.Cell):
         def __init__(self):
             super(PartialArgNet, self).__init__()

         def construct(self, partial_op, y):
             return partial_op(y)

+
     class OpsNet(nn.Cell):
         def __init__(self, net):
             super(OpsNet, self).__init__()
             self.partial_net = net
             self.op = FakeOp()

         def construct(self, x, y, z):
             partial_op = F.partial(self.op, x)
             a = self.partial_net(partial_op, z)
             b = self.partial_net(partial_op, y)
             return (a, b)

     t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
     t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
     t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
tests/ut/python/ops/test_ops_check.py

@@ -32,6 +32,7 @@ from ....mindspore_test_framework.pipeline.forward.verify_exception \
 logging.basicConfig(level=logging.WARNING)


+# pylint: disable=abstract-method
 class NetMissConstruct(nn.Cell):
     """ NetMissConstruct definition """

@@ -46,7 +47,6 @@ class NetMissConstruct(nn.Cell):
         self.max_pool2d = nn.MaxPool2d(kernel_size=2)
         self.flatten = P.Flatten()

-    # pylint: disable=abstract-method
     # TestCase: Mis-spelled 'construct' to 'construtc'
     def construtc(self, x):
         x = self.max_pool2d(self.relu(self.conv1(x)))
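This hunk moves the abstract-method pragma from inside the class body to just above the class, since the message is reported against NetMissConstruct itself (it deliberately misspells construct). As a general note on pragma scope, shown with unrelated illustrative messages:

# A standalone pragma applies from its line to the end of the enclosing
# scope; placed directly above a class at module level, it covers the class
# (and anything after it in the module, unless re-enabled).
# pylint: disable=too-few-public-methods
class Holder:
    """A one-attribute class pylint would otherwise flag as R0903."""
    value = 1


result = Holder()  # pylint: disable=invalid-name  (inline pragma covers this line only)
print(result.value)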
tests/ut/python/optimizer/test_debug_location.py

...
@@ -44,7 +44,7 @@ class MockNeg(PrimitiveWithInfer):
     def infer_dtype(self, input_x):
         raise TypeError("InferError")
-        return input_x
+        # return input_x
 
 class MockSub(PrimitiveWithInfer):
...
@@ -79,8 +79,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul()
         self.add = P.TensorAdd()
 
-    def construct(self, input):
-        output = self.add(self.matmul(input, self.weight), self.bias)
+    def construct(self, input_):
+        output = self.add(self.matmul(input_, self.weight), self.bias)
         return output
...
@@ -93,9 +93,9 @@ class NetFP16(nn.Cell):
         self.add = P.TensorAdd()
         self.cast = P.Cast()
 
-    def construct(self, input):
+    def construct(self, input_):
         output = self.cast(
-            self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)),
+            self.add(self.matmul(self.cast(input_, mstype.float16), self.cast(self.weight, mstype.float16)),
                      self.cast(self.bias, mstype.float16)), mstype.float32)
         return output
...
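Two findings are cleaned up here: the `return input_x` after `raise TypeError(...)` could never execute, so it is commented out rather than left as dead code, and the parameter `input` is renamed `input_` because it shadows the built-in `input()` (pylint's redefined-builtin, W0622). The trailing underscore is the PEP 8 convention for exactly this case:

```python
def scale_bad(input):    # W0622: the builtin input() is now unreachable here
    return input * 2

def scale_good(input_):  # trailing underscore avoids the collision
    return input_ * 2

print(scale_good(21))  # 42
```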
tests/ut/python/optimizer/test_optimize_with_loss_scale.py

...
@@ -42,10 +42,10 @@ class MindDataSet(MindData):
         if self._size < self._iter_num:
             raise StopIteration
         self._iter_num += 1
-        next = []
-        for shape, type in zip(self._output_shapes, self._np_types):
-            next.append(Tensor(np.ones(shape).astype(type)))
-        return tuple(next)
+        lst = []
+        for shape_, type_ in zip(self._output_shapes, self._np_types):
+            lst.append(Tensor(np.ones(shape_).astype(type_)))
+        return tuple(lst)
 
 class Net(nn.Cell):
...
@@ -56,8 +56,8 @@ class Net(nn.Cell):
         self.matmul = P.MatMul()
         self.add = P.TensorAdd()
 
-    def construct(self, input):
-        output = self.add(self.matmul(input, self.weight), self.bias)
+    def construct(self, input_):
+        output = self.add(self.matmul(input_, self.weight), self.bias)
         return output
...
@@ -70,9 +70,9 @@ class NetFP16(nn.Cell):
         self.add = P.TensorAdd()
         self.cast = P.Cast()
 
-    def construct(self, input):
+    def construct(self, input_):
         output = self.cast(
-            self.add(self.matmul(self.cast(input, mstype.float16), self.cast(self.weight, mstype.float16)),
+            self.add(self.matmul(self.cast(input_, mstype.float16), self.cast(self.weight, mstype.float16)),
                      self.cast(self.bias, mstype.float16)), mstype.float32)
         return output
...
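The mock dataset gets the same treatment for two more built-ins: the accumulator `next` becomes `lst` and the loop names `shape`/`type` become `shape_`/`type_`. Shadowing `next` is especially easy to trip over because iterator code calls it as a function:

```python
def shadowed():
    next = []              # shadows the builtin for the rest of the function
    it = iter([1, 2, 3])
    return next(it)        # TypeError: 'list' object is not callable

def clean():
    lst = []
    it = iter([1, 2, 3])
    lst.append(next(it))   # builtin still usable
    return lst

print(clean())  # [1]
```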
tests/ut/python/parallel/test_alltoall.py

...
@@ -97,8 +97,8 @@ def test_all_to_all():
     print(strategys)
     expect_dict = {'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits'
                    '/SoftmaxCrossEntropyWithLogits-op3': [[8, 1], [8, 1]],
-                   'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits/OneHot-op4': [
-                       [8, 1], [], []],
+                   'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits/'
+                   'OneHot-op4': [[8, 1], [], []],
                    'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/Transpose-op1': [
                        [8, 1]],
                    'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/MatMul-op0': [
...
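The over-long dictionary key is re-wrapped using adjacent string literals, which the Python compiler joins into a single string, so the key is unchanged while each source line stays within the length limit:

```python
# Adjacent literals merge at compile time; both spellings are the same key.
expect_dict = {
    'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/'
    '_loss_fn-SoftmaxCrossEntropyWithLogits/OneHot-op4': [[8, 1], [], []],
}
key = ('Default/network-_VirtualDatasetCell/_backbone-WithLossCell/'
       '_loss_fn-SoftmaxCrossEntropyWithLogits/OneHot-op4')
assert key in expect_dict
```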
tests/ut/python/parallel/test_two_matmul.py

...
@@ -170,4 +170,4 @@ def test_matmul_forward_reduce_scatter_transpose():
     x = Tensor(np.ones([128, 32]), dtype=ms.float32)
     y = Tensor(np.ones([64, 32]), dtype=ms.float32)
     b = Tensor(np.ones([128, 64]), dtype=ms.float32)
-    compile_net(net, x, y, b)
\ No newline at end of file
+    compile_net(net, x, y, b)
tests/ut/python/parameter_feature/test_parameter.py

...
@@ -280,4 +280,4 @@ def test_mixed_precision_const_parameter():
     x = Tensor(np.ones((1, 3, 28, 28), np.float32))
     y = Tensor(np.ones((1, 3, 14, 14), np.float32))
     z = Tensor(np.ones((1, 3, 28, 28), np.float32))
-    out = net(x, y, z)
+    _ = net(x, y, z)
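From here on most hunks apply a single idiom: a call made purely for its side effect (graph compilation, or an exception checked by `pytest.raises`) has its result bound to `_`, the conventional throw-away name, which keeps pylint's unused-variable check (W0612) quiet without deleting the call. In miniature:

```python
def compile_and_check(x):
    # Imagine this traces a graph and raises on shape or type errors.
    return x * x

out = compile_and_check(3)   # W0612 if 'out' is never read afterwards
_ = compile_and_check(3)     # result intentionally discarded
```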
tests/ut/python/parameter_feature/test_var_grad.py

...
@@ -39,7 +39,7 @@ def test_net_vargs_expand():
     y = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32))
     sens = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32))
     net = AddNet()
-    out = C.grad_all_with_sens(net, net.trainable_params())(x, y, sens)
+    _ = C.grad_all_with_sens(net, net.trainable_params())(x, y, sens)
 
 class VarNet(Cell):
...
@@ -111,7 +111,7 @@ def test_all_var_args_grad_with_sens():
     sens = Tensor(1.0, dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y, sens)
+    _ = grad_net(x, y, sens)
 
 def test_grad_list_var_args():
...
@@ -128,7 +128,7 @@ def test_grad_list_var_args():
     y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y)
+    _ = grad_net(x, y)
 
 def test_grad_all_var_args():
...
@@ -145,7 +145,7 @@ def test_grad_all_var_args():
     y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y)
+    _ = grad_net(x, y)
 
 def test_grad_all_var_args_with_sens():
...
@@ -163,7 +163,7 @@ def test_grad_all_var_args_with_sens():
     sens = Tensor(1.0, dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y, sens)
+    _ = grad_net(x, y, sens)
 
 def test_grad_var_args_with_sens():
...
@@ -181,7 +181,7 @@ def test_grad_var_args_with_sens():
     sens = Tensor(1.0, dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y, sens)
+    _ = grad_net(x, y, sens)
 
 def test_var_args_grad():
...
@@ -219,7 +219,7 @@ def test_var_args_grad():
     sens = Tensor(1.0, dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y, sens)
+    _ = grad_net(x, y, sens)
 
 def test_var_args_positional():
...
@@ -253,7 +253,7 @@ def test_var_args_positional():
     y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
-    out = grad_net(x, y)
+    _ = grad_net(x, y)
 
 def test_grad_within_if_else():
...
@@ -271,7 +271,7 @@ def test_grad_within_if_else():
     x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
     y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32)
-    sens = Tensor(1.0, dtype=mstype.float32)
+    _ = Tensor(1.0, dtype=mstype.float32)
     net = VarNet(SecondNet())
     grad_net = GradNet(net)
     out = grad_net(x, y)
...
@@ -316,10 +316,10 @@ def test_grad_for_concat():
         net = Concat(axis=self.axis)
         grad_net = GradNet(net)
         grad_net.set_train()
-        input_grad = grad_net(*inputs, Tensor(self.out_grad_np))
+        _ = grad_net(*inputs, Tensor(self.out_grad_np))
 
     def grad_cmp(self):
-        input_grad_mindspore = self.grad_mindspore_impl()
+        self.grad_mindspore_impl()
 
     fact = ConcatFactory(input_shape=(
         (2, 184320, 1), (2, 46080, 1), (2, 11520, 1), (2, 2880, 1), (2, 720, 1)), axis=1)
...
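Every test in this file routes through the same `GradNet(VarNet(SecondNet()))` wrapper, whose definition sits earlier in test_var_grad.py and is outside this diff. A hedged sketch of the usual shape of such a wrapper, borrowing the `C.GradOperation` style that appears elsewhere in this commit; the exact flags here are an assumption, and the real file likely defines variants with and without `sens_param`:

```python
import mindspore.nn as nn
from mindspore.ops import composite as C

class GradNet(nn.Cell):
    """Returns gradients of the wrapped network w.r.t. all inputs."""

    def __init__(self, net):
        super(GradNet, self).__init__()
        self.net = net
        # sens_param=True lets callers pass an explicit sensitivity tensor,
        # matching the grad_net(x, y, sens) calls above (assumed flags).
        self.grad = C.GradOperation('grad', get_all=True, sens_param=True)

    def construct(self, *inputs):
        return self.grad(self.net)(*inputs)
```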
tests/ut/python/pipeline/parse/test_cont_break.py

...
@@ -84,7 +84,7 @@ class for_loop_with_cont_break(Cell):
         if i > 5:
             x *= 3
             break
-            x *= 2
+            # x *= 2
         x = x * 2
         pass
     return x
...
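The commented-out `x *= 2` sat directly after `break` in the same block, so it could never run; pylint reports this as unreachable code (W0101), and the commit neutralises it with a comment rather than a deletion. A distilled version (the indentation is my reconstruction of the test):

```python
def loop(n):
    x = n
    for i in range(10):
        if i > 5:
            x *= 3
            break
            # x *= 2    <- anything after 'break' in this block is dead
        x = x * 2
    return x

print(loop(1))  # 192: doubled six times, then tripled once
```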
tests/ut/python/pynative_mode/test_framstruct.py

...
@@ -123,6 +123,7 @@ def sub(x, y):
     return x - y
 
+# pylint: disable=using-constant-test
 @ms_function
 def if_always_true(x):
     """ if_always_true """
...
@@ -870,6 +871,7 @@ def test_grad_refactor_14():
     assert C.grad_all(grad_refactor_14)(2, 3) == (3, 9)
 
+# pylint: disable=using-constant-test
 class IfDeferInline(nn.Cell):
     def __init__(self, mul_size):
         super().__init__()
...
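`using-constant-test` (W0125) fires whenever an `if` condition is a constant, which these parser tests do deliberately to exercise constant-branch elimination, so the fix is a targeted disable rather than a code change. On plain Python the pattern looks like this:

```python
# pylint: disable=using-constant-test
def if_always_true(x):
    """Mirrors the test above: the branch condition never varies."""
    if True:      # W0125 without the disable; intentional here
        return x
    return x - 1

assert if_always_true(5) == 5
```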
tests/ut/python/pynative_mode/test_hook.py

 import numpy as np
 import mindspore.nn as nn
 import mindspore.ops.operations as P
-from mindspore import context
 from mindspore.ops import composite as C
-from mindspore.common import dtype as mstype
 from mindspore import context, Tensor, ParameterTuple
 from mindspore.common.initializer import TruncatedNormal
-from mindspore.nn import Dense, WithLossCell, SoftmaxCrossEntropyWithLogits, Momentum
+from mindspore.nn import WithLossCell, Momentum
 
 context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
...
@@ -18,25 +16,28 @@ def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
                      kernel_size=kernel_size, stride=stride, padding=padding,
                      weight_init=weight, has_bias=False, pad_mode="valid")
 
 def fc_with_initialize(input_channels, out_channels):
     """weight initial for fc layer"""
     weight = weight_variable()
     bias = weight_variable()
     return nn.Dense(input_channels, out_channels, weight, bias)
 
 def weight_variable():
     """weight initial"""
     return TruncatedNormal(0.02)
 
 def cell_hook_function(cell_id, grad_input, grad_output):
     print(cell_id)
     assert (grad_output[0].asnumpy().shape == (32, 6, 14, 14))
     assert (grad_input[0].asnumpy().shape == (32, 16, 10, 10))
 
 def var_hook_function(grad_out):
     print("grad:", grad_out)
     assert (grad_out[0].asnumpy().shape == (32, 120))
 
 class LeNet5(nn.Cell):
...
@@ -82,7 +83,7 @@ class LeNet5(nn.Cell):
         x = self.fc3(x)
         return x
 
 class GradWrap(nn.Cell):
     """ GradWrap definition """
     def __init__(self, network):
...
@@ -94,6 +95,7 @@ class GradWrap(nn.Cell):
         weights = self.weights
         return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label)
 
 def test_hook():
     net = LeNet5()
     optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)
...
@@ -101,7 +103,7 @@ def test_hook():
     net_with_criterion = WithLossCell(net, criterion)
     train_network = GradWrap(net_with_criterion)
     train_network.set_train()
     input_data = Tensor(np.ones([net.batch_size, 1, 32, 32]).astype(np.float32) * 0.01)
     label = Tensor(np.ones([net.batch_size, net.num_class]).astype(np.float32))
     output = net(Tensor(input_data))
...
@@ -111,8 +113,6 @@ def test_hook():
     print(loss_output.asnumpy().shape)
 
 class MulAdd(nn.Cell):
     def __init__(self):
         super(MulAdd, self).__init__()
...
@@ -121,12 +121,13 @@ class MulAdd(nn.Cell):
         return 2 * x + y
 
     def bprop(self, x, y, out, dout):
         assert (x == 1)
         assert (y == 2)
         assert (out == 4)
         assert (dout == 1)
         return 3 * dout, 2 * y
 
 def test_custom_bprop():
     mul_add = MulAdd()
     mul_add.bprop_debug = True
...
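The import trim above leaves the hook machinery intact: `cell_hook_function` is written for a Cell-level backward hook (it receives `cell_id`, `grad_input`, `grad_output`) and `var_hook_function` for the `HookBackward` primitive, and both just print and shape-check gradients flowing through LeNet5. The actual wiring lives elsewhere in the file; a hedged sketch of how such hooks are typically attached in PyNative mode (treat the exact call sites as an assumption):

```python
import mindspore.nn as nn
import mindspore.ops.operations as P

class HookedBlock(nn.Cell):
    """Illustrative Cell installing both hook styles."""

    def __init__(self, cell_hook, var_hook):
        super(HookedBlock, self).__init__()
        self.fc = nn.Dense(120, 84)
        # Cell-level hook: called with (cell_id, grad_input, grad_output)
        # during the backward pass.
        self.fc.register_backward_hook(cell_hook)
        # Tensor-level hook: observes the gradient of one intermediate value.
        self.hook = P.HookBackward(var_hook)

    def construct(self, x):
        x = self.fc(x)
        x = self.hook(x)
        return x
```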
tests/ut/python/pynative_mode/test_stop_gradient.py

...
@@ -18,10 +18,9 @@ import pytest
 import mindspore.common.dtype as mstype
 import mindspore.nn as nn
-from mindspore import Parameter, ParameterTuple, Tensor
+from mindspore import Parameter, ParameterTuple
 from mindspore import Tensor
 from mindspore import context
-from mindspore import context
 from mindspore.common.api import ms_function
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
...
浏览文件 @
86d197df
...
@@ -60,6 +60,7 @@ class InvertedResidual(nn.Cell):
...
@@ -60,6 +60,7 @@ class InvertedResidual(nn.Cell):
class
MobileNetV2
(
nn
.
Cell
):
class
MobileNetV2
(
nn
.
Cell
):
def
__init__
(
self
,
num_class
=
1000
,
input_size
=
224
,
width_mul
=
1.
):
def
__init__
(
self
,
num_class
=
1000
,
input_size
=
224
,
width_mul
=
1.
):
super
(
MobileNetV2
,
self
).
__init__
()
super
(
MobileNetV2
,
self
).
__init__
()
_
=
input_size
block
=
InvertedResidual
block
=
InvertedResidual
input_channel
=
32
input_channel
=
32
last_channel
=
1280
last_channel
=
1280
...
...
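`input_size` is accepted for API compatibility but never read, so the added `_ = input_size` consumes it explicitly and silences pylint's unused-argument check (W0613) without touching the constructor's signature. Condensed (the stub class name is mine):

```python
class MobileNetV2Stub:
    def __init__(self, num_class=1000, input_size=224, width_mul=1.):
        _ = input_size           # kept for API compatibility, unused for now
        self.num_class = num_class
        self.width_mul = width_mul

net = MobileNetV2Stub(input_size=224)
print(net.num_class)  # 1000
```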
tests/ut/python/train/quant/mobilenetv2_combined.py

...
@@ -68,6 +68,7 @@ class InvertedResidual(nn.Cell):
 class MobileNetV2(nn.Cell):
     def __init__(self, num_class=1000, input_size=224, width_mul=1.):
         super(MobileNetV2, self).__init__()
+        _ = input_size
         block = InvertedResidual
         input_channel = 32
         last_channel = 1280
...
tests/ut/python/train/quant/test_quant.py

...
@@ -63,33 +63,3 @@ class LeNet5(nn.Cell):
         x = self.fc2(x)
         x = self.fc3(x)
         return x
-"""
-def test_qat_lenet():
-    net = LeNet5()
-    net = qat.convert_quant_network(
-        net, quant_delay=0, bn_fold=False, freeze_bn=10000, weight_bits=8, act_bits=8)
-
-def test_qat_mobile():
-    net = MobileNetV2()
-    img = Tensor(np.ones((1, 3, 224, 224)).astype(np.float32))
-    net = qat.convert_quant_network(
-        net, quant_delay=0, bn_fold=False, freeze_bn=10000, weight_bits=8, act_bits=8)
-    net(img)
-
-def test_qat_mobile_train():
-    net = MobileNetV2(num_class=10)
-    img = Tensor(np.ones((1, 3, 224, 224)).astype(np.float32))
-    label = Tensor(np.ones((1, 10)).astype(np.float32))
-    net = qat.convert_quant_network(
-        net, quant_delay=0, bn_fold=False, freeze_bn=10000, weight_bits=8, act_bits=8)
-    loss = nn.SoftmaxCrossEntropyWithLogits(reduction='mean')
-    optimizer = nn.Momentum(net.trainable_params(),
-                            learning_rate=0.1, momentum=0.9)
-    net = nn.WithLossCell(net, loss)
-    net = nn.TrainOneStepCell(net, optimizer)
-    net(img, label)
-"""
\ No newline at end of file
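The deleted block was a module-level triple-quoted string holding three complete test functions, i.e. tests disabled by commenting out, which pylint counts as a pointless string statement (W0105). If the intent is "keep but do not run", a pytest skip marker says so without the lint noise; a sketch using the first deleted test (the reason string is mine, and `LeNet5`/`qat` come from this file's own imports):

```python
import pytest

@pytest.mark.skip(reason="quant-aware training API still stabilising")
def test_qat_lenet():
    net = LeNet5()
    net = qat.convert_quant_network(
        net, quant_delay=0, bn_fold=False, freeze_bn=10000,
        weight_bits=8, act_bits=8)
```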
tests/ut/python/train/summary/summary_reader.py

...
@@ -13,9 +13,10 @@
 # limitations under the License.
 # ============================================================================
 """Summary reader."""
-import mindspore.train.summary_pb2 as summary_pb2
 import struct
+
+import mindspore.train.summary_pb2 as summary_pb2
 
 _HEADER_SIZE = 8
 _HEADER_CRC_SIZE = 4
 _DATA_CRC_SIZE = 4
...
tests/ut/python/train/test_amp.py

...
@@ -25,6 +25,7 @@ from ....dataset_mock import MindData
 def setup_module(module):
+    _ = module
     context.set_context(mode=context.GRAPH_MODE)
...
@@ -56,7 +57,7 @@ def test_amp_o0():
     optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
     train_network = amp.build_train_network(net, optimizer, level="O0")
-    output = train_network(inputs, label)
+    _ = train_network(inputs, label)
 
 def test_amp_o2():
...
@@ -66,7 +67,7 @@ def test_amp_o2():
     optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
     train_network = amp.build_train_network(net, optimizer, level="O2")
-    output = train_network(inputs, label)
+    _ = train_network(inputs, label)
 
 def test_amp_o2_loss():
...
@@ -76,7 +77,7 @@ def test_amp_o2_loss():
     loss = nn.MSELoss()
     optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
     train_network = amp.build_train_network(net, optimizer, loss, level="O2")
-    output = train_network(inputs, label)
+    _ = train_network(inputs, label)
 
 def test_amp_o0_loss():
...
@@ -86,7 +87,7 @@ def test_amp_o0_loss():
     loss = nn.MSELoss()
     optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
     train_network = amp.build_train_network(net, optimizer, loss)
-    output = train_network(inputs, label)
+    _ = train_network(inputs, label)
 
 class MindDataSet(MindData):
...
@@ -100,10 +101,10 @@ class MindDataSet(MindData):
         if self._size < self._iter_num:
             raise StopIteration
         self._iter_num += 1
-        next = []
-        for shape, type in zip(self._output_shapes, self._np_types):
-            next.append(Tensor(np.ones(shape).astype(type)))
-        return tuple(next)
+        lst = []
+        for shape_, type_ in zip(self._output_shapes, self._np_types):
+            lst.append(Tensor(np.ones(shape_).astype(type_)))
+        return tuple(lst)
 
 def test_compile_model_train_O0():
...
tests/ut/python/train/test_training.py

...
@@ -151,7 +151,7 @@ def test_eval():
     with pytest.raises(ValueError):
         model2.eval(dataset)
 
-    net3 = LossNet()
+    _ = LossNet()
     model3 = Model(net2, eval_network=net2, metrics={"loss"})
     with pytest.raises(ValueError):
         model3.eval(dataset)
...
tests/ut/python/utils/test_callback.py

...
@@ -15,6 +15,7 @@
 """test callback function."""
 import os
 import stat
+
 import numpy as np
 import pytest
...
@@ -283,14 +284,14 @@ def test_build_callbacks():
     callbacks = [ck_obj, loss_cb_1, 'Error', None]
     with pytest.raises(TypeError):
-        callback_list = _build_callbacks(callbacks)
+        _ = _build_callbacks(callbacks)
 
 def test_RunContext():
     """Test RunContext."""
     context_err = 666
     with pytest.raises(TypeError):
-        context = RunContext(context_err)
+        _ = RunContext(context_err)
     cb_params = _InternalCallbackParam()
     cb_params.member1 = 1
...
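The second change in this file is subtler than the `_` idiom: `context = RunContext(context_err)` rebound the name `context`, which elsewhere in the file refers to the imported MindSpore context module, so pylint flags it (redefined-outer-name, W0621), and any later `context.set_context(...)` in that scope would have hit the RunContext object instead. Distilled with stand-in definitions:

```python
context = "stands in for the imported mindspore 'context' module"

class RunContext:
    """Minimal stand-in for the real RunContext class."""
    def __init__(self, params):
        self.params = params

def bad(params):
    context = RunContext(params)  # W0621: shadows the module-level name
    return context.params

def good(params):
    _ = RunContext(params)        # throw-away binding, outer name intact
    return context                # still the module-level value

print(good(666))
```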
tests/vm_impl/nn_ops_vm_impl.py

...
@@ -223,6 +223,7 @@ def vm_impl_avg_pool_grad(self):
     return vm_impl
 
+# pylint: disable=function-redefined
 @vm_impl_getters.register(G.FusedBatchNormGrad)
 def vm_impl_fused_batch_norm_grad(self):
     """Generate vm_impl function for FusedBatchNormGrad"""
...
@@ -239,6 +240,7 @@ def vm_impl_fused_batch_norm_grad(self):
     return vm_impl
 
+# pylint: disable=function-redefined
 @vm_impl_getters.register(G.BatchNormGrad)
 def vm_impl_fused_batch_norm_grad(self):
     """Generate vm_impl function for BatchNormGrad"""
...
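Both decorated functions deliberately share the name `vm_impl_fused_batch_norm_grad`: the registry decorator captures each function object at decoration time, so redefining the module-level name loses nothing, and only pylint's function-redefined check (E0102) objects; hence the targeted disables. A self-contained model of the pattern:

```python
registry = {}

def register(key):
    def deco(fn):
        registry[key] = fn      # the function object is captured here...
        return fn
    return deco

@register("FusedBatchNormGrad")
def vm_impl(shape):
    return ("fused", shape)

# pylint: disable=function-redefined
@register("BatchNormGrad")
def vm_impl(shape):             # ...so rebinding the name is harmless
    return ("plain", shape)

print(registry["FusedBatchNormGrad"]((2, 3)))  # ('fused', (2, 3))
print(registry["BatchNormGrad"]((2, 3)))       # ('plain', (2, 3))
```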