Commit 2bc3fcb1
Repository: magicwindyyd/mindspore (fork of MindSpore/mindspore on Gitee)
Authored May 11, 2020 by mindspore-ci-bot; committed via Gitee on May 11, 2020

!1044 clean pylint warning in test dir

Merge pull request !1044 from jinyaohui/clean_pylint_test

Parents: 19ecc997, d40e89b1
Showing 103 changed files with 942 additions and 576 deletions (+942, -576).
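Most of the hunks below change layout rather than behavior: per the commit message, this is a pylint cleanup of the test directory. As a hedged illustration only (this exact snippet is not taken from the commit), the dominant pattern is PEP 8 spacing — spaces after commas in shape literals and two blank lines around top-level definitions:

import numpy as np

# Hypothetical "before": x = np.ones([1,3,3,4]).astype(np.float32), with a
# single blank line before the next def. After the cleanup, literals are
# spaced and two blank lines separate top-level definitions, as in the
# hunks below.
x = np.ones([1, 3, 3, 4]).astype(np.float32)


def test_shape():
    assert x.shape == (1, 3, 3, 4)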
Changed files:

tests/st/ops/ascend/test_add.py  +8 -3
tests/st/ops/ascend/test_addn.py  +5 -1
tests/st/ops/ascend/test_aicpu_ops/test_expand_dims.py  +80 -67
tests/st/ops/ascend/test_aicpu_ops/test_flatten.py  +68 -57
tests/st/ops/ascend/test_aicpu_ops/test_is_finite.py  +80 -67
tests/st/ops/ascend/test_aicpu_ops/test_reshape.py  +80 -67
tests/st/ops/ascend/test_aicpu_ops/test_squeeze.py  +80 -67
tests/st/ops/ascend/test_apply_momentum.py  +10 -5
tests/st/ops/ascend/test_biasAddGrad.py  +14 -11
tests/st/ops/ascend/test_bias_add_grad.py  +4 -1
tests/st/ops/ascend/test_conv.py  +12 -11
tests/st/ops/ascend/test_conv2dGradFilter.py  +24 -21
tests/st/ops/ascend/test_conv_grad.py  +12 -8
tests/st/ops/ascend/test_dense.py  +4 -0
tests/st/ops/ascend/test_dense_grad.py  +4 -0
tests/st/ops/ascend/test_drop_out_gen_mask.py  +1 -0
tests/st/ops/ascend/test_full_connection.py  +1 -0
tests/st/ops/ascend/test_fused_batchnorm.py  +4 -2
tests/st/ops/ascend/test_fused_batchnorm_grad.py  +7 -3
tests/st/ops/ascend/test_image_gradients.py  +21 -19
tests/st/ops/ascend/test_matmul.py  +7 -2
tests/st/ops/ascend/test_maxpool.py  +3 -2
tests/st/ops/ascend/test_maxpool_grad.py  +1 -0
tests/st/ops/ascend/test_maxpool_with_argmax_grad.py  +6 -2
tests/st/ops/ascend/test_relu.py  +5 -1
tests/st/ops/ascend/test_relu_grad.py  +6 -2
tests/st/ops/ascend/test_reshape.py  +14 -10
tests/st/ops/ascend/test_simplemean.py  +5 -1
tests/st/ops/ascend/test_simplemean_grad.py  +6 -2
tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py  +6 -2
tests/st/ops/ascend/test_sparse_softmax_cross_entropy_with_logits_grad.py  +1 -0
tests/st/ops/ascend/test_tbe_ops/test_AssignAdd.py  +4 -2
tests/st/ops/ascend/test_tbe_ops/test_AssignSub.py  +4 -2
tests/st/ops/ascend/test_tbe_ops/test_ReduceMean.py  +5 -0
tests/st/ops/ascend/test_tbe_ops/test_add.py  +4 -2
tests/st/ops/ascend/test_tbe_ops/test_addn.py  +5 -1
tests/st/ops/ascend/test_tbe_ops/test_apply_adam.py  +10 -9
tests/st/ops/ascend/test_tbe_ops/test_apply_momentum.py  +8 -5
tests/st/ops/ascend/test_tbe_ops/test_batchmatmul.py  +5 -0
tests/st/ops/ascend/test_tbe_ops/test_batchnorm.py  +2 -2
tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py  +6 -3
tests/st/ops/ascend/test_tbe_ops/test_bias_add.py  +2 -1
tests/st/ops/ascend/test_tbe_ops/test_bias_add_grad.py  +4 -0
tests/st/ops/ascend/test_tbe_ops/test_concat.py  +3 -2
tests/st/ops/ascend/test_tbe_ops/test_conv.py  +10 -11
tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py  +10 -7
tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py  +15 -12
tests/st/ops/ascend/test_tbe_ops/test_dropout_do_mask.py  +3 -2
tests/st/ops/ascend/test_tbe_ops/test_gelu.py  +3 -0
tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py  +4 -1
tests/st/ops/ascend/test_tbe_ops/test_greater.py  +5 -1
tests/st/ops/ascend/test_tbe_ops/test_layernorm.py  +5 -1
tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py  +6 -1
tests/st/ops/ascend/test_tbe_ops/test_less.py  +7 -3
tests/st/ops/ascend/test_tbe_ops/test_less_equal.py  +7 -3
tests/st/ops/ascend/test_tbe_ops/test_logical_and.py  +6 -1
tests/st/ops/ascend/test_tbe_ops/test_logical_not.py  +5 -1
tests/st/ops/ascend/test_tbe_ops/test_logical_or.py  +6 -1
tests/st/ops/ascend/test_tbe_ops/test_matmul.py  +5 -2
tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py  +7 -2
tests/st/ops/ascend/test_tbe_ops/test_maximum.py  +7 -3
tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py  +5 -1
tests/st/ops/ascend/test_tbe_ops/test_maxpool.py  +2 -2
tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py  +1 -0
tests/st/ops/ascend/test_tbe_ops/test_minimum.py  +7 -3
tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py  +4 -1
tests/st/ops/ascend/test_tbe_ops/test_mul.py  +7 -2
tests/st/ops/ascend/test_tbe_ops/test_npu_alloc_float_status.py  +4 -1
tests/st/ops/ascend/test_tbe_ops/test_npu_clear_float_status.py  +5 -1
tests/st/ops/ascend/test_tbe_ops/test_npu_get_float_status.py  +5 -1
tests/st/ops/ascend/test_tbe_ops/test_pad.py  +4 -1
tests/st/ops/ascend/test_tbe_ops/test_pow.py  +7 -3
tests/st/ops/ascend/test_tbe_ops/test_realdiv.py  +7 -2
tests/st/ops/ascend/test_tbe_ops/test_reciprocal.py  +5 -1
tests/st/ops/ascend/test_tbe_ops/test_relu.py  +5 -1
tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py  +6 -2
tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py  +5 -1
tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor.py  +3 -0
tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py  +2 -1
tests/st/ops/ascend/test_tbe_ops/test_scatter_nd.py  +3 -1
tests/st/ops/ascend/test_tbe_ops/test_select.py  +12 -7
tests/st/ops/ascend/test_tbe_ops/test_sigmoid.py  +3 -0
tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits.py  +1 -0
tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py  +1 -0
tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py  +2 -1
tests/st/ops/ascend/test_tbe_ops/test_slice.py  +8 -6
tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss.py  +1 -0
tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py  +1 -0
tests/st/ops/ascend/test_tbe_ops/test_softmax.py  +6 -1
tests/st/ops/ascend/test_tbe_ops/test_softmax_cross_entropy_with_logits.py  +2 -1
tests/st/ops/ascend/test_tbe_ops/test_split.py  +5 -1
tests/st/ops/ascend/test_tbe_ops/test_sqrt.py  +6 -1
tests/st/ops/ascend/test_tbe_ops/test_square.py  +6 -1
tests/st/ops/ascend/test_tbe_ops/test_stridedslice.py  +12 -6
tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py  +5 -0
tests/st/ops/ascend/test_tbe_ops/test_sub.py  +7 -3
tests/st/ops/ascend/test_tbe_ops/test_tanh.py  +5 -1
tests/st/ops/ascend/test_tbe_ops/test_tanh_grad.py  +5 -1
tests/st/ops/ascend/test_tbe_ops/test_tile.py  +1 -0
tests/st/ops/ascend/test_tbe_ops/test_topk.py  +4 -2
tests/st/ops/ascend/test_tbe_ops/test_transpose_d.py  +4 -1
tests/st/ops/ascend/test_tbe_ops/test_unsorted_segment_sum.py  +5 -1
tests/st/ops/ascend/test_tdt_data_ms.py  +1 -2
tests/st/ops/ascend/test_add.py
@@ -20,18 +20,23 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(enable_task_sink=True)


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.add = P.TensorAdd()

    def construct(self, x, y):
        return self.add(x, y)


x = np.ones([1, 3, 3, 4]).astype(np.float32)
y = np.ones([1, 3, 3, 4]).astype(np.float32)


def test_net():
    add = Net()
    ...
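The assertion itself falls outside the shown context, so the following is a hedged NumPy-only sketch of the equivalent check, not code from the commit: adding the two all-ones tensors the hunk sets up must give an all-twos tensor.

import numpy as np

# NumPy reference for the elementwise add test_add.py sets up.
x = np.ones([1, 3, 3, 4]).astype(np.float32)
y = np.ones([1, 3, 3, 4]).astype(np.float32)
expect = x + y
assert np.all(expect == 2.0)
assert expect.shape == (1, 3, 3, 4)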
tests/st/ops/ascend/test_addn.py
@@ -20,15 +20,19 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.add = P.AddN()

    def construct(self, x, y):
        return self.add((x, y))


def test_net():
    x = np.random.randn(1, 3, 3, 4).astype(np.float32)
    y = np.random.randn(1, 3, 3, 4).astype(np.float32)
    ...
tests/st/ops/ascend/test_aicpu_ops/test_expand_dims.py
@@ -18,97 +18,110 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.expand_dims = P.ExpandDims()

    def construct(self, tensor, dim):
        return self.expand_dims(tensor, dim)


def test_net_bool():
    x = np.random.randn(1, 16, 1, 1).astype(np.bool)
    net = Net()
    output = net(Tensor(x), -1)
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))


def test_net_int8():
    x = np.random.randn(1, 16, 1, 1).astype(np.int8)
    net = Net()
    output = net(Tensor(x), -1)
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))


def test_net_uint8():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
    net = Net()
    output = net(Tensor(x), -1)
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))


def test_net_int16():
    x = np.random.randn(1, 16, 1, 1).astype(np.int16)
    net = Net()
    output = net(Tensor(x), -1)
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))


def test_net_uint16():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
    net = Net()
    output = net(Tensor(x), -1)
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))


def test_net_int32():
    x = np.random.randn(1, 16, 1, 1).astype(np.int32)
    net = Net()
    output = net(Tensor(x), -1)
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))


def test_net_uint32():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
    net = Net()
    output = net(Tensor(x), -1)
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))


def test_net_int64():
    x = np.random.randn(1, 16, 1, 1).astype(np.int64)
    net = Net()
    output = net(Tensor(x), -1)
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))


def test_net_uint64():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
    net = Net()
    output = net(Tensor(x), -1)
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))


def test_net_float16():
    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
    net = Net()
    output = net(Tensor(x), -1)
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))


def test_net_float32():
    x = np.random.randn(1, 16, 1, 1).astype(np.float32)
    net = Net()
    output = net(Tensor(x), -1)
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))


def test_net_float64():
    x = np.random.randn(1, 16, 1, 1).astype(np.float64)
    net = Net()
    output = net(Tensor(x), -1)
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
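Each dtype variant above compares the Ascend ExpandDims kernel against NumPy's own reference. A minimal NumPy-only sketch of that reference behavior, runnable without MindSpore:

import numpy as np

# np.expand_dims(x, -1) appends a trailing axis:
# (1, 16, 1, 1) -> (1, 16, 1, 1, 1), with the data unchanged.
x = np.random.randn(1, 16, 1, 1).astype(np.float32)
ref = np.expand_dims(x, -1)
assert ref.shape == (1, 16, 1, 1, 1)
assert np.all(ref.reshape(x.shape) == x)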
tests/st/ops/ascend/test_aicpu_ops/test_flatten.py
@@ -17,83 +17,94 @@ from mindspore.ops import operations as P
import mindspore.nn as nn
import numpy as np
import mindspore.context as context

context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.flatten = P.Flatten()

    def construct(self, tensor):
        return self.flatten(tensor)


def test_net_int8():
    x = np.random.randn(1, 16, 1, 1).astype(np.int8)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.flatten()))


def test_net_uint8():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.flatten()))


def test_net_int16():
    x = np.random.randn(1, 16, 1, 1).astype(np.int16)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.flatten()))


def test_net_uint16():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.flatten()))


def test_net_int32():
    x = np.random.randn(1, 16, 1, 1).astype(np.int32)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.flatten()))


def test_net_uint32():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.flatten()))


def test_net_int64():
    x = np.random.randn(1, 16, 1, 1).astype(np.int64)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.flatten()))


def test_net_uint64():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.flatten()))


def test_net_float16():
    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.flatten()))


def test_net_float32():
    x = np.random.randn(1, 16, 1, 1).astype(np.float32)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.flatten()))
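The Flatten tests all reduce to the same NumPy reference; a self-contained sketch:

import numpy as np

# x.flatten() returns a 1-D copy in row-major order: (1, 16, 1, 1) -> (16,).
x = np.random.randn(1, 16, 1, 1).astype(np.int8)
f = x.flatten()
assert f.shape == (16,)
assert np.all(f == x.reshape(16))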
tests/st/ops/ascend/test_aicpu_ops/test_is_finite.py
@@ -18,97 +18,110 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.isfinite = P.IsFinite()

    def construct(self, tensor):
        return self.isfinite(tensor)


def test_net_bool():
    x = np.random.randn(1, 16, 1, 1).astype(np.bool)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.isfinite(x)))


def test_net_int8():
    x = np.random.randn(1, 16, 1, 1).astype(np.int8)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.isfinite(x)))


def test_net_uint8():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.isfinite(x)))


def test_net_int16():
    x = np.random.randn(1, 16, 1, 1).astype(np.int16)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.isfinite(x)))


def test_net_uint16():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.isfinite(x)))


def test_net_int32():
    x = np.random.randn(1, 16, 1, 1).astype(np.int32)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.isfinite(x)))


def test_net_uint32():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.isfinite(x)))


def test_net_int64():
    x = np.random.randn(1, 16, 1, 1).astype(np.int64)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.isfinite(x)))


def test_net_uint64():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.isfinite(x)))


def test_net_float16():
    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.isfinite(x)))


def test_net_float32():
    x = np.random.randn(1, 16, 1, 1).astype(np.float32)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.isfinite(x)))


def test_net_float64():
    x = np.random.randn(1, 16, 1, 1).astype(np.float64)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.isfinite(x)))
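The IsFinite tests compare against np.isfinite; note that np.random.randn never produces inf or nan, so the reference is all True for these inputs. A NumPy-only sketch:

import numpy as np

x = np.random.randn(1, 16, 1, 1).astype(np.float32)
assert np.all(np.isfinite(x))      # randn output is always finite
x[0, 0, 0, 0] = np.inf
assert not np.all(np.isfinite(x))  # isfinite is False for inf/nan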
tests/st/ops/ascend/test_aicpu_ops/test_reshape.py
@@ -18,97 +18,110 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.reshape = P.Reshape()

    def construct(self, tensor):
        return self.reshape(tensor, (4, 4))


def test_net_bool():
    x = np.random.randn(1, 16, 1, 1).astype(np.bool)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))


def test_net_int8():
    x = np.random.randn(1, 16, 1, 1).astype(np.int8)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))


def test_net_uint8():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))


def test_net_int16():
    x = np.random.randn(1, 16, 1, 1).astype(np.int16)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))


def test_net_uint16():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))


def test_net_int32():
    x = np.random.randn(1, 16, 1, 1).astype(np.int32)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))


def test_net_uint32():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))


def test_net_int64():
    x = np.random.randn(1, 16, 1, 1).astype(np.int64)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))


def test_net_uint64():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))


def test_net_float16():
    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))


def test_net_float32():
    x = np.random.randn(1, 16, 1, 1).astype(np.float32)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))


def test_net_float64():
    x = np.random.randn(1, 16, 1, 1).astype(np.float64)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
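Reshaping to (4, 4) is valid here because 1 * 16 * 1 * 1 == 4 * 4, and NumPy's reference preserves row-major element order. A sketch:

import numpy as np

x = np.random.randn(1, 16, 1, 1).astype(np.float32)
r = np.reshape(x, (4, 4))
assert r.shape == (4, 4)
assert np.all(r.flatten() == x.flatten())  # same elements, same order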
tests/st/ops/ascend/test_aicpu_ops/test_squeeze.py
@@ -17,97 +17,110 @@ from mindspore.ops import operations as P
import mindspore.nn as nn
import numpy as np
import mindspore.context as context

context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.squeeze = P.Squeeze()

    def construct(self, tensor):
        return self.squeeze(tensor)


def test_net_bool():
    x = np.random.randn(1, 16, 1, 1).astype(np.bool)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.squeeze()))


def test_net_int8():
    x = np.random.randn(1, 16, 1, 1).astype(np.int8)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.squeeze()))


def test_net_uint8():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.squeeze()))


def test_net_int16():
    x = np.random.randn(1, 16, 1, 1).astype(np.int16)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.squeeze()))


def test_net_uint16():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint16)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.squeeze()))


def test_net_int32():
    x = np.random.randn(1, 16, 1, 1).astype(np.int32)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.squeeze()))


def test_net_uint32():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint32)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.squeeze()))


def test_net_int64():
    x = np.random.randn(1, 16, 1, 1).astype(np.int64)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.squeeze()))


def test_net_uint64():
    x = np.random.randn(1, 16, 1, 1).astype(np.uint64)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.squeeze()))


def test_net_float16():
    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.squeeze()))


def test_net_float32():
    x = np.random.randn(1, 16, 1, 1).astype(np.float32)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.squeeze()))


def test_net_float64():
    x = np.random.randn(1, 16, 1, 1).astype(np.float64)
    net = Net()
    output = net(Tensor(x))
    print(output.asnumpy())
    assert (np.all(output.asnumpy() == x.squeeze()))
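Squeeze with no axis argument drops every size-1 dimension, so the (1, 16, 1, 1) inputs above collapse to shape (16,). The NumPy reference, as a sketch:

import numpy as np

x = np.random.randn(1, 16, 1, 1).astype(np.float32)
s = x.squeeze()
assert s.shape == (16,)
assert np.all(s == x.reshape(16))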
tests/st/ops/ascend/test_apply_momentum.py
@@ -20,24 +20,29 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.apply_momentum = P.ApplyMomentum(gradient_scale=1024.0)
        self.variable = Parameter(initializer(
            'normal', [2, 3, 3, 4]), name='variable')
        self.accumulation = Parameter(initializer(
            'normal', [2, 3, 3, 4]), name='accumulation')
        self.learning_rate = Parameter(initializer(
            'normal', [1, ]), name='learning_rate')
        self.gradient = Parameter(initializer(
            'normal', [2, 3, 3, 4]), name='gradient')
        self.momentum = Parameter(initializer(
            'normal', [1, ]), name='momentum')

    def construct(self):
        return self.apply_momentum(self.variable, self.accumulation, self.learning_rate, self.gradient, self.momentum)


def test_net():
    apply_momentum = Net()
    output = apply_momentum()
    ...
tests/st/ops/ascend/test_biasAddGrad.py
@@ -21,22 +21,25 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.bias_add_grad = G.BiasAddGrad()
        # self.dout = Parameter(initializer(
        #     'normal', [2, 3, 3, 4]), name='dout')

    @ms_function
    def construct(self, dout):
        return self.bias_add_grad(dout)


dout = np.ones([2, 3, 4, 4]).astype(np.float32)
bias_add_grad = Net()
output = bias_add_grad(Tensor(dout))
expect_output = np.array([32., 32., 32.]).astype(np.float32)
assert np.all(output.asnumpy() == expect_output), "bias_add_grad execute failed, please check current code commit"
print(output.asnumpy())
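Why the expected output is [32., 32., 32.]: BiasAddGrad reduces the incoming gradient over every axis except the channel axis (axis 1 in NCHW), and dout is all ones with 2 * 4 * 4 = 32 elements per channel. A NumPy sketch of that arithmetic:

import numpy as np

dout = np.ones([2, 3, 4, 4]).astype(np.float32)
ref = dout.sum(axis=(0, 2, 3))  # sum over batch, height, width
assert np.all(ref == np.array([32., 32., 32.], dtype=np.float32))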
tests/st/ops/ascend/test_bias_add_grad.py
@@ -21,17 +21,20 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.bias_add_grad = G.BiasAddGrad()

    @ms_function
    def construct(self, dout):
        return self.bias_add_grad(dout)


def test_net():
    dout = np.random.rand(1, 1001).astype(np.float32)
    bias_add_grad = Net()
    ...
tests/st/ops/ascend/test_conv.py
@@ -20,32 +20,33 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        out_channel = 64
        kernel_size = 7
        self.conv = P.Conv2D(out_channel,
                             kernel_size,
                             mode=1, pad_mode="valid", pad=0,
                             stride=1, dilation=1, group=1)
        self.w = Parameter(initializer(
            'normal', [64, 3, 7, 7]), name='w')

    @ms_function
    def construct(self, x):
        return self.conv(x, self.w)


def test_net():
    x = np.random.randn(32, 3, 224, 224).astype(np.float32)
    conv = Net()
    output = conv(Tensor(x))
    print(output.asnumpy())
tests/st/ops/ascend/test_conv2dGradFilter.py
@@ -21,37 +21,40 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.conv2d_grad = G.Conv2DBackpropFilter(4, 1)
        yt = Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32))
        self.y = Parameter(yt, name='y')
        self.get_shape = P.Shape()

    @ms_function
    def construct(self, x, out):
        return self.conv2d_grad(out, x, self.get_shape(self.y))


x = Tensor(np.array([[[
    [3, 0, 1, 2, 7, 4],
    [1, 5, 8, 9, 3, 1],
    [2, 7, 2, 5, 1, 3],
    [0, 1, 3, 1, 7, 8],
    [4, 2, 1, 6, 2, 8],
    [2, 4, 5, 2, 3, 9]]]]).astype(np.float32))
out = Tensor(np.array([[[
    [-5, -4, 0, 8],
    [-10, -2, 2, 3],
    [0, -2, -4, -7],
    [-3, -2, -3, -16]]]]).astype(np.float32))
operator = Net()
output = operator(x, out)
expect_out = np.array([[[[-60., -142., -265.],
                         [-104., -211., -322.],
                         [-102., -144., -248.]]]]).astype(np.float32)
print(output.asnumpy())
print(expect_out)
assert np.all(output.asnumpy() == expect_out), "conv2d_grad execute failed, please check current code commit"
tests/st/ops/ascend/test_conv_grad.py
@@ -21,8 +21,10 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops.composite import GradOperation

context.set_context(device_target="Ascend")


class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
        ...
@@ -33,26 +35,28 @@ class Grad(nn.Cell):
    def construct(self, input, output_grad):
        return self.grad(self.network)(input, output_grad)


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        out_channel = 512
        kernel_size = 2048
        self.conv = P.Conv2D(out_channel,
                             (kernel_size, kernel_size),
                             mode=1, pad_mode="same", pad=3,
                             stride=2, dilation=1, group=1)
        self.w = Parameter(initializer(
            'normal', [512, 2048, 1, 1]), name='w')

    @ms_function
    def construct(self, x):
        return self.conv(x, self.w)


def test_net():
    x = np.ones([32, 2048, 7, 7]).astype(np.float32)
    sens = np.ones([32, 512, 7, 7]).astype(np.float32)
    ...
tests/st/ops/ascend/test_dense.py
@@ -20,7 +20,10 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        ...
@@ -30,6 +33,7 @@ class Net(nn.Cell):
    def construct(self, x):
        return self.dense(x)


def test_net():
    x = np.random.randn(32, 2048).astype(np.float32)
    net = Net()
    ...
tests/st/ops/ascend/test_dense_grad.py
@@ -21,8 +21,10 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops.composite import GradOperation

context.set_context(device_target="Ascend")


class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
        ...
@@ -33,6 +35,7 @@ class Grad(nn.Cell):
    def construct(self, input, output_grad):
        return self.grad(self.network)(input, output_grad)


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        ...
@@ -41,6 +44,7 @@ class Net(nn.Cell):
    def construct(self, x):
        return self.dense(x)


def test_net():
    x = np.random.randn(32, 2048).astype(np.float32)
    sens = np.random.randn(32, 1001).astype(np.float32)
    ...
tests/st/ops/ascend/test_drop_out_gen_mask.py
@@ -17,6 +17,7 @@ from mindspore.ops import operations as P
import mindspore.nn as nn
import numpy as np
import mindspore.context as context

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
...
tests/st/ops/ascend/test_full_connection.py
@@ -21,6 +21,7 @@ import mindspore.context as context
context.set_context(device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        ...
tests/st/ops/ascend/test_fused_batchnorm.py
@@ -20,7 +20,10 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        ...
@@ -35,7 +38,7 @@ class Net(nn.Cell):
def test_net():
    x = np.random.randn(1, 64, 112, 112).astype(np.float32)
    # mean = np.random.randn(1,16,1,1).astype(np.float32)
    # variance = np.random.randn(1,16,1,1).astype(np.float32)
    fusedBn = Net()
    ...
@@ -45,4 +48,3 @@ def test_net():
    print("***********output y*********")
    print(output.asnumpy())
tests/st/ops/ascend/test_fused_batchnorm_grad.py
@@ -21,8 +21,11 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops.composite import GradOperation

# context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(device_target="Ascend")


class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
        ...
@@ -33,6 +36,7 @@ class Grad(nn.Cell):
    def construct(self, input, output_grad):
        return self.grad(self.network)(input, output_grad)


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        ...
@@ -47,8 +51,8 @@ class Net(nn.Cell):
def test_net():
    x = np.random.randn(1, 64, 112, 112).astype(np.float32)
    sens = np.random.randn(1, 64, 112, 112).astype(np.float32)
    net = Grad(Net())
    output = net(Tensor(x), Tensor(sens))
    print("***********x*********")
    ...
tests/st/ops/ascend/test_image_gradients.py
...
@@ -20,6 +20,8 @@ from mindspore import Tensor
from mindspore.common.api import ms_function

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -31,32 +33,32 @@ class Net(nn.Cell):
def test_image_gradients():
-    image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32)
-    expected_dy = np.array([[[[2,2],[0,0]]]]).astype(np.int32)
-    expected_dx = np.array([[[[1,0],[1,0]]]]).astype(np.int32)
+    image = Tensor(np.array([[[[1, 2], [3, 4]]]]), dtype=mstype.int32)
+    expected_dy = np.array([[[[2, 2], [0, 0]]]]).astype(np.int32)
+    expected_dx = np.array([[[[1, 0], [1, 0]]]]).astype(np.int32)
    net = Net()
    dy, dx = net(image)
    assert np.any(dx.asnumpy() - expected_dx) == False
    assert np.any(dy.asnumpy() - expected_dy) == False

def test_image_gradients_multi_channel_depth():
    # 4 x 2 x 2 x 2
    dtype = mstype.int32
-    image = Tensor(np.array([[[[1,2],[3,4]], [[5,6],[7,8]]],
-                             [[[3,5],[7,9]], [[11,13],[15,17]]],
-                             [[[5,10],[15,20]], [[25,30],[35,40]]],
-                             [[[10,20],[30,40]], [[50,60],[70,80]]]]), dtype=dtype)
-    expected_dy = Tensor(np.array([[[[2,2],[0,0]], [[2,2],[0,0]]],
-                                   [[[4,4],[0,0]], [[4,4],[0,0]]],
-                                   [[[10,10],[0,0]], [[10,10],[0,0]]],
-                                   [[[20,20],[0,0]], [[20,20],[0,0]]]]), dtype=dtype)
-    expected_dx = Tensor(np.array([[[[1,0],[1,0]], [[1,0],[1,0]]],
-                                   [[[2,0],[2,0]], [[2,0],[2,0]]],
-                                   [[[5,0],[5,0]], [[5,0],[5,0]]],
-                                   [[[10,0],[10,0]], [[10,0],[10,0]]]]), dtype=dtype)
+    image = Tensor(np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
+                             [[[3, 5], [7, 9]], [[11, 13], [15, 17]]],
+                             [[[5, 10], [15, 20]], [[25, 30], [35, 40]]],
+                             [[[10, 20], [30, 40]], [[50, 60], [70, 80]]]]), dtype=dtype)
+    expected_dy = Tensor(np.array([[[[2, 2], [0, 0]], [[2, 2], [0, 0]]],
+                                   [[[4, 4], [0, 0]], [[4, 4], [0, 0]]],
+                                   [[[10, 10], [0, 0]], [[10, 10], [0, 0]]],
+                                   [[[20, 20], [0, 0]], [[20, 20], [0, 0]]]]), dtype=dtype)
+    expected_dx = Tensor(np.array([[[[1, 0], [1, 0]], [[1, 0], [1, 0]]],
+                                   [[[2, 0], [2, 0]], [[2, 0], [2, 0]]],
+                                   [[[5, 0], [5, 0]], [[5, 0], [5, 0]]],
+                                   [[[10, 0], [10, 0]], [[10, 0], [10, 0]]]]), dtype=dtype)
    net = Net()
    dy, dx = net(image)
    assert np.any(dx.asnumpy() - expected_dx.asnumpy()) == False
    assert np.any(dy.asnumpy() - expected_dy.asnumpy()) == False
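A note on the assertion idiom above: `assert np.any(a - b) == False` checks exact element-wise equality by asserting that the difference contains no nonzero entry. `np.array_equal` states the same check more directly; a minimal sketch using the 2x2 case from the test:

import numpy as np

dy = np.array([[[[2, 2], [0, 0]]]], dtype=np.int32)
expected_dy = np.array([[[[2, 2], [0, 0]]]], dtype=np.int32)

# Equivalent formulations of the same exact-equality assertion.
assert np.any(dy - expected_dy) == False
assert np.array_equal(dy, expected_dy)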
tests/st/ops/ascend/test_matmul.py
...
@@ -20,7 +20,10 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -30,8 +33,10 @@ class Net(nn.Cell):
    def construct(self, x1, x2):
        return self.matmul(x1, x2)

x1 = np.random.randn(1, 3).astype(np.float32)
x2 = np.random.randn(3, 4).astype(np.float32)

def test_net():
    matmul = Net()
...
tests/st/ops/ascend/test_maxpool.py
...
@@ -20,12 +20,13 @@ import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.maxpool = P.MaxPool(pad_mode="SAME", window=3, stride=2)

    @ms_function
    def construct(self, x):
        output = self.maxpool(x)
...
@@ -33,7 +34,7 @@ class Net(nn.Cell):
def test_net():
    x = np.random.randn(32, 64, 112, 112).astype(np.float32)
    maxpool = Net()
    output = maxpool(Tensor(x))
    print(output.asnumpy())
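For the MaxPool above with SAME padding, window 3 and stride 2, the output spatial size is ceil(input / stride), independent of the window size. A quick check of the 112x112 case; the formula is the standard SAME-padding rule (note the attribute names differ across these tests, window/stride here versus ksize/strides in the TBE variant below):

import math

def same_out_size(in_size, stride):
    # SAME padding: the output covers the input with ceil(in / stride) windows.
    return math.ceil(in_size / stride)

assert same_out_size(112, 2) == 56  # (32, 64, 112, 112) -> (32, 64, 56, 56)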
tests/st/ops/ascend/test_maxpool_grad.py
...
@@ -19,6 +19,7 @@ from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context
from mindspore.ops.composite import GradOperation

context.set_context(device_target="Ascend")
...
tests/st/ops/ascend/test_maxpool_with_argmax_grad.py
...
@@ -21,8 +21,10 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops.composite import GradOperation

context.set_context(device_target="Ascend")

class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
...
@@ -33,6 +35,7 @@ class Grad(nn.Cell):
    def construct(self, input, output_grad):
        return self.grad(self.network)(input, output_grad)

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -43,8 +46,9 @@ class Net(nn.Cell):
    @ms_function
    def construct(self, x):
        output = self.maxpool(x)
        return output[0]

def test_net():
    x = np.random.randn(32, 64, 112, 112).astype(np.float32)
...
tests/st/ops/ascend/test_relu.py
...
@@ -20,7 +20,10 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -30,8 +33,9 @@ class Net(nn.Cell):
    def construct(self, x):
        return self.relu(x)

def test_net():
    x = np.random.randn(2, 3, 3, 4).astype(np.float32)
    relu = Net()
    output = relu(Tensor(x))
    print(x)
...
tests/st/ops/ascend/test_relu_grad.py
...
@@ -21,8 +21,10 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops.composite import GradOperation

context.set_context(device_target="Ascend")

class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
...
@@ -33,6 +35,7 @@ class Grad(nn.Cell):
    def construct(self, input, output_grad):
        return self.grad(self.network)(input, output_grad)

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -41,9 +44,10 @@ class Net(nn.Cell):
    def construct(self, x):
        return self.relu(x)

def test_net():
    x = np.random.randn(2, 3, 3, 4).astype(np.float32)
    sens = np.random.randn(2, 3, 3, 4).astype(np.float32)
    net = Grad(Net())
    output = net(Tensor(x), Tensor(sens))
    print(len(output))
...
tests/st/ops/ascend/test_reshape.py
...
@@ -18,18 +18,22 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.reshape = P.Reshape()

    @ms_function
    def construct(self, tensor):
        return self.reshape(tensor, (1, 16))

def test_net():
    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
    reshape = Net()
    output = reshape(Tensor(x))
    print(output.asnumpy())
tests/st/ops/ascend/test_simplemean.py
...
@@ -20,7 +20,10 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -29,7 +32,8 @@ class Net(nn.Cell):
    @ms_function
    def construct(self, x):
        return self.simplemean(x, (-2, -1))

def test_net():
    x = np.random.randn(32, 2048, 7, 7).astype(np.float32)
    simplemean = Net()
...
tests/st/ops/ascend/test_simplemean_grad.py
...
@@ -21,8 +21,10 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops.composite import GradOperation

context.set_context(device_target="Ascend")

class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
...
@@ -33,6 +35,7 @@ class Grad(nn.Cell):
    def construct(self, input, output_grad):
        return self.grad(self.network)(input, output_grad)

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -41,9 +44,10 @@ class Net(nn.Cell):
    def construct(self, x):
        return self.simplemean(x, (-2, -1))

def test_net():
    x = np.random.randn(32, 2048, 7, 7).astype(np.float32)
    sens = np.random.randn(32, 2048, 1, 1).astype(np.float32)
    net = Grad(Net())
    output = net(Tensor(x), Tensor(sens))
    print(output.asnumpy())
...
tests/st/ops/ascend/test_sparseSoftmaxCrossEntropyWithLogits.py
...
@@ -18,6 +18,7 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")
...
@@ -30,9 +31,10 @@ class Net(nn.Cell):
    def construct(self, features, labels):
        return self.SparseSoftmaxCrossEntropyWithLogits(features, labels)

def np_sparse_softmax_cross_entropy_with_logits(labels_shape, logits_shape, logits_dtype):
    num_class = logits_shape[1]
    labels = np.random.randint(low=0, high=num_class - 1, size=labels_shape).astype(np.int32)
    logits = np.random.rand(*logits_shape).astype(logits_dtype)
    features = logits
    features_reshape = np.reshape(features, [-1, num_class])
...
@@ -48,7 +50,7 @@ def np_sparse_softmax_cross_entropy_with_logits(labels_shape, logits_shape, logi
    loss = -np.sum(labels_mat * np.log(probs + 1.0e-20), axis=1)
    bp_res = np.reshape(bp, features.shape)
    loss_res = np.reshape(loss, labels.shape)
    loss_res = np.sum(loss_res, axis=0) / loss_res.shape[0]
    return labels, logits, loss_res, bp_res
...
@@ -65,4 +67,6 @@ def test_net():
    print(loss_me.asnumpy().flatten())
    print("-------------------------")
    print(expect)

test_net()
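A compact restatement of what the numpy reference above computes, per sample: the loss is the negative log-probability that softmax assigns to the true class, and the batch mean is returned. A tiny worked sketch with illustrative values:

import numpy as np

logits = np.array([[2.0, 1.0, 0.1]], dtype=np.float32)
label = np.array([0], dtype=np.int32)

# Softmax over classes, then negative log-likelihood of the labeled class.
probs = np.exp(logits) / np.sum(np.exp(logits), axis=1, keepdims=True)
loss = -np.log(probs[np.arange(len(label)), label] + 1.0e-20)
print(loss.mean())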
tests/st/ops/ascend/test_sparse_softmax_cross_entropy_with_logits_grad.py
...
@@ -21,6 +21,7 @@ import mindspore.context as context
context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self, is_grad=False):
        super(Net, self).__init__()
...
tests/st/ops/ascend/test_tbe_ops/test_AssignAdd.py
...
@@ -20,11 +20,13 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Net(nn.Cell):
    """Net definition"""
    def __init__(self):
        super(Net, self).__init__()
        self.AssignAdd = P.AssignAdd()
...
@@ -39,8 +41,8 @@ class Net(nn.Cell):
def test_net():
    """test AssignAdd"""
    net = Net()
    x = Tensor(np.ones([1]).astype(np.float32) * 100)
    print("MyPrintResult dataX:", x)
    result = net(x)
-    print("MyPrintResult data::", result.asnumpy())
\ No newline at end of file
+    print("MyPrintResult data::", result.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_AssignSub.py
...
@@ -20,11 +20,13 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Net(nn.Cell):
    """Net definition"""
    def __init__(self):
        super(Net, self).__init__()
        self.AssignSub = P.AssignSub()
...
@@ -39,8 +41,8 @@ class Net(nn.Cell):
def test_net():
    """test AssignSub"""
    net = Net()
    x = Tensor(np.ones([1]).astype(np.int32) * 100)
    print("MyPrintResult dataX:", x)
    result = net(x)
-    print("MyPrintResult data::", result.asnumpy())
\ No newline at end of file
+    print("MyPrintResult data::", result.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_ReduceMean.py
...
@@ -20,7 +20,10 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self, keep_dims, axis):
        super(Net, self).__init__()
...
@@ -31,8 +34,10 @@ class Net(nn.Cell):
    def construct(self, inputs):
        return self.reduce_mean(inputs, self.axis)

x1 = np.random.randn(64).astype(np.float32)

def test_net():
    keepdims = False
    axis = -1
...
tests/st/ops/ascend/test_tbe_ops/test_add.py
...
@@ -21,6 +21,7 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -29,8 +30,9 @@ class Net(nn.Cell):
    def construct(self, x, y):
        return self.add(x, y)

x = np.random.randn(1, 3, 3, 4).astype(np.float32)
y = np.random.randn(1, 3, 3, 4).astype(np.float32)

def test_net():
...
tests/st/ops/ascend/test_tbe_ops/test_addn.py
...
@@ -20,15 +20,19 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.add = P.AddN()

    def construct(self, x, y):
        return self.add((x, y))

def test_net():
    x = np.random.randn(1, 3, 3, 4).astype(np.float32)
    y = np.random.randn(1, 3, 3, 4).astype(np.float32)
...
tests/st/ops/ascend/test_tbe_ops/test_apply_adam.py
...
@@ -19,6 +19,7 @@ from mindspore.nn import Dense, SoftmaxCrossEntropyWithLogits
from mindspore.nn import TrainOneStepCell, WithLossCell
import mindspore.context as context

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", impl_type="tbe")
context.set_context(enable_task_sink=True)
...
@@ -44,16 +45,16 @@ class Adam:
        label = Tensor(label_np_onehot)
        ms_dense = Dense(in_channels=self.input_channels,
                         out_channels=self.output_channels,
                         weight_init=weight_np,
                         bias_init=bias, has_bias=True)
        criterion = SoftmaxCrossEntropyWithLogits()
        optimizer = nn.Adam(ms_dense.trainable_params(),
                            learning_rate=1e-3, beta1=0.9, beta2=0.999,
                            eps=self.epsilon, use_locking=False, use_nesterov=False,
                            weight_decay=0.0, loss_scale=1.0)
        net_with_criterion = WithLossCell(ms_dense, criterion)
        train_network = TrainOneStepCell(net_with_criterion, optimizer)
...
@@ -68,5 +69,5 @@ class Adam:
def test_adam():
    fact = Adam(batch_num=8, input_channels=20, output_channels=5, epoch=5, lr=0.1,
                weight_decay=0.0, epsilon=1e-8)
    fact.train_mindspore_impl()
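For reference, a single-step numpy sketch of the update rule that an Adam optimizer applies per parameter, with the hyperparameters from the test (beta1=0.9, beta2=0.999, eps=epsilon). This is the textbook rule, not MindSpore's internal implementation:

import numpy as np

def adam_step(w, g, m, v, t, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    # First and second moment estimates, with bias correction for step t.
    m = beta1 * m + (1 - beta1) * g
    v = beta2 * v + (1 - beta2) * g * g
    m_hat = m / (1 - beta1 ** t)
    v_hat = v / (1 - beta2 ** t)
    w = w - lr * m_hat / (np.sqrt(v_hat) + eps)
    return w, m, v

w = np.zeros(3); m = np.zeros(3); v = np.zeros(3)
w, m, v = adam_step(w, np.array([0.1, -0.2, 0.3]), m, v, t=1)
print(w)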
tests/st/ops/ascend/test_tbe_ops/test_apply_momentum.py
...
@@ -21,23 +21,26 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.apply_momentum = P.ApplyMomentum(gradient_scale=1024.0)
        self.variable = Parameter(initializer('normal', [2, 3, 3, 4]), name='variable')
        self.accumulation = Parameter(initializer('normal', [2, 3, 3, 4]), name='accumulation')
        self.learning_rate = Parameter(initializer('normal', [1, ]), name='learning_rate')
        self.gradient = Parameter(initializer('normal', [2, 3, 3, 4]), name='gradient')
        self.momentum = Parameter(initializer('normal', [1, ]), name='momentum')

    def construct(self):
        return self.apply_momentum(self.variable, self.accumulation, self.learning_rate,
                                   self.gradient, self.momentum)

def test_net():
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    apply_momentum = Net()
...
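ApplyMomentum above performs the classic in-place momentum update on (variable, accumulation). A numpy sketch of one step; the gradient_scale argument is left out since its exact handling is not shown in this diff:

import numpy as np

def momentum_step(var, accum, grad, lr, momentum):
    # accum <- momentum * accum + grad; var <- var - lr * accum
    accum = momentum * accum + grad
    var = var - lr * accum
    return var, accum

var = np.ones(4); accum = np.zeros(4)
var, accum = momentum_step(var, accum, grad=np.full(4, 0.5), lr=0.1, momentum=0.9)
print(var, accum)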
tests/st/ops/ascend/test_tbe_ops/test_batchmatmul.py
...
@@ -19,8 +19,10 @@ from mindspore.nn import Cell
from mindspore.train.model import Model
import pytest
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Net(Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -30,17 +32,20 @@ class Net(Cell):
        x = self.batchmatmul(inputa, inputb)
        return x

def tf_me_batchmatmul(inputa, inputb):
    net = Net()
    net.set_train()
    model = Model(net)
    out_me = model.predict(Tensor(inputa), Tensor(inputb))

def test_batchmatmul_normal_shape1():
    inputa = np.random.randn(128, 16, 128).astype(np.float32)
    inputb = np.random.randn(128, 128, 64).astype(np.float32)
    tf_me_batchmatmul(Tensor(inputa), Tensor(inputb))

def test_batchmatmul_normal_shape2():
    inputa = np.random.randn(1, 16, 128, 128).astype(np.float32)
    inputb = np.random.randn(1, 16, 128, 64).astype(np.float32)
...
tests/st/ops/ascend/test_tbe_ops/test_batchnorm.py
...
@@ -21,6 +21,7 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -35,7 +36,7 @@ class Net(nn.Cell):
def test_net():
    x = np.random.randn(1, 64, 112, 112).astype(np.float32)
    # mean = np.random.randn(1,16,1,1).astype(np.float32)
    # variance = np.random.randn(1,16,1,1).astype(np.float32)
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
...
@@ -55,4 +56,3 @@ def test_net():
    print("***********output y*********")
    print(output.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py
...
@@ -21,8 +21,11 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops.composite import GradOperation

-#context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+# context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(device_target="Ascend")

class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
...
@@ -48,7 +51,7 @@ class Net(nn.Cell):
def test_net():
    x = np.random.randn(1, 64, 112, 112).astype(np.float32)
    sens = np.random.randn(1, 64, 112, 112).astype(np.float32)
    net = Grad(Net())
    output = net(Tensor(x), Tensor(sens))
tests/st/ops/ascend/test_tbe_ops/test_bias_add.py
...
@@ -20,11 +20,13 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Net(nn.Cell):
    """Net definition"""
    def __init__(self,
                 output_channels,
                 bias_init='zeros',
...
@@ -51,4 +53,3 @@ def test_compile():
    # enable it when staging function is ready
    output = net(input_data)
    print(output.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_bias_add_grad.py
...
@@ -21,7 +21,10 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -31,6 +34,7 @@ class Net(nn.Cell):
    def construct(self, dout):
        return self.bias_add_grad(dout)

def test_net():
    dout = np.random.rand(1, 1001).astype(np.float32)
    bias_add_grad = Net()
...
tests/st/ops/ascend/test_tbe_ops/test_concat.py
...
@@ -20,11 +20,12 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.cat = P.Concat(axis=1)
...
@@ -46,4 +47,4 @@ def test_net():
    print(np.arange(2 * 2).reshape(2, 2))
    print(np.arange(2 * 3).reshape(2, 3))
    print(output)
    assert (output.asnumpy() == expect).all()
tests/st/ops/ascend/test_tbe_ops/test_conv.py
...
@@ -21,31 +21,30 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        out_channel = 64
        kernel_size = 7
        self.conv = P.Conv2D(out_channel,
                             kernel_size,
                             mode=1,
                             pad_mode="valid",
                             pad=0,
                             stride=1,
                             dilation=1,
                             group=1)
        self.w = Parameter(initializer('normal', [64, 3, 7, 7]), name='w')

    @ms_function
    def construct(self, x):
        return self.conv(x, self.w)

def test_net():
    x = np.random.randn(32, 3, 224, 224).astype(np.float32)
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    conv = Net()
    output = conv(Tensor(x))
...
tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_filter.py
...
@@ -21,6 +21,7 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(device_target='Ascend')
...
@@ -37,19 +38,21 @@ class Net(nn.Cell):
                                             stride=1,
                                             dilation=1,
                                             group=1)
        self.w = Parameter(initializer(Tensor(np.array([[[[1, 0, -1],
                                                          [1, 0, -1],
                                                          [1, 0, -1]]]]).astype(np.float32)),
                                       [1, 1, 3, 3]), name='w')
        self.x = Parameter(initializer(Tensor(np.array([[[
            [3, 0, 1, 2, 7, 4],
            [1, 5, 8, 9, 3, 1],
            [2, 7, 2, 5, 1, 3],
            [0, 1, 3, 1, 7, 8],
            [4, 2, 1, 6, 2, 8],
            [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)),
                                       [1, 1, 6, 6]), name='x')
        self.out = Parameter(initializer(Tensor(np.array([[[
            [-5, -4, 0, 8],
            [-10, -2, 2, 3],
            [0, -2, -4, -7],
            [-3, -2, -3, -16]]]]).astype(np.float32)),
                                         [1, 1, 4, 4]), name='y')
        self.get_shape = P.Shape()

    @ms_function
...
@@ -67,7 +70,7 @@ def test_conv2d_backprop_filter():
     [-104, -211, -322]
     [-102, -144, -248]]]]
    """
    expect = np.array([[[[-60, -142, -265],
                         [-104, -211, -322],
                         [-102, -144, -248]]]]).astype(np.float32)
    print(output)
...
tests/st/ops/ascend/test_tbe_ops/test_conv2d_backprop_input.py
...
@@ -20,6 +20,7 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(device_target="Ascend")
...
@@ -36,19 +37,21 @@ class Net(nn.Cell):
                                             stride=1,
                                             dilation=1,
                                             group=1)
        self.w = Parameter(initializer(Tensor(np.array([[[[1, 0, -1],
                                                          [1, 0, -1],
                                                          [1, 0, -1]]]]).astype(np.float32)),
                                       [1, 1, 3, 3]), name='w')
        self.x = Parameter(initializer(Tensor(np.array([[[
            [3, 0, 1, 2, 7, 4],
            [1, 5, 8, 9, 3, 1],
            [2, 7, 2, 5, 1, 3],
            [0, 1, 3, 1, 7, 8],
            [4, 2, 1, 6, 2, 8],
            [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)),
                                       [1, 1, 6, 6]), name='x')
        self.out = Parameter(initializer(Tensor(np.array([[[
            [-5, -4, 0, 8],
            [-10, -2, 2, 3],
            [0, -2, -4, -7],
            [-3, -2, -3, -16]]]]).astype(np.float32)),
                                         [1, 1, 4, 4]), name='y')
        self.get_shape = P.Shape()

    @ms_function
...
@@ -69,11 +72,11 @@ def test_conv2d_backprop_input():
     [ -3, -4, -4, -19, 7, 23]
     [ -3, -2, 0, -14, 3, 16]]]]
    """
    expect = np.array([[[[-5, -4, 5, 12, 0, -8],
                         [-15, -6, 17, 17, -2, -11],
                         [-15, -8, 13, 12, 2, -4],
                         [-13, -6, 8, -14, 5, 20],
                         [-3, -4, -4, -19, 7, 23],
                         [-3, -2, 0, -14, 3, 16]]]]).astype(np.float32)
    print(output)
    assert (output.asnumpy() == expect).all()
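The expected matrix above can be reproduced outside MindSpore: for a valid cross-correlation forward pass, the input gradient is dx[i, j] = sum over (p, q) of w[p, q] * dout[i - p, j - q] with dout treated as zero outside its bounds. A minimal numpy sketch, checked against the first expected row; the helper name is illustrative:

import numpy as np

def conv2d_backprop_input(dout, w, in_h, in_w):
    # dx[i, j] = sum_{p, q} w[p, q] * dout[i - p, j - q] (dout zero-padded).
    k_h, k_w = w.shape
    dx = np.zeros((in_h, in_w), dtype=dout.dtype)
    for i in range(in_h):
        for j in range(in_w):
            for p in range(k_h):
                for q in range(k_w):
                    m, n = i - p, j - q
                    if 0 <= m < dout.shape[0] and 0 <= n < dout.shape[1]:
                        dx[i, j] += w[p, q] * dout[m, n]
    return dx

dout = np.array([[-5, -4, 0, 8],
                 [-10, -2, 2, 3],
                 [0, -2, -4, -7],
                 [-3, -2, -3, -16]], dtype=np.float32)
w = np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]], dtype=np.float32)
dx = conv2d_backprop_input(dout, w, 6, 6)
assert (dx[0] == np.array([-5, -4, 5, 12, 0, -8], dtype=np.float32)).all()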
tests/st/ops/ascend/test_tbe_ops/test_dropout_do_mask.py
...
@@ -20,9 +20,11 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
from mindspore import log as logger

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -33,7 +35,7 @@ class Net(nn.Cell):
def test_net():
    x = np.random.randn(2, 5, 8).astype(np.float32)
    mask = np.random.randn(16).astype(np.uint8)
    keep_prob = 1
...
@@ -48,4 +50,3 @@ def test_net():
    logger.info("***********output y*********")
    logger.info(output.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_gelu.py
...
@@ -21,6 +21,7 @@ import math
import pytest
from mindspore import context
from mindspore import log as logger

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
...
@@ -52,6 +53,7 @@ def test_gelu_input_dim_0():
    with pytest.raises(ValueError):
        gelu_forward_cmp(input_shape)

def test_gelu_input_dim_10240_1024():
    input_shape = [10240, 1024]
    gelu_forward_cmp(input_shape)
...
@@ -96,6 +98,7 @@ def test_gelu_input_dim_128_4096():
    input_shape = [128, 4096]
    gelu_forward_cmp(input_shape)

@pytest.mark.lower_bs
def test_gelu_input_dim_160_1024():
    input_shape = [160, 1024]
...
tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py
...
@@ -25,6 +25,7 @@ from mindspore import log as logger
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Grad(Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
...
@@ -55,6 +56,7 @@ def gelu_backward_cmp(input_shape):
    logger.info("---------me--------")
    logger.info(output_grad_me)

# ---------- LARGE INPUT ---------------
class MEGeluLargeIn(Cell):
...
@@ -67,6 +69,7 @@ class MEGeluLargeIn(Cell):
        x = self.matmul(x1, x2)
        return self.gelu(x)

class GradLargeIn(Cell):
    def __init__(self, network):
        super(GradLargeIn, self).__init__()
...
@@ -86,5 +89,5 @@ def gelu_backward_me_large_in_impl(x1, x2, output_grad):
def test_grad_gelu_input_10240_1024():
    input_shape = [10240, 1024]
    gelu_backward_cmp(input_shape)
tests/st/ops/ascend/test_tbe_ops/test_greater.py
...
@@ -20,8 +20,10 @@ from mindspore.common.tensor import Tensor
from mindspore.train.model import Model
from mindspore import log as logger
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Greater(Cell):
    def __init__(self):
        super(Greater, self).__init__()
...
@@ -30,6 +32,7 @@ class Greater(Cell):
    def construct(self, inputa, inputb):
        return self.greater(inputa, inputb)

def me_greater(inputa, inputb):
    net = Greater()
    net.set_train()
...
@@ -42,10 +45,11 @@ def me_greater(inputa, inputb):
    logger.info(inputb)
    return out.asnumpy()

@pytest.mark.ssd_tbe
def test_greater_2d_scalar0():
    a = np.random.randint(-5, 5, [8, 32]).astype(np.int32)
    b = np.random.randint(-5, 5, [8, 32]).astype(np.int32)
    out_me = me_greater(Tensor(a), Tensor(b))
    logger.info("Check me result:")
-    logger.info(out_me)
\ No newline at end of file
+    logger.info(out_me)
tests/st/ops/ascend/test_tbe_ops/test_layernorm.py
...
@@ -20,8 +20,10 @@ from mindspore.train.model import Model
from mindspore import log as logger
import pytest
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Net(Cell):
    def __init__(self, input_shape, begin_norm_axis, begin_params_axis, gamma, beta):
        super(Net, self).__init__()
...
@@ -31,6 +33,7 @@ class Net(Cell):
        x = self.layernorm(input)
        return x

def pt_me_layernorm(input_data, normalized_shape, gamma, beta, axis):
    net = Net(normalized_shape, begin_norm_axis=axis,
              begin_params_axis=axis,
...
@@ -42,6 +45,7 @@ def pt_me_layernorm(input_data, normalized_shape, gamma, beta, axis):
    logger.info("Check me result:")
    logger.info(out_me.asnumpy())

@pytest.mark.lower_bs
def test_normal_layernorm_1_128_1024_axis_2():
    """
...
@@ -52,4 +56,4 @@ def test_normal_layernorm_1_128_1024_axis_2():
    gamma.fill(1.1)
    beta = np.random.randn(1024).astype(np.float32)
    beta.fill(0.1)
-    pt_me_layernorm(input_data, (1024, ), gamma, beta, 2)
+    pt_me_layernorm(input_data, (1024,), gamma, beta, 2)
tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py
...
@@ -19,18 +19,21 @@ from mindspore.nn import Cell
from mindspore.ops.composite import GradOperation
from mindspore import log as logger
from mindspore import context

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Grad(Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
        self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
        self.network = network

-    def construct(self, input, output_grad,):
+    def construct(self, input, output_grad, ):
        gout = self.grad(self.network)(input, output_grad)
        return gout

class Net(Cell):
    def __init__(self, input_shape, begin_norm_axis, begin_params_axis, gamma, beta):
        super(Net, self).__init__()
...
@@ -40,6 +43,7 @@ class Net(Cell):
        x = self.layernorm(input)
        return x

def py_me_layernorm_grad(input_data, normalized_shape, gamma, beta, axis, gradients):
    input_me = Tensor(input_data)
    net_me = Grad(Net(normalized_shape, begin_norm_axis=axis,
...
@@ -52,6 +56,7 @@ def py_me_layernorm_grad(input_data, normalized_shape, gamma, beta, axis, gradie
    logger.info("Check me result:")
    logger.info(out_grad.asnumpy())

def test_normal_layernorm_grad_normalize_2d():
    """
    1 input[1, 128, 1024],normalized_shape=[1024],element_affine=False
...
tests/st/ops/ascend/test_tbe_ops/test_less.py
...
@@ -18,7 +18,10 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -28,8 +31,10 @@ class Net(nn.Cell):
    def construct(self, x1, x2):
        return self.less(x1, x2)

x1 = np.random.randn(3, 4).astype(np.float16)
x2 = np.random.randn(3, 4).astype(np.float16)

def test_net():
    less = Net()
...
@@ -37,4 +42,3 @@ def test_net():
    print(x1)
    print(x2)
    print(output.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_less_equal.py
...
@@ -18,7 +18,10 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -28,8 +31,10 @@ class Net(nn.Cell):
    def construct(self, x1, x2):
        return self.less_equal(x1, x2)

x1 = np.random.randn(3, 4).astype(np.float16)
x2 = np.random.randn(3, 4).astype(np.float16)

def test_net():
    less_equal = Net()
...
@@ -37,4 +42,3 @@ def test_net():
    print(x1)
    print(x2)
    print(output.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_logical_and.py
...
@@ -18,7 +18,10 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -28,12 +31,14 @@ class Net(nn.Cell):
    def construct(self, x1, x2):
        return self.logical_and(x1, x2)

x1 = [True, True, False, False, True, True, False, False]
x2 = [True, False, False, True, True, False, False, True]

def test_net():
    logical_and = Net()
    output = logical_and(Tensor(x1), Tensor(x2))
    print(x1)
    print(x2)
    print(output.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_logical_not.py
...
@@ -18,7 +18,10 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -28,11 +31,12 @@ class Net(nn.Cell):
    def construct(self, x1):
        return self.logical_not(x1)

x1 = [True, True, False, False, True, True, False, False]

def test_net():
    logical_not = Net()
    output = logical_not(Tensor(x1))
    print(x1)
    print(output.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_logical_or.py
...
@@ -18,7 +18,10 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -28,12 +31,14 @@ class Net(nn.Cell):
    def construct(self, x1, x2):
        return self.logical_or(x1, x2)

x1 = [True, True, False, False, True, True, False, False]
x2 = [True, False, False, True, True, False, False, True]

def test_net():
    logical_or = Net()
    output = logical_or(Tensor(x1), Tensor(x2))
    print(x1)
    print(x2)
    print(output.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_matmul.py
...
@@ -21,6 +21,7 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -30,8 +31,10 @@ class Net(nn.Cell):
    def construct(self, x1, x2):
        return self.matmul(x1, x2)

x1 = np.random.randn(1, 3).astype(np.float32)
x2 = np.random.randn(3, 4).astype(np.float32)

def test_net():
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
...
tests/st/ops/ascend/test_tbe_ops/test_matmul_failed.py
...
@@ -20,7 +20,10 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -30,8 +33,10 @@ class Net(nn.Cell):
    def construct(self, x1, x2):
        return self.matmul(x1, x2)

x1 = np.random.randn(10, 1).astype(np.float32)
x2 = np.random.randn(100, 1).astype(np.float32)

def test_net():
    matmul = Net()
...
tests/st/ops/ascend/test_tbe_ops/test_maximum.py
...
@@ -22,14 +22,16 @@ from mindspore.ops import operations as P
context.set_context(device_target="Ascend")

class Max(nn.Cell):
    def __init__(self, dtype):
        super(Max, self).__init__()
        self.max = P.Maximum()

    def construct(self, inputa, inputb):
        return self.max(inputa, inputb)

def me_max(inputa, inputb, dtype=ms.float32):
    context.set_context(mode=context.GRAPH_MODE)
    net = Max(dtype)
...
@@ -44,14 +46,16 @@ def me_max(inputa, inputb, dtype=ms.float32):
    print(out)
    return out.asnumpy()

-def cmp_max(a,b):
+def cmp_max(a, b):
    out = np.maximum(a, b)
    out_ms = me_max(a, b)
    print("-------ms------")
    print("numpy out :{}".format(out))
    print("ms out :{}".format(out_ms))

def test_maximum_2_2():
    a = np.random.randn(2, 2).astype(np.float32)
    b = np.random.randn(2, 2).astype(np.float32)
-    cmp_max(a, b)
\ No newline at end of file
+    cmp_max(a, b)
tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py
...
@@ -22,6 +22,7 @@ from mindspore.ops import operations as P
context.set_context(device_target="Ascend")
grad = C.GradOperation('get_all', get_all=True, sens_param=True)

class MaxNetMe(Cell):
    def __init__(self):
        super(MaxNetMe, self).__init__()
...
@@ -31,6 +32,7 @@ class MaxNetMe(Cell):
        x = self.max(inputA, inputB)
        return x

class GradWrap(Cell):
    def __init__(self, network):
        super(GradWrap, self).__init__()
...
@@ -40,6 +42,7 @@ class GradWrap(Cell):
        gout = grad(self.network)(inputA, inputB, sens)
        return gout

def gen_data(inputA_np, inputB_np, grad=None):
    inputA_me = inputA_np
    if isinstance(inputA_np, np.ndarray) == True:
...
@@ -61,7 +64,8 @@ def gen_data(inputA_np, inputB_np, grad=None):
    print(output[0].asnumpy())
    print(output[1].asnumpy())

def test_net():
    inputA_np = np.random.randn(1, 3, 2, 2).astype(np.float32)
    inputB_np = np.random.randn(1, 3, 2, 2).astype(np.float32)
-    gen_data(inputA_np, inputB_np)
\ No newline at end of file
+    gen_data(inputA_np, inputB_np)
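The gradient of elementwise Maximum routes the incoming sens to whichever input produced the output. A numpy sketch of the behavior the test prints; giving ties to the first input here is an assumption for illustration, not the documented tie rule:

import numpy as np

def maximum_grad(a, b, sens):
    # d(max(a, b))/da is 1 where a selected the output, else 0; same for b.
    da = sens * (a >= b)
    db = sens * (b > a)
    return da, db

a = np.array([1.0, 5.0, 2.0])
b = np.array([3.0, 4.0, 2.0])
print(maximum_grad(a, b, np.ones(3)))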
tests/st/ops/ascend/test_tbe_ops/test_maxpool.py
...
@@ -19,12 +19,12 @@ from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.maxpool = P.MaxPool(padding="SAME", ksize=3, strides=2)

    @ms_function
    def construct(self, x):
        output = self.maxpool(x)
...
@@ -32,7 +32,7 @@ class Net(nn.Cell):
def test_net():
    x = np.random.randn(32, 64, 112, 112).astype(np.float16)
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    maxpool = Net()
    output = maxpool(Tensor(x))
...
tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py
...
@@ -19,6 +19,7 @@ from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context
from mindspore.ops.composite import GradOperation

context.set_context(device_target="Ascend")
...
tests/st/ops/ascend/test_tbe_ops/test_minimum.py
...
@@ -22,7 +22,10 @@ from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
import mindspore as ms
from mindspore.train.model import Model

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Min(nn.Cell):
    def __init__(self, dtype):
        super(Min, self).__init__()
...
@@ -46,7 +49,8 @@ def me_min(inputa, inputb, dtype=ms.float32):
    print(out)
    return out.asnumpy()

-def cmp_min(a,b):
+def cmp_min(a, b):
    print(a)
    print(b)
...
@@ -55,8 +59,8 @@ def cmp_min(a,b):
    out_me = me_min(a, b)
    print(out_me)

def test_minimum_2_2():
    a = np.random.randn(2, 2, 1, 1).astype(np.float32)
    b = np.random.randn(2, 2, 1, 1).astype(np.float32)
-    cmp_min(a,b)
+    cmp_min(a, b)
tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py
...
@@ -22,6 +22,8 @@ from mindspore.ops.operations import Minimum
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
grad = C.GradOperation('get_all', get_all=True, sens_param=True)

class MinNetMe(Cell):
    def __init__(self):
        super(MinNetMe, self).__init__()
...
@@ -41,6 +43,7 @@ class GradWrap(Cell):
        gout = grad(self.network)(inputA, inputB, sens)
        return gout

def gen_data(inputA_np, inputB_np, grad=None):
    inputA_me = inputA_np
    if isinstance(inputA_np, np.ndarray) == True:
...
@@ -51,7 +54,7 @@ def gen_data(inputA_np, inputB_np, grad=None):
    inputB_me = Tensor(inputB_np)
    if grad is None:
        grad = np.random.randn(1, 3, 2, 2).astype(np.float32)
    print(inputA_np)
    print(inputB_np)
...
tests/st/ops/ascend/test_tbe_ops/test_mul.py
...
@@ -18,7 +18,10 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -28,8 +31,10 @@ class Net(nn.Cell):
    def construct(self, x1, x2):
        return self.mul(x1, x2)

x1 = np.random.randn(3, 4).astype(np.float32)
x2 = np.random.randn(3, 4).astype(np.float32)

def test_net():
    mul = Net()
...
tests/st/ops/ascend/test_tbe_ops/test_npu_alloc_float_status.py
...
@@ -18,7 +18,10 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -28,8 +31,8 @@ class Net(nn.Cell):
    def construct(self):
        return self.npu_alloc_float_status()

def test_net():
    npu_alloc_float_status = Net()
    output = npu_alloc_float_status()
    print(output.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_npu_clear_float_status.py
...
@@ -18,7 +18,10 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -28,11 +31,12 @@ class Net(nn.Cell):
    def construct(self, x1):
        return self.npu_clear_float_status(x1)

x1 = np.random.randn(8).astype(np.float32)

def test_net():
    npu_clear_float_status = Net()
    output = npu_clear_float_status(Tensor(x1))
    print(x1)
    print(output.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_npu_get_float_status.py
...
@@ -18,7 +18,10 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -28,11 +31,12 @@ class Net(nn.Cell):
    def construct(self, x1):
        return self.npu_get_float_status(x1)

x1 = np.random.randn(8).astype(np.float32)

def test_net():
    npu_get_float_status = Net()
    output = npu_get_float_status(Tensor(x1))
    print(x1)
    print(output.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_pad.py
...
@@ -18,21 +18,24 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.pad = P.Pad(paddings=((3, 2), (2, 3)))

    @ms_function
    def construct(self, x):
        x = self.pad(x)
        return x

x = np.random.random(size=(2, 2)).astype(np.float32)

def test_net():
    pad = Net()
    output = pad(Tensor(x))
...
tests/st/ops/ascend/test_tbe_ops/test_pow.py
...
@@ -23,8 +23,10 @@ from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
import mindspore as ms
from mindspore.train.model import Model

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class PowMe(Cell):
    def __init__(self):
        super(PowMe, self).__init__()
...
@@ -33,6 +35,7 @@ class PowMe(Cell):
    def construct(self, input, exp):
        return self.pow(input, exp)

def pow_forward_me_impl(input, exp):
    n = PowMe()
    n.set_train()
...
@@ -40,6 +43,7 @@ def pow_forward_me_impl(input, exp):
    out = m.predict(input, exp)
    return out.asnumpy()

def pow_forward_cmp(input_shape, exp_shape):
    if len(input_shape) == 0:
        input_np = np.absolute(np.random.randn())
...
@@ -54,14 +58,14 @@ def pow_forward_cmp(input_shape, exp_shape):
    exp_np = np.absolute(np.random.randn(*exp_shape).astype(np.float32))
    exp_tf = exp_np
    exp_me = Tensor(exp_np, dtype=ms.float32)
    out_me = pow_forward_me_impl(input_me, exp_me)
    print(input_me)
    print(exp_me)
    print(out_me)

def test_pow_input_scalar_exp_scalar():
    input_shape = []
    exp_shape = []
    pow_forward_cmp(input_shape, exp_shape)
tests/st/ops/ascend/test_tbe_ops/test_realdiv.py
...
@@ -18,7 +18,10 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -28,8 +31,10 @@ class Net(nn.Cell):
    def construct(self, x1, x2):
        return self.realdiv(x1, x2)

x1 = np.random.randn(3, 4).astype(np.float32)
x2 = np.random.randn(3, 4).astype(np.float32)

def test_net():
    realdiv = Net()
...
tests/st/ops/ascend/test_tbe_ops/test_reciprocal.py
...
@@ -18,7 +18,10 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -28,11 +31,12 @@ class Net(nn.Cell):
    def construct(self, x1):
        return self.reciprocal(x1)

x1 = np.random.randn(3, 4).astype(np.float32)

def test_net():
    reciprocal = Net()
    output = reciprocal(Tensor(x1))
    print(x1)
    print(output.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_relu.py
...
@@ -20,7 +20,10 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -30,8 +33,9 @@ class Net(nn.Cell):
    def construct(self, x):
        return self.relu(x)

def test_net():
    x = np.random.randn(2, 3, 3, 4).astype(np.float32)
    relu = Net()
    output = relu(Tensor(x))
    print(x)
...
tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py
...
@@ -21,8 +21,10 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops.composite import GradOperation

context.set_context(device_target="Ascend")

class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
...
@@ -33,6 +35,7 @@ class Grad(nn.Cell):
    def construct(self, input, output_grad):
        return self.grad(self.network)(input, output_grad)

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -41,9 +44,10 @@ class Net(nn.Cell):
    def construct(self, x):
        return self.relu(x)

def test_net():
    x = np.random.randn(2, 3, 3, 4).astype(np.float32)
    sens = np.random.randn(2, 3, 3, 4).astype(np.float32)
    net = Grad(Net())
    output = net(Tensor(x), Tensor(sens))
    print(len(output))
...
tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py
...
@@ -21,8 +21,10 @@ import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops.composite import GradOperation

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
...
@@ -33,6 +35,7 @@ class Grad(nn.Cell):
    def construct(self, input):
        return self.grad(self.network)(input)

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -41,8 +44,9 @@ class Net(nn.Cell):
    def construct(self, x):
        return self.relu_v2(x)

def test_net():
    x = Tensor(np.ones((2, 3, 3, 4)).astype(np.float32))
    relu_net = Net()
    relu_output = relu_net(x)
    net = Grad(Net())
...
tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor.py
...
@@ -18,8 +18,10 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -29,6 +31,7 @@ class Net(nn.Cell):
    def construct(self, x):
        return self.upsample(x)

def test_net():
    x = np.random.random(size=(32, 3, 32, 32)).astype(np.float32)
    upsample = Net()
...
tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py
...
@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")
...
@@ -49,4 +50,4 @@ def test_net():
    grad = Grad(Net())
    output = grad(Tensor(image), Tensor(grads))
    print("=================output====================")
-    print(output)
\ No newline at end of file
+    print(output)
tests/st/ops/ascend/test_tbe_ops/test_scatter_nd.py
...
@@ -20,6 +20,7 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
...
@@ -29,12 +30,13 @@ class Net(nn.Cell):
        self.scatternd = P.ScatterNd()

    def construct(self, indices, update):
        return self.scatternd(indices, update, (3, 3))

indices = np.array([[0, 1], [1, 1]]).astype(np.int32)
update = np.array([3.2, 1.1]).astype(np.float32)

def test_net():
    scatternd = Net()
    print(indices)
...
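ScatterNd builds a zero tensor of the given shape and writes each update at its index. For the indices and updates above, a numpy equivalent; accumulation at duplicate indices is the TF-style convention and an assumption here:

import numpy as np

indices = np.array([[0, 1], [1, 1]]).astype(np.int32)
update = np.array([3.2, 1.1]).astype(np.float32)

out = np.zeros((3, 3), dtype=np.float32)
for idx, val in zip(indices, update):
    out[tuple(idx)] += val  # duplicate indices would accumulate
print(out)  # 3.2 at (0, 1), 1.1 at (1, 1), zeros elsewhere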
tests/st/ops/ascend/test_tbe_ops/test_select.py
...
@@ -23,7 +23,10 @@ from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
import mindspore as ms
from mindspore.train.model import Model

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class Select(Cell):
    def __init__(self, dtype):
        super(Select, self).__init__()
...
@@ -32,6 +35,7 @@ class Select(Cell):
    def construct(self, cond, inputa, inputb):
        return self.select(cond, inputa, inputb)

def me_select(cond, inputa, inputb, dtype=ms.float32):
    net = Select(dtype)
    net.set_train()
...
@@ -45,9 +49,10 @@ def me_select(cond, inputa, inputb, dtype=ms.float32):
    out = model.predict(Tensor(cond), inputa, inputb)
    return out.asnumpy()

-def cmp_select(input_cond,inputa,inputb):
-    cond = input_cond > 0.5
+def cmp_select(input_cond, inputa, inputb):
+    cond = input_cond > 0.5
    out_me = me_select(cond, inputa, inputb)
    print(input_cond)
    print(cond)
...
@@ -55,9 +60,9 @@ def cmp_select(input_cond,inputa,inputb):
    print(inputb)
    print(out_me)

def test_select_2_2():
    input_cond = np.random.rand(2, 2)
-    inputa = np.random.randn(2,2).astype(np.float32)
-    inputb = np.random.randn(2,2).astype(np.float32)
-    cmp_select(input_cond,inputa,inputb)
+    inputa = np.random.randn(2, 2).astype(np.float32)
+    inputb = np.random.randn(2, 2).astype(np.float32)
+    cmp_select(input_cond, inputa, inputb)
tests/st/ops/ascend/test_tbe_ops/test_sigmoid.py
...
@@ -18,8 +18,10 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -29,6 +31,7 @@ class Net(nn.Cell):
    def construct(self, x):
        return self.sigmoid(x)

def test_net():
    x = np.random.random(size=(2, 3)).astype(np.float32)
    sigmoid = Net()
...
tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits.py
...
@@ -21,6 +21,7 @@ import mindspore.context as context
context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py
...
@@ -22,6 +22,7 @@ import mindspore.context as context
context.set_context(device_target="Ascend")

class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py
浏览文件 @
2bc3fcb1
...
@@ -19,6 +19,7 @@ from mindspore.ops.composite import GradOperation
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")
...
@@ -42,6 +43,7 @@ class Grad(nn.Cell):
    def construct(self, x, y):
        return self.grad(self.network)(x, y)


def test_net():
    x = np.random.random(size=(2, 3, 4, 5, 6)).astype(np.float32)
    y = np.random.random(size=(2, 3, 4, 5, 6)).astype(np.float32)
...
@@ -49,4 +51,3 @@ def test_net():
    output = net(Tensor(x), Tensor(y))
    print("=================output====================")
    print(output.asnumpy())
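The gradient this test exercises follows from the chain rule: with y = sigmoid(x), dL/dx = dy * y * (1 - y). A NumPy sketch of that identity (shapes mirror the test; the TBE kernel itself is not reproduced here):

```python
import numpy as np

def sigmoid_grad_ref(y, dy):
    """Backward of sigmoid given the forward output y."""
    return dy * y * (1.0 - y)

x = np.random.random(size=(2, 3, 4, 5, 6)).astype(np.float32)
y = 1.0 / (1.0 + np.exp(-x))          # forward output
dy = np.random.random(size=(2, 3, 4, 5, 6)).astype(np.float32)
print(sigmoid_grad_ref(y, dy).shape)  # (2, 3, 4, 5, 6)
```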
tests/st/ops/ascend/test_tbe_ops/test_slice.py
...
@@ -20,26 +20,28 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Slice(nn.Cell):
    def __init__(self):
        super(Slice, self).__init__()

        self.cat = P.Slice()
        self.x1 = Parameter(initializer(
            Tensor(np.array([[[1, -1, 1], [2, -2, 2]],
                             [[3, -3, 3], [4, -4, 4]],
                             [[5, -5, 5], [6, -6, 6]]]).astype(np.float32)),
            [3, 2, 3]), name='x1')

    @ms_function
    def construct(self):
        return self.cat(self.x1, (0, 1, 0), (2, 1, 3))


def test_slice():
    cat = Slice()
    output = cat()
    expect = [[[2., -2., 2.]],
              [[4., -4., 4.]]]
    print(output)
-    assert (output.asnumpy() == expect).all()
\ No newline at end of file
+    assert (output.asnumpy() == expect).all()
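The `expect` value in `test_slice` can be verified by hand: Slice(x, begin, size) takes the block starting at `begin` with extent `size`, i.e. `x1[0:2, 1:2, 0:3]` here. A NumPy check:

```python
import numpy as np

x1 = np.array([[[1, -1, 1], [2, -2, 2]],
               [[3, -3, 3], [4, -4, 4]],
               [[5, -5, 5], [6, -6, 6]]]).astype(np.float32)

# begin=(0, 1, 0), size=(2, 1, 3) selects x1[0:2, 1:2, 0:3].
print(x1[0:2, 1:2, 0:3])
# [[[ 2. -2.  2.]]
#  [[ 4. -4.  4.]]]
```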
tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss.py
...
@@ -18,6 +18,7 @@ import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
...
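For context, smooth L1 loss is quadratic near zero and linear in the tails. A NumPy sketch in the common beta parameterization (beta = 1.0 is an assumption; the diff does not show the op's arguments):

```python
import numpy as np

def smooth_l1_ref(pred, gt, beta=1.0):
    # 0.5 * d^2 / beta where |d| < beta, |d| - 0.5 * beta otherwise.
    d = np.abs(pred - gt)
    return np.where(d < beta, 0.5 * d * d / beta, d - 0.5 * beta)

pred = np.random.randn(4).astype(np.float32)
gt = np.random.randn(4).astype(np.float32)
print(smooth_l1_ref(pred, gt))
```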
tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py
...
@@ -31,6 +31,7 @@ class Net(nn.Cell):
    def construct(self, pred, gt):
        return self.SmoothL1Loss(pred, gt)


class Grad(nn.Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
...
tests/st/ops/ascend/test_tbe_ops/test_softmax.py
...
@@ -20,17 +20,22 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.Softmax = P.Softmax()

    def construct(self, x):
        return self.Softmax(x)


x = np.array([[5, 1]]).astype(np.float32)


def test_net():
    softmax = Net()
    output = softmax(Tensor(x))
...
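With the fixed input x = [[5, 1]], the expected softmax output is easy to verify; a numerically stable NumPy reference:

```python
import numpy as np

def softmax_ref(x, axis=-1):
    # Subtract the row max before exponentiating for numerical stability.
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

x = np.array([[5, 1]]).astype(np.float32)
print(softmax_ref(x))  # approximately [[0.982, 0.018]]
```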
tests/st/ops/ascend/test_tbe_ops/test_softmax_cross_entropy_with_logits.py
...
@@ -18,6 +18,7 @@ import mindspore.nn as nn
from mindspore.common.api import ms_function
import numpy as np
import mindspore.context as context

context.set_context(device_target="Ascend")
...
@@ -36,4 +37,4 @@ def test_net():
    labels = np.random.randn(32, 1001).astype(np.float16)
    SoftmaxCrossEntropyWithLogits = Net()
    output = SoftmaxCrossEntropyWithLogits(Tensor(features), Tensor(labels))
-    #print(output.asnumpy())
+    # print(output.asnumpy())
tests/st/ops/ascend/test_tbe_ops/test_split.py
...
@@ -20,7 +20,10 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -29,7 +32,8 @@ class Net(nn.Cell):
    def construct(self, x):
        return self.split(x)


x = np.random.randn(2, 4).astype(np.float32)


def test_net():
...
tests/st/ops/ascend/test_tbe_ops/test_sqrt.py
...
@@ -20,17 +20,22 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.sqrt = P.Sqrt()

    def construct(self, x):
        return self.sqrt(x)


x = np.array([1.0, 4.0, 9.0]).astype(np.float32)


def test_net():
    sqrt = Net()
    output = sqrt(Tensor(x))
...
tests/st/ops/ascend/test_tbe_ops/test_square.py
...
@@ -20,17 +20,22 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.square = P.Square()

    def construct(self, x):
        return self.square(x)


x = np.array([1.0, 4.0, 9.0]).astype(np.float32)


def test_net():
    square = Net()
    output = square(Tensor(x))
...
tests/st/ops/ascend/test_tbe_ops/test_stridedslice.py
...
@@ -19,7 +19,10 @@ from mindspore.nn import Cell
from mindspore.train.model import Model
import pytest
import mindspore.context as context

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(Cell):
    def __init__(self, begin, end, stride):
        super(Net, self).__init__()
...
@@ -32,6 +35,7 @@ class Net(Cell):
        x = self.stridedslice(input, self.begin, self.end, self.stride)
        return x


def me_stridedslice(input1, begin, end, stride):
    input_me = Tensor(input1)
    net = Net(begin, end, stride)
...
@@ -40,17 +44,19 @@ def me_stridedslice(input1, begin, end, stride):
    output = model.predict(input_me)
    print(output.asnumpy())


def test_stridedslice_input_2d():
    input = np.random.randn(5, 5).astype(np.int32)
    begin = (0, 0)
    end = (2, 2)
    stride = (1, 1)
    me_stridedslice(input, begin, end, stride)


def test_stridedslice_input_3d():
    input = np.random.randn(5, 5, 5).astype(np.float32)
    begin = (0, 0, 0)
    end = (3, 3, 3)
    stride = (1, 1, 1)
    me_stridedslice(input, begin, end, stride)
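StridedSlice with begin/end/stride is Python's extended slicing; the 2-D case above is exactly x[0:2:1, 0:2:1]. A NumPy sketch:

```python
import numpy as np

x = np.random.randn(5, 5).astype(np.float32)
# begin=(0, 0), end=(2, 2), stride=(1, 1) -> the top-left 2x2 block.
print(x[0:2:1, 0:2:1])
```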
tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py
...
@@ -19,8 +19,10 @@ from mindspore.nn import Cell
from mindspore.ops.composite import GradOperation
from mindspore import context
import pytest

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Grad(Cell):
    def __init__(self, network):
        super(Grad, self).__init__()
...
@@ -31,6 +33,7 @@ class Grad(Cell):
        gout = self.grad(self.network)(input, output_grad)
        return gout


class Net(Cell):
    def __init__(self, begin, end, stride):
        super(Net, self).__init__()
...
@@ -43,6 +46,7 @@ class Net(Cell):
        x = self.stridedslice(input, self.begin, self.end, self.stride)
        return x


def me_stridedslice(input, begin, end, stride, gradients):
    input_me = Tensor(input)
    out_grad_me = Tensor(gradients)
...
@@ -51,6 +55,7 @@ def me_stridedslice(input, begin, end, stride, gradients):
    out_grad = net_me(input_me, out_grad_me)
    print(out_grad.asnumpy())


def test_grad_stridedslice_1d():
    input = np.random.randn(2).astype(np.float32)
    begin = (0,)
...
tests/st/ops/ascend/test_tbe_ops/test_sub.py
...
@@ -20,17 +20,21 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.sub = P.Sub()

    def construct(self, x, y):
        return self.sub(x, y)


x = np.random.randn(1, 3, 3, 4).astype(np.float32)
y = np.random.randn(1, 3, 3, 4).astype(np.float32)


def test_net():
...
tests/st/ops/ascend/test_tbe_ops/test_tanh.py
...
@@ -21,6 +21,7 @@ from mindspore.ops import operations as P
context.set_context(device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -29,9 +30,12 @@ class Net(nn.Cell):
    def construct(self, x):
        return self.tanh(x)


input_shape = [1]
input_np = np.random.randn(*input_shape).astype(np.float32)
input_me = Tensor(input_np)


def test_net():
    context.set_context(mode=context.GRAPH_MODE)
    tanh = Net()
...
@@ -40,4 +44,4 @@ def test_net():
    out = m.predict(input_me)
    print("out_me.dtype={}".format(out.dtype))
    print("out_me.asnumpy={}".format(out.asnumpy()))
-    return out.asnumpy()
\ No newline at end of file
+    return out.asnumpy()
tests/st/ops/ascend/test_tbe_ops/test_tanh_grad.py
...
@@ -22,6 +22,7 @@ from mindspore.ops.operations import _grad_ops as G
context.set_context(device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
...
@@ -30,9 +31,12 @@ class Net(nn.Cell):
    def construct(self, y, dy):
        return self.tanh_grad(y, dy)


input_shape = [1]
input_np = np.random.randn(*input_shape).astype(np.float32)
input_me = Tensor(input_np)


def test_net():
    context.set_context(mode=context.GRAPH_MODE)
    tanh_grad = Net()
...
@@ -41,4 +45,4 @@ def test_net():
    out = m.predict(input_me, input_me)
    print("out_me.dtype={}".format(out.dtype))
    print("out_me.asnumpy={}".format(out.asnumpy()))
-    return out.asnumpy()
\ No newline at end of file
+    return out.asnumpy()
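TanhGrad computes the standard derivative: with y = tanh(x), dL/dx = dy * (1 - y^2). A NumPy sketch of that identity (shapes as in the test):

```python
import numpy as np

def tanh_grad_ref(y, dy):
    """Backward of tanh given the forward output y."""
    return dy * (1.0 - y * y)

input_np = np.random.randn(1).astype(np.float32)
y = np.tanh(input_np)   # forward output
dy = input_np           # the test feeds the same tensor for both arguments
print(tanh_grad_ref(y, dy))
```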
tests/st/ops/ascend/test_tbe_ops/test_tile.py
...
@@ -20,6 +20,7 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
...
tests/st/ops/ascend/test_tbe_ops/test_topk.py
...
@@ -20,7 +20,10 @@ import numpy as np
import mindspore.context as context
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self, k):
        super(Net, self).__init__()
...
@@ -32,7 +35,7 @@ class Net(nn.Cell):
def test_net():
    x = np.random.randn(4, 4).astype(np.float16)
    k = 2
    TopK = Net(k)
    output = TopK(Tensor(x))
...
@@ -41,4 +44,3 @@ def test_net():
    print("***********output y*********")
    print(output[0].asnumpy())
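TopK returns the k largest values per row together with their indices (assumed sorted in descending order, the usual default); a NumPy stand-in matching the test's shapes:

```python
import numpy as np

def topk_ref(x, k):
    # Indices of the k largest entries along the last axis, sorted descending.
    idx = np.argsort(-x, axis=-1)[..., :k]
    values = np.take_along_axis(x, idx, axis=-1)
    return values, idx

x = np.random.randn(4, 4).astype(np.float16)
values, indices = topk_ref(x, 2)
print(values)  # shape (4, 2)
```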
tests/st/ops/ascend/test_tbe_ops/test_transpose_d.py
This diff is collapsed.
tests/st/ops/ascend/test_tbe_ops/test_unsorted_segment_sum.py
This diff is collapsed.
tests/st/ops/ascend/test_tdt_data_ms.py
This diff is collapsed.