magicwindyyd / mindspore (forked from MindSpore / mindspore)
Commit d6e930d7
Authored May 22, 2020 by mindspore-ci-bot; committed via Gitee on May 22, 2020

!1337 test(custom_op): delete conv2d custom op.

Merge pull request !1337 from gongchen/del_conv2d_custom_op

Parents: 58e6d7d9, 36edbe41
Showing 6 changed files with 0 additions and 1150 deletions.
tests/st/ops/custom_ops_tbe/conv2d.py           +0  -203
tests/st/ops/custom_ops_tbe/conv2d_impl.py      +0  -40
tests/st/ops/custom_ops_tbe/conv_layer.py       +0  -520
tests/st/ops/custom_ops_tbe/conv_layer_fast.py  +0  -180
tests/st/ops/custom_ops_tbe/cus_conv2d.py       +0  -153
tests/st/ops/custom_ops_tbe/test_conv.py        +0  -54
tests/st/ops/custom_ops_tbe/conv2d.py: deleted, mode 100755 → 0 (view file at parent 58e6d7d9)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import

import te.lang.cce
from te import platform as cce
from te.platform.fusion_manager import fusion_manager
from topi.cce import util

from .conv_layer import conv_layer_cce
from .conv_layer_fast import conv_layer_fast_cce

Nonetype = type(None)


# pylint: disable=unused-argument, no-value-for-parameter, too-many-branches
@fusion_manager.register("conv2d")
def conv2d_compute(inputs, weights, bias, outputs, strides, pad_list, dilations,
                   kernel_name="conv2d"):
    """
    conv2d compute

    Notice
    ------
    only used by framework combine with IR

    Parameters
    ----------
    inputs: tvm placeholder
        input 5hd feature map tensor
    weights: tvm placeholder
        input frac_z weight tensor
    outputs: tvm placeholder
        output tensor, dtype must be assigned
    bias: tvm placeholder or None
        input 1d bias tensor
    strides: integers
        stride on H/W, format sensitive
    pad_list: tuple/list of 4 integers
        [pad_top, pad_bottom, pad_left, pad_right]
    dilations: integers
        dilation on H/W, format sensitive
    kernel_name: string
        kernel name, default value is "conv2d"

    Returns
    -------
    tvm compute
    """
    shape_w = []
    for i in weights.op.attrs['ori_shape']:
        shape_w.append(i.value)

    format_w = weights.op.attrs['ori_format']
    if format_w == "NCHW":
        weight_h = shape_w[2]
        weight_w = shape_w[3]
    elif format_w == "NHWC":
        weight_h = shape_w[1]
        weight_w = shape_w[2]
    elif format_w == "HWCN":
        weight_h = shape_w[0]
        weight_w = shape_w[1]
    else:
        raise RuntimeError("weights ori_format should be NCHW, NHWC or HWCN")

    format_x = inputs.op.attrs['ori_format']
    if format_x == "NCHW":
        strideh = strides[0]
        stridew = strides[0]
        dlt_h = dilations[0]
        dlt_w = dilations[0]
    elif format_x == "NHWC":
        strideh = strides[0]
        stridew = strides[0]
        dlt_h = dilations[0]
        dlt_w = dilations[0]
    else:
        raise RuntimeError("inputs ori_format should be NCHW or NHWC")

    if len(pad_list) == 4:
        padh = [pad_list[0], pad_list[1]]
        padw = [pad_list[2], pad_list[3]]
    else:
        raise RuntimeError("pads shape should be 4d.")

    para_dict = {"pad_h": padh, "pad_w": padw,
                 "stride_h": strideh, "stride_w": stridew,
                 "filter_h": weight_h, "filter_w": weight_w,
                 "bias_tensor": bias}

    if cce.CceProductParams().cce_product == "5.10":
        para_dict["mad_dtype"] = "float16"
        res = te.lang.cce.conv(inputs, weights, para_dict)
    else:
        res = te.lang.cce.conv(inputs, weights, para_dict)

    return res


@util.check_input_type(dict, dict, (dict, Nonetype), dict, (tuple, list),
                       (tuple, list), (tuple, list), str)
def conv2d(inputs, weights, bias, outputs, strides, pad_list, dilations,
           kernel_name="conv2d"):
    """
    algorithm: conv2d

    Notice
    ------
    only used by framework combine with IR

    Parameters
    ----------
    inputs: dict with keys(shape and dtype)
        input 4d feature map tensor
    weights: dict with keys(shape and dtype)
        input 4d weight tensor
    outputs: dict with keys(shape and dtype)
        output tensor, dtype must be assigned
    bias: dict with keys(shape and dtype) or None
        input bias tensor
    strides: integers
        stride on H/W, format sensitive
    pad_list: tuple/list of 4 integers
        [pad_top, pad_bottom, pad_left, pad_right]
    dilations: tuple/list of 4 integers
        dilation on H/W, format sensitive
    kernel_name: str
        kernel name, default value is "conv2d"

    Returns
    -------
    None
    """
    shape_x = inputs.get("ori_shape")
    in_dtype = inputs.get("dtype")
    shape_w = weights.get("ori_shape")
    w_dtype = weights.get("dtype")
    res_dtype = outputs.get("dtype")

    if len(pad_list) == 4:
        padh = [pad_list[0], pad_list[1]]
        padw = [pad_list[2], pad_list[3]]
    else:
        raise RuntimeError("pads shape should be 4d.")

    if (not isinstance(shape_x, (tuple, list))) or len(shape_x) != 4:
        raise RuntimeError("inputs should be 4d list.")

    if (not isinstance(shape_w, (tuple, list))) or len(shape_w) != 4:
        raise RuntimeError("weights should be 4d list.")

    format_x = inputs.get("ori_format")
    if format_x == "NCHW":
        shape_fm = shape_x
        strideh = strides[0]
        stridew = strides[0]
        dlt_h = dilations[0]
        dlt_w = dilations[0]
    elif format_x == "NHWC":
        shape_fm = [shape_x[0], shape_x[3], shape_x[1], shape_x[2]]
        strideh = strides[0]
        stridew = strides[0]
        dlt_h = dilations[0]
        dlt_w = dilations[0]
    else:
        raise RuntimeError("inputs ori_format should be NCHW or NHWC.")

    format_w = weights.get("ori_format")
    if format_w == "NCHW":
        shape_filter = shape_w
    elif format_w == "NHWC":
        shape_filter = [shape_w[0], shape_w[3], shape_w[1], shape_w[2]]
    elif format_w == "HWCN":
        shape_filter = [shape_w[3], shape_w[2], shape_w[0], shape_w[1]]
    else:
        raise RuntimeError("weights ori_format should be NCHW, NHWC or HWCN.")

    if bias is None:
        use_bias = False
    else:
        use_bias = True

    if cce.CceProductParams().cce_product == "5.10":
        conv_layer_fast_cce(shape_fm, shape_filter, in_dtype, w_dtype, res_dtype,
                            padh, padw, strideh, stridew,
                            bias=use_bias, kernel_name=kernel_name,
                            need_build=True, need_print=False)
    else:
        conv_layer_cce(shape_fm, shape_filter, in_dtype, w_dtype, res_dtype,
                       padh, padw, strideh, stridew,
                       quantize_config=[0, 0, 0], scale_sqrt=[0, 0, 0],
                       bias=use_bias, kernel_name=kernel_name,
                       need_build=True, need_print=False)
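Taken together, `conv2d` here is a shape/format normalizer in front of the two `conv_layer*_cce` builders. A rough sketch of the calling convention it expects follows; the descriptor values are invented for illustration, and actually executing the call requires the Ascend `te`/`topi` toolchain:

# Hypothetical example descriptors; "ori_shape"/"ori_format"/"dtype" are the keys
# conv2d reads above. The call builds a CCE kernel as a side effect and returns None.
inputs = {"ori_shape": (32, 3, 224, 224), "ori_format": "NCHW", "dtype": "float16"}
weights = {"ori_shape": (64, 3, 7, 7), "ori_format": "NCHW", "dtype": "float16"}
outputs = {"dtype": "float16"}

conv2d(inputs, weights, None, outputs,
       strides=[1, 1, 1, 1],      # only strides[0] is read, for both H and W
       pad_list=[0, 0, 0, 0],     # [pad_top, pad_bottom, pad_left, pad_right]
       dilations=[1, 1, 1, 1],
       kernel_name="conv2d_example")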
tests/st/ops/custom_ops_tbe/conv2d_impl.py: deleted, mode 100644 → 0 (view file at parent 58e6d7d9)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
from tests.st.ops.custom_ops_tbe.conv2d import conv2d

cus_conv2D_op_info = TBERegOp("Cus_Conv2D") \
    .fusion_type("CONVLUTION") \
    .async_flag(False) \
    .binfile_name("conv2d.so") \
    .compute_cost(10) \
    .kernel_name("Cus_Conv2D") \
    .partial_flag(True) \
    .attr("stride", "required", "listInt", "all") \
    .attr("pad_list", "required", "listInt", "all") \
    .attr("dilation", "required", "listInt", "all") \
    .input(0, "x", False, "required", "all") \
    .input(1, "filter", False, "required", "all") \
    .input(2, "bias", False, "optional", "all") \
    .output(0, "y", True, "required", "all") \
    .dtype_format(DataType.F16_5HD, DataType.F16_FracZ, DataType.F32_Default, DataType.F16_5HD) \
    .get_op_info()


@op_info_register(cus_conv2D_op_info)
def Cus_Conv2D(inputs, weights, bias, outputs, strides, pads, dilations,
               kernel_name="conv2d"):
    conv2d(inputs, weights, bias, outputs, strides, pads, dilations, kernel_name)
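The `TBERegOp` chain above is a fluent builder: each call records one field of the operator's registration record, and `get_op_info()` hands the accumulated result to `op_info_register`. The following toy builder, which is not MindSpore's `TBERegOp` but a minimal stand-in, shows how such a chain accumulates state by returning `self`:

class ToyRegOp:
    """Toy stand-in for a fluent op-registration builder (illustration only)."""

    def __init__(self, op_name):
        self._info = {"op_name": op_name, "attrs": [], "inputs": []}

    def attr(self, name, param_type, value_type, value):
        self._info["attrs"].append({"name": name, "param_type": param_type,
                                    "type": value_type, "value": value})
        return self  # returning self is what makes the chained calls work

    def input(self, index, name, need_compile, param_type, shape):
        self._info["inputs"].append({"index": index, "name": name,
                                     "param_type": param_type})
        return self

    def get_op_info(self):
        return self._info


info = ToyRegOp("Cus_Conv2D") \
    .attr("stride", "required", "listInt", "all") \
    .input(0, "x", False, "required", "all") \
    .get_op_info()
print(info["op_name"])  # Cus_Conv2D

MindSpore's real builder also validates each field and serializes the record for the TBE compiler; the toy above skips all of that.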
tests/st/ops/custom_ops_tbe/conv_layer.py: deleted, mode 100755 → 0 (view file at parent 58e6d7d9)

(This diff is collapsed on the page; the file's 520 deleted lines are not shown.)
tests/st/ops/custom_ops_tbe/conv_layer_fast.py: deleted, mode 100755 → 0 (view file at parent 58e6d7d9)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import te.lang.cce
from te import tvm
from te.platform import CUBE_MKN
from topi import generic
from topi.cce import util

# pylint: disable=R0913,R0914,R0915,E1101
# the dim of shape in conv must be 4
PAD_SHAPE_DIM = 2

NoneType = type(None)


@util.check_input_type((list, tuple), (list, tuple), str, str, str,
                       (list, int), (list, int), int, int, bool, str)
def conv_layer_fast_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype,
                                   padh, padw, strideh, stridew, bias, kernel_name):
    # conv shape check
    util.check_kernel_name(kernel_name)

    # conv data type check
    util.check_dtype_rule(in_dtype, ['float16'])
    util.check_dtype_rule(w_dtype, ['float16'])
    util.check_dtype_rule(res_dtype, ['float16'])

    if not isinstance(bias, bool):
        raise RuntimeError("bias dtype should be bool.")

    if isinstance(padh, list):
        if len(padh) != PAD_SHAPE_DIM:
            raise RuntimeError("Dimension must be %d when padh is a list." % PAD_SHAPE_DIM)
        pad_top = padh[0]
        pad_bottom = padh[1]
    else:
        pad_top = padh
        pad_bottom = padh

    if isinstance(padw, list):
        if len(padw) != PAD_SHAPE_DIM:
            raise RuntimeError("Dimension must be %d when padw is a list." % PAD_SHAPE_DIM)
        pad_left = padw[0]
        pad_right = padw[1]
    else:
        pad_left = padw
        pad_right = padw

    shape_in, shape_w = te.lang.cce.check_conv_shape(shape_in, shape_w,
                                                     pad_top, pad_bottom,
                                                     pad_left, pad_right,
                                                     strideh, stridew,
                                                     in_dtype, w_dtype, res_dtype)

    return shape_in, shape_w


@util.check_input_type((list, tuple), (list, tuple), str, str, str,
                       (list, int), (list, int), int, int, bool, str, bool, bool)
def conv_layer_fast_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype,
                        padh, padw, strideh, stridew,
                        bias=False, kernel_name="cce_conv",
                        need_build=False, need_print=False):
    """
    Parameters
    ----------
    shape_in : shape of data_in
    shape_w : shape of filter
    in_dtype : the feature map data type
    w_dtype : the weight data type
    res_dtype : the result data type
    padh: the padding shape in H
    padw: the padding shape in W
    strideh: the stride value in H
    stridew: the stride value in W
    bias: the tag for bias or not
    kernel_name : cce kernel name, default value is "cce_conv"
    need_build : whether to build the CCEC kernel, default value is False
    need_print : whether to print the ir, default value is False

    Returns
    -------
    None
    """
    in_dtype = in_dtype.lower()
    w_dtype = w_dtype.lower()
    res_dtype = res_dtype.lower()

    shape_in = list(shape_in)
    shape_w = list(shape_w)

    shape_in, shape_w = conv_layer_fast_cce_para_check(shape_in, shape_w,
                                                       in_dtype, w_dtype, res_dtype,
                                                       padh, padw, strideh, stridew,
                                                       bias, kernel_name)

    batch_size = shape_in[0]
    in_channel = shape_in[1]
    feature_map_h = shape_in[2]
    feature_map_w = shape_in[3]
    block_size_k = CUBE_MKN[in_dtype]['mac'][1]
    fmap_shape_nc1hwc0 = (batch_size, (in_channel + block_size_k - 1) // block_size_k,
                          feature_map_h, feature_map_w, block_size_k)

    out_channel = shape_w[0]
    in_channel_weight = shape_w[1]
    filter_h = shape_w[2]
    filter_w = shape_w[3]
    block_size_k = CUBE_MKN[w_dtype]['mac'][1]
    block_size_n = CUBE_MKN[w_dtype]['mac'][2]
    filter_shape_frac_z = (in_channel_weight * filter_h * filter_w // block_size_k,
                           out_channel // block_size_n, block_size_n, block_size_k)

    with tvm.target.cce():
        data = tvm.placeholder(fmap_shape_nc1hwc0, name='Fmap', dtype=in_dtype)
        weight = tvm.placeholder(filter_shape_frac_z, name='Filter', dtype=w_dtype)
        bias_tensor = None

        if bias:
            bias_tensor = tvm.placeholder((out_channel,), name='bias_tensor',
                                          dtype=res_dtype)

        mad_dtype = "float16"

        conv_res = te.lang.cce.conv(data, weight,
                                    {"bias_tensor": bias_tensor,
                                     "scale_q": None,
                                     "offset_q": None,
                                     "scale_drq": None,
                                     "offset_pad": None,
                                     "offset_rq": None,
                                     "quantize_config": [0, 0, 0],
                                     "is_quantize": False,
                                     "is_dequantize": False,
                                     "is_requantize": False,
                                     "scale_sqrt": [0, 0, 0],
                                     "pad_h": padh, "pad_w": padw,
                                     "stride_h": strideh, "stride_w": stridew,
                                     "filter_h": filter_h, "filter_w": filter_w,
                                     "res_dtype": res_dtype,
                                     "mad_dtype": mad_dtype},
                                    dsl_flag=False)
        if bias:
            tensor_list = [data, weight, bias_tensor, conv_res]
        else:
            tensor_list = [data, weight, conv_res]
        sch = generic.auto_schedule(conv_res)

    config = {"print_ir": need_print,
              "need_build": need_build,
              "name": kernel_name,
              "tensor_list": tensor_list}

    te.lang.cce.cce_build_code(sch, config)
tests/st/ops/custom_ops_tbe/cus_conv2d.py: deleted, mode 100644 → 0 (view file at parent 58e6d7d9)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import math
from functools import reduce

import numpy as np

from mindspore import Tensor
from mindspore._checkparam import ParamValidator as validator
from mindspore._checkparam import Rel, check_bool, check_int_positive, twice
from mindspore.common import dtype as mstype
from mindspore.ops import prim_attr_register, PrimitiveWithInfer


class Cus_Conv2D(PrimitiveWithInfer):
    r"""
    Applies 2D convolution for the input.

    Input is typically of shape :math:`(N, C, H, W)`, where :math:`N` is batch size and :math:`C` is channel number.
    For each batch of shape :math:`(C, H, W)` the formula (given mode 1) is defined as:

    .. math::
        out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,

    where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
    from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the i-th channel of the j-th filter and
    :math:`out_{j}` corresponds to the j-th channel of the output.

    The first introduction can be found in the paper `Gradient Based Learning Applied to Document Recognition
    <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
    A more detailed introduction can be found here: http://cs231n.github.io/convolutional-networks/.

    Args:
        out_channel (int): The dimensionality of the output space.
        kernel_size (Union[int, tuple[int]]): The kernel size of the 2D convolution.
        mode (int): 0 math convolution, 1 cross-correlation convolution,
            2 deconvolution, 3 depthwise convolution. Default: 1.
        pad_mode (str): "valid", "same" or "pad", the mode to fill padding. Default: "valid".
        pad (int): The pad value to fill. Default: 0.
        stride (int): The stride to apply conv filter. Default: 1.
        dilation (int): Specifies the dilation rate to use for dilated convolution. Default: 1.
        group (int): Splits input into groups. Default: 1.

    Returns:
        Tensor, the value that applied 2D convolution.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.

    Outputs:
        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
    """

    @prim_attr_register
    def __init__(self, out_channel, kernel_size, mode=1, pad_mode="valid", pad=0,
                 stride=1, dilation=1, group=1):
        """init Conv2D"""
        self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])
        self.kernel_size = kernel_size
        self.kernel_size = validator.check_type('kernel_size', kernel_size, (int, tuple))
        if isinstance(self.kernel_size, int):
            self.kernel_size = (self.kernel_size, self.kernel_size)
        validator.check_integer('length of kernel_size', len(self.kernel_size), 2, Rel.GE)
        validator.equal('type of pad', type(pad), 'not bool', not isinstance(pad, bool))
        validator.equal('type of pad', type(pad), 'int', isinstance(pad, int))
        self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'])
        self.pad = validator.check_pad_value_by_mode(self.__class__.__name__, pad_mode, pad)
        if self.pad_mode == 'pad':
            validator.check_integer('pad', self.pad, 0, Rel.GE)

        self.mode = validator.check_integer('mode', mode, 1, Rel.EQ)
        self.add_prim_attr('data_format', "NCHW")
        self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT)
        self.group = validator.check_integer('group', group, 0, Rel.GT)
        self.dilation = validator.check_integer('dilation', dilation, 1, Rel.GE)
        validator.check_type('kernel_size', kernel_size, [int, tuple])
        if isinstance(kernel_size, int) and kernel_size < 1:
            raise ValueError('Attr \'kernel_size\' of \'Conv2D\' Op passed '
                             + str(self.kernel_size)
                             + ', should be a int or tuple and equal to or greater than 1.')
        if isinstance(kernel_size, tuple) and (len(kernel_size) != 2 or
                                               (not isinstance(kernel_size[0], int)) or
                                               (not isinstance(kernel_size[1], int)) or
                                               kernel_size[0] < 1 or kernel_size[1] < 1):
            raise ValueError('Attr \'kernel_size\' of \'Conv2D\' Op passed '
                             + str(self.kernel_size)
                             + ', should be a int or tuple and equal to or greater than 1.')
        self.stride = validator.check_integer('stride', stride, 1, Rel.GE)
        from conv2d_impl import Cus_Conv2D

    def infer_shape(self, x_shape, w_shape):
        validator.check_integer("weight_shape", len(w_shape), 4, Rel.EQ)
        validator.check_integer("x_shape", len(x_shape), 4, Rel.EQ)
        validator.check_param_equal("x_shape[1]", x_shape[1] // self.group, "w_shape[1]", w_shape[1])
        validator.check_param_equal('out_channel', self.out_channel, 'w_shape[0]', w_shape[0])
        validator.check_param_equal('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]))

        kernel_size_h = w_shape[2]
        kernel_size_w = w_shape[3]

        if self.pad_mode == "valid":
            h_out = math.ceil((x_shape[2] - kernel_size_h + 1) / self.stride)
            w_out = math.ceil((x_shape[3] - kernel_size_w + 1) / self.stride)
            pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0
        elif self.pad_mode == "same":
            h_out = math.ceil(x_shape[2] / self.stride)
            w_out = math.ceil(x_shape[3] / self.stride)

            pad_needed_h = max(0, (h_out - 1) * self.stride + kernel_size_h - x_shape[2])
            pad_top = math.floor(pad_needed_h / 2)
            pad_bottom = pad_needed_h - pad_top

            pad_needed_w = max(0, (w_out - 1) * self.stride + kernel_size_w - x_shape[3])
            pad_left = math.floor(pad_needed_w / 2)
            pad_right = pad_needed_w - pad_left
        elif self.pad_mode == 'pad':
            pad_top, pad_bottom, pad_left, pad_right = self.pad, self.pad, self.pad, self.pad

            h_out = 1 + (x_shape[2] + 2 * self.pad - kernel_size_h -
                         (kernel_size_h - 1) * (self.dilation - 1)) / self.stride
            w_out = 1 + (x_shape[3] + 2 * self.pad - kernel_size_w -
                         (kernel_size_w - 1) * (self.dilation - 1)) / self.stride
            h_out = math.floor(h_out)
            w_out = math.floor(w_out)

        self.pad_list = [pad_top, pad_bottom, pad_left, pad_right]
        self.add_prim_attr('pad_list', (pad_top, pad_bottom, pad_left, pad_right))

        out_channel = self.out_channel
        out_shape = [x_shape[0], out_channel, h_out, w_out]
        return out_shape

    def infer_dtype(self, x_dtype, w_dtype):
        args = {'x_dtype': x_dtype, 'w_dtype': w_dtype}
        validator.check_type_same(args, [mstype.int8, mstype.int32, mstype.float16, mstype.float32])
        return x_dtype
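`infer_shape` above implements the standard output-size formulas for the three padding modes. The same arithmetic in isolation, using example values matching test_conv.py below (7x7 kernel, stride 1, dilation 1, 224x224 input):

import math

h_in, kernel, stride, dilation, pad = 224, 7, 1, 1, 0

# "valid": no padding, only fully covered windows.
h_valid = math.ceil((h_in - kernel + 1) / stride)           # 218

# "same": output size is ceil(H / stride); padding is derived from it.
h_same = math.ceil(h_in / stride)                           # 224
pad_needed = max(0, (h_same - 1) * stride + kernel - h_in)  # 6, split 3/3 top and bottom

# "pad": explicit symmetric padding, with the dilation-adjusted kernel extent.
h_pad = math.floor(1 + (h_in + 2 * pad - kernel -
                        (kernel - 1) * (dilation - 1)) / stride)

print(h_valid, h_same, pad_needed, h_pad)  # 218 224 6 218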
tests/st/ops/custom_ops_tbe/test_conv.py: deleted, mode 100644 → 0 (view file at parent 58e6d7d9)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from .cus_conv2d import Cus_Conv2D

context.set_context(device_target="Ascend")


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        out_channel = 64
        kernel_size = 7
        self.conv = Cus_Conv2D(out_channel,
                               kernel_size,
                               mode=1,
                               pad_mode="valid",
                               pad=0,
                               stride=1,
                               dilation=1,
                               group=1)
        self.w = Parameter(initializer('normal', [64, 3, 7, 7]), name='w')

    @ms_function
    def construct(self, x):
        return self.conv(x, self.w)


def test_net():
    np.random.seed(3800)
    x = np.random.randn(32, 3, 224, 224).astype(np.float32)
    conv = Net()
    output = conv(Tensor(x))
    print(output.asnumpy())
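For this test's configuration (pad_mode "valid", stride 1, a 7x7 kernel, 64 output channels, input 32x3x224x224), the shape inference in cus_conv2d.py yields a 32x64x218x218 output. A quick check of that expectation, independent of the Ascend runtime:

import math

x_shape = (32, 3, 224, 224)
out_channel, kernel, stride = 64, 7, 1
h_out = math.ceil((x_shape[2] - kernel + 1) / stride)
w_out = math.ceil((x_shape[3] - kernel + 1) / stride)
assert (x_shape[0], out_channel, h_out, w_out) == (32, 64, 218, 218)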