magicwindyyd / mindspore (forked from MindSpore / mindspore)

Commit 366364ba, authored May 16, 2020 by gongchen
Parent: 9f9af3c5

Add custom op testcases.

Showing 6 changed files with 167 additions and 17 deletions
Changed files:
  tests/st/ops/custom_ops_tbe/add3_impl.py    +62  -0
  tests/st/ops/custom_ops_tbe/cus_add3.py     +32  -0
  tests/st/ops/custom_ops_tbe/cus_square.py   +9   -3
  tests/st/ops/custom_ops_tbe/square_impl.py  +7   -10
  tests/st/ops/custom_ops_tbe/test_add3.py    +44  -0
  tests/st/ops/custom_ops_tbe/test_square.py  +13  -4
tests/st/ops/custom_ops_tbe/add3_impl.py (new file, 0 → 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import

from te import tvm
from topi import generic
import te.lang.cce
from topi.cce import util
from te.platform.fusion_manager import fusion_manager
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType


@fusion_manager.register("add3")
def add3_compute(input1, input2, const_bias):
    sum2 = te.lang.cce.vadd(input1, input2)
    sum3 = te.lang.cce.vadds(sum2, tvm.const(const_bias, dtype=input1.dtype))
    return sum3


cus_add3_op_info = TBERegOp("CusAdd3") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("add3.so") \
    .compute_cost(10) \
    .kernel_name("CusAdd3Impl") \
    .partial_flag(True) \
    .attr("const_bias", "required", "float", "all") \
    .input(0, "input1", False, "required", "all") \
    .input(1, "input2", False, "required", "all") \
    .output(0, "sum", False, "required", "all") \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .get_op_info()


@op_info_register(cus_add3_op_info)
def CusAdd3Impl(input1, input2, sum, const_bias, kernel_name="CusAdd3Impl"):
    shape = input1.get("shape")
    shape = util.shape_refine(shape)
    dtype = input1.get("dtype").lower()
    input1 = tvm.placeholder(shape, name="input1", dtype=dtype.lower())
    input2 = tvm.placeholder(shape, name="input2", dtype=dtype.lower())

    with tvm.target.cce():
        res = add3_compute(input1, input2, const_bias)
        sch = generic.auto_schedule(res)

    config = {"print_ir": False,
              "name": kernel_name,
              "tensor_list": [input1, input2, res]}
    te.lang.cce.cce_build_code(sch, config)
tests/st/ops/custom_ops_tbe/cus_add3.py (new file, 0 → 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore.ops import prim_attr_register, PrimitiveWithInfer
from mindspore.ops import operations as P
from mindspore import Tensor


# sum = input1 + input2 + const_bias
class CusAdd3(PrimitiveWithInfer):
    """Custom add3 definition"""

    @prim_attr_register
    def __init__(self, const_bias=0.0):
        self.init_prim_io_names(inputs=['input1', 'input2'], outputs=['sum3'])
        from add3_impl import CusAdd3Impl

    def infer_shape(self, input1, input2):
        return input1

    def infer_dtype(self, input1, input2):
        return input1
tests/st/ops/custom_ops_tbe/cus_square.py

@@ -13,10 +13,9 @@
 # limitations under the License.
 # ============================================================================
 import numpy as np
 from mindspore import Tensor
 from mindspore.ops import prim_attr_register, PrimitiveWithInfer
 from mindspore.ops import operations as P

 # y = x^2
 class CusSquare(PrimitiveWithInfer):

@@ -26,7 +25,7 @@ class CusSquare(PrimitiveWithInfer):
     def __init__(self):
         """init CusSquare"""
         self.init_prim_io_names(inputs=['x'], outputs=['y'])
-        from square_impl import CusSquare
+        from square_impl import CusSquareImpl

     def vm_impl(self, x):
         x = x.asnumpy()

@@ -37,3 +36,10 @@ class CusSquare(PrimitiveWithInfer):
     def infer_dtype(self, data_dtype):
         return data_dtype
+
+    def get_bprop(self):
+        def bprop(data, out, dout):
+            gradient = data * 2
+            dx = gradient * dout
+            return (dx,)
+        return bprop
tests/st/ops/custom_ops_tbe/square_impl.py

@@ -22,12 +22,8 @@ from topi.cce import util
 from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

 # shape size limit for aicore is 2**31
 SHAPE_SIZE_LIMIT = 200000000

 @fusion_manager.register("square")
-def square_compute(input_x, output_y, kernel_name="square"):
+def square_compute(input_x, output_y):
     """
     algorithm: square
     calculating data's square,y= x*x

@@ -50,21 +46,22 @@ def square_compute(input_x, output_y, kernel_name="square"):
     return res

-cus_conv2D_op_info = TBERegOp("CusSquare") \
+cus_square_op_info = TBERegOp("CusSquare") \
     .fusion_type("OPAQUE") \
     .async_flag(False) \
     .binfile_name("square.so") \
     .compute_cost(10) \
-    .kernel_name("CusSquare") \
+    .kernel_name("CusSquareImpl") \
     .partial_flag(True) \
     .input(0, "x", False, "required", "all") \
     .output(0, "y", False, "required", "all") \
     .dtype_format(DataType.F32_Default, DataType.F32_Default) \
     .dtype_format(DataType.F16_Default, DataType.F16_Default) \
     .get_op_info()

-@op_info_register(cus_conv2D_op_info)
-def CusSquare(input_x, output_y, kernel_name="square"):
+@op_info_register(cus_square_op_info)
+def CusSquareImpl(input_x, output_y, kernel_name="CusSquareImpl"):
     """
     algorithm: square
     calculating data's square,y= x*x

@@ -89,7 +86,7 @@ def CusSquare(input_x, output_y, kernel_name="square"):
     data = tvm.placeholder(shape, name="data", dtype=dtype.lower())
     with tvm.target.cce():
-        res = square_compute(data, output_y, kernel_name)
+        res = square_compute(data, output_y)
         sch = generic.auto_schedule(res)

     config = {"print_ir": False,
tests/st/ops/custom_ops_tbe/test_add3.py (new file, 0 → 100644)

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import composite as C
from cus_add3 import CusAdd3

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    """Net definition"""

    def __init__(self):
        super(Net, self).__init__()
        self.add3 = CusAdd3(1.0)

    def construct(self, input1, input2):
        return self.add3(input1, input2)


@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_net():
    input1 = np.array([1.0, 4.0, 9.0]).astype(np.float32)
    input2 = np.array([1.0, 2.0, 3.0]).astype(np.float32)
    add3_net = Net()
    output = add3_net(Tensor(input1), Tensor(input2))
    expect = np.array([3.0, 7.0, 13.0]).astype(np.float32)
    assert (output.asnumpy() == expect).all()
\ No newline at end of file
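
The expected array in test_net follows from the contract noted in cus_add3.py (sum = input1 + input2 + const_bias) with the const_bias of 1.0 that Net passes to CusAdd3. A minimal NumPy-only check of that arithmetic, independent of the Ascend kernel, reusing only values from this commit:

import numpy as np

input1 = np.array([1.0, 4.0, 9.0], dtype=np.float32)
input2 = np.array([1.0, 2.0, 3.0], dtype=np.float32)
const_bias = 1.0  # the value Net.__init__ passes to CusAdd3 above

expect = input1 + input2 + const_bias
print(expect)  # [ 3.  7. 13.], matching the expected array in test_net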
tests/st/ops/custom_ops_tbe/test_square.py

@@ -19,10 +19,9 @@ from cus_square import CusSquare
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.ops import composite as C

 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

 class Net(nn.Cell):
     """Net definition"""

@@ -42,7 +41,17 @@ def test_net():
     x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
     square = Net()
     output = square(Tensor(x))
     print(x)
     print(output.asnumpy())
     expect = np.array([1.0, 16.0, 81.0]).astype(np.float32)
     assert (output.asnumpy() == expect).all()
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_ascend_training
+@pytest.mark.platform_arm_ascend_training
+@pytest.mark.env_onecard
+def test_grad_net():
+    x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
+    sens = np.array([1.0, 1.0, 1.0]).astype(np.float32)
+    square = Net()
+    dx = C.grad_with_sens(square)(Tensor(x), Tensor(sens))
+    expect = np.array([2.0, 8.0, 18.0]).astype(np.float32)
+    assert (dx.asnumpy() == expect).all()
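
Likewise, the expectation in test_grad_net follows from the get_bprop added to cus_square.py (gradient = data * 2, dx = gradient * dout) evaluated with a sensitivity of ones. A minimal NumPy check of that gradient arithmetic, using only values from this commit:

import numpy as np

x = np.array([1.0, 4.0, 9.0], dtype=np.float32)
sens = np.array([1.0, 1.0, 1.0], dtype=np.float32)

dx = (x * 2) * sens  # mirrors bprop: gradient = data * 2; dx = gradient * dout
print(dx)  # [ 2.  8. 18.], matching the expected array in test_grad_net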