Commit 366364ba authored by gongchen

Add custom op testcases.

Parent 9f9af3c5
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from te import tvm
from topi import generic
import te.lang.cce
from topi.cce import util
from te.platform.fusion_manager import fusion_manager
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
@fusion_manager.register("add3")
def add3_compute(input1, input2, const_bias):
sum2 = te.lang.cce.vadd(input1, input2)
sum3 = te.lang.cce.vadds(sum2, tvm.const(const_bias, dtype = input1.dtype))
return sum3
cus_add3_op_info = TBERegOp("CusAdd3") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("add3.so") \
    .compute_cost(10) \
    .kernel_name("CusAdd3Impl") \
    .partial_flag(True) \
    .attr("const_bias", "required", "float", "all") \
    .input(0, "input1", False, "required", "all") \
    .input(1, "input2", False, "required", "all") \
    .output(0, "sum", False, "required", "all") \
    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
    .get_op_info()

@op_info_register(cus_add3_op_info)
def CusAdd3Impl(input1, input2, sum, const_bias, kernel_name="CusAdd3Impl"):
    shape = input1.get("shape")
    shape = util.shape_refine(shape)
    dtype = input1.get("dtype").lower()
    input1 = tvm.placeholder(shape, name="input1", dtype=dtype)
    input2 = tvm.placeholder(shape, name="input2", dtype=dtype)

    with tvm.target.cce():
        res = add3_compute(input1, input2, const_bias)
        sch = generic.auto_schedule(res)

    config = {"print_ir": False,
              "name": kernel_name,
              "tensor_list": [input1, input2, res]}
    te.lang.cce.cce_build_code(sch, config)
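
The kernel's contract is simply sum = input1 + input2 + const_bias, so a minimal NumPy reference is enough to sanity-check the math against the test expectations further down. This sketch is illustrative only and not part of the commit:

import numpy as np

def add3_reference(input1, input2, const_bias):
    # Reference semantics of the add3 TBE kernel: elementwise add plus a scalar bias.
    return input1 + input2 + const_bias

# Matches the expectation used in test_add3.py below.
assert np.allclose(
    add3_reference(np.array([1.0, 4.0, 9.0], np.float32),
                   np.array([1.0, 2.0, 3.0], np.float32),
                   1.0),
    np.array([3.0, 7.0, 13.0], np.float32))
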
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore.ops import prim_attr_register, PrimitiveWithInfer
from mindspore.ops import operations as P
from mindspore import Tensor
# sum = input1 + input2 + const_bias
class CusAdd3(PrimitiveWithInfer):
    """Custom add3 definition"""

    @prim_attr_register
    def __init__(self, const_bias=0.0):
        self.init_prim_io_names(inputs=['input1', 'input2'], outputs=['sum3'])
        from add3_impl import CusAdd3Impl  # importing registers the TBE op info

    def infer_shape(self, input1, input2):
        return input1

    def infer_dtype(self, input1, input2):
        return input1
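
Unlike CusSquare below, CusAdd3 defines no vm_impl fallback. A minimal sketch of what one could look like, mirroring CusSquare.vm_impl; it assumes the registered const_bias attribute is reachable as self.const_bias (hypothetical, not part of this commit):

    def vm_impl(self, input1, input2):
        # Hypothetical VM fallback, mirroring CusSquare.vm_impl below.
        # Assumes prim_attr_register exposes const_bias as self.const_bias.
        input1 = input1.asnumpy()
        input2 = input2.asnumpy()
        return Tensor(input1 + input2 + self.const_bias)
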
@@ -13,10 +13,9 @@
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import Tensor
from mindspore.ops import prim_attr_register, PrimitiveWithInfer
from mindspore.ops import operations as P
# y = x^2
class CusSquare(PrimitiveWithInfer):
@@ -26,7 +25,7 @@ class CusSquare(PrimitiveWithInfer):
    def __init__(self):
        """init CusSquare"""
        self.init_prim_io_names(inputs=['x'], outputs=['y'])
-        from square_impl import CusSquare
+        from square_impl import CusSquareImpl

    def vm_impl(self, x):
        x = x.asnumpy()
@@ -37,3 +36,10 @@ class CusSquare(PrimitiveWithInfer):
    def infer_dtype(self, data_dtype):
        return data_dtype

    def get_bprop(self):
        def bprop(data, out, dout):
            gradient = data * 2
            dx = gradient * dout
            return (dx,)
        return bprop
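
The bprop above returns dx = 2 * x * dout, the analytic derivative of y = x^2. A standalone NumPy cross-check of the values asserted in test_grad_net below (illustrative only, not part of the commit):

import numpy as np

x = np.array([1.0, 4.0, 9.0], np.float32)
dout = np.ones_like(x)
# d(x**2)/dx = 2*x; with a sensitivity of ones, dx is just 2*x.
assert np.allclose(2 * x * dout, np.array([2.0, 8.0, 18.0], np.float32))
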
@@ -22,12 +22,8 @@ from topi.cce import util
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
# shape size limit for aicore is 2**31
SHAPE_SIZE_LIMIT = 200000000
@fusion_manager.register("square")
def square_compute(input_x, output_y, kernel_name="square"):
def square_compute(input_x, output_y):
"""
algorithm: square
calculating data's square,y= x*x
@@ -50,21 +46,22 @@ def square_compute(input_x, output_y, kernel_name="square"):
    return res
-cus_conv2D_op_info = TBERegOp("CusSquare") \
+cus_square_op_info = TBERegOp("CusSquare") \
    .fusion_type("OPAQUE") \
    .async_flag(False) \
    .binfile_name("square.so") \
    .compute_cost(10) \
-   .kernel_name("CusSquare") \
+   .kernel_name("CusSquareImpl") \
    .partial_flag(True) \
    .input(0, "x", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .get_op_info()

-@op_info_register(cus_conv2D_op_info)
-def CusSquare(input_x, output_y, kernel_name="square"):
+@op_info_register(cus_square_op_info)
+def CusSquareImpl(input_x, output_y, kernel_name="CusSquareImpl"):
"""
algorithm: square
calculating data's square,y= x*x
@@ -89,7 +86,7 @@ def CusSquare(input_x, output_y, kernel_name="square"):
    data = tvm.placeholder(shape, name="data", dtype=dtype.lower())

    with tvm.target.cce():
-        res = square_compute(data, output_y, kernel_name)
+        res = square_compute(data, output_y)
        sch = generic.auto_schedule(res)

    config = {"print_ir": False,
......
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import composite as C
from cus_add3 import CusAdd3
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
    """Net definition"""

    def __init__(self):
        super(Net, self).__init__()
        self.add3 = CusAdd3(1.0)

    def construct(self, input1, input2):
        return self.add3(input1, input2)


@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_net():
    input1 = np.array([1.0, 4.0, 9.0]).astype(np.float32)
    input2 = np.array([1.0, 2.0, 3.0]).astype(np.float32)
    add3_net = Net()
    output = add3_net(Tensor(input1), Tensor(input2))
    expect = np.array([3.0, 7.0, 13.0]).astype(np.float32)
    assert (output.asnumpy() == expect).all()
\ No newline at end of file
@@ -19,10 +19,9 @@ from cus_square import CusSquare
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import composite as C
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
    """Net definition"""
@@ -42,7 +41,17 @@ def test_net():
    x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
    square = Net()
    output = square(Tensor(x))
    print(x)
    print(output.asnumpy())
    expect = np.array([1.0, 16.0, 81.0]).astype(np.float32)
    assert (output.asnumpy() == expect).all()


@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_grad_net():
    x = np.array([1.0, 4.0, 9.0]).astype(np.float32)
    sens = np.array([1.0, 1.0, 1.0]).astype(np.float32)
    square = Net()
    dx = C.grad_with_sens(square)(Tensor(x), Tensor(sens))
    expect = np.array([2.0, 8.0, 18.0]).astype(np.float32)
    assert (dx.asnumpy() == expect).all()
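
The expected gradient can also be validated numerically, independent of the hand-written bprop. A central-difference sketch (illustrative only, not part of the commit):

import numpy as np

def numeric_grad(f, x, eps=1e-3):
    # Central-difference approximation of an elementwise derivative.
    return (f(x + eps) - f(x - eps)) / (2 * eps)

x = np.array([1.0, 4.0, 9.0], np.float32)
# Should agree with the analytic gradient 2*x used in test_grad_net.
assert np.allclose(numeric_grad(np.square, x), 2 * x, rtol=1e-2)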