Unverified commit 23a69bc7, authored by ykkk2333, committed by GitHub

update elementwise unittest style, *test=kunlun (#40779)

Parent commit: bdef57cd
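Every file in this commit is restyled the same way: the per-class @unittest.skipIf(...) guards and hard-coded np.float32 dtypes are dropped in favor of an XPUOpTestWrapper subclass whose inner test classes read their dtype from self.in_type, and one concrete test class per supported dtype is then generated at import time. The following is a condensed sketch of that pattern, taken from the elementwise_add file below; it assumes it runs inside Paddle's XPU test tree, where the xpu.get_test_cover_info helpers are importable.

    from op_test_xpu import XPUOpTest
    from xpu.get_test_cover_info import (create_test_class,
                                         get_xpu_op_support_types,
                                         XPUOpTestWrapper)


    class XPUTestElementwiseAddOp(XPUOpTestWrapper):
        def __init__(self):
            self.op_name = 'elementwise_add'       # operator under test
            self.use_dynamic_create_class = False

        class TestElementwiseAddOp(XPUOpTest):
            def setUp(self):
                self.op_type = "elementwise_add"
                # self.in_type is provided by the generated per-dtype class
                self.dtype = self.in_type


    # register one concrete test class per dtype the XPU kernel supports
    for stype in get_xpu_op_support_types('elementwise_add'):
        create_test_class(globals(), XPUTestElementwiseAddOp, stype)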
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,12 +22,17 @@ from op_test_xpu import XPUOpTest
import unittest
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestElementwiseAddOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'elementwise_add'
        self.use_dynamic_create_class = False

    class TestElementwiseAddOp(XPUOpTest):
        def setUp(self):
            self.op_type = "elementwise_add"
            self.init_dtype()
@@ -78,7 +83,7 @@ class TestElementwiseAddOp(XPUOpTest):
            self.out = np.add(self.x, self.y)

        def init_dtype(self):
            self.dtype = self.in_type

        def init_axis(self):
            self.axis = -1
@@ -86,41 +91,29 @@ class TestElementwiseAddOp(XPUOpTest):
        def init_max_relative_error(self):
            self.max_relative_error = 0.006

    @skip_check_grad_ci(
        reason="[skip shape check] Use y_shape(1) to test broadcast.")
    class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(2, 3, 4).astype(self.dtype)
            self.y = np.random.rand(1).astype(self.dtype)
            self.out = self.x + self.y

    @skip_check_grad_ci(
        reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
    class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(2, 3, 4).astype(self.dtype)
            self.y = np.random.rand(1, 1).astype(self.dtype)
            self.out = self.x + self.y

    class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.random((100, )).astype(self.dtype)
            self.y = np.random.random((100, )).astype(self.dtype)
            self.out = np.add(self.x, self.y)

    class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(100, 2, 3).astype(self.dtype)
            self.y = np.random.rand(100).astype(self.dtype)
@@ -129,10 +122,7 @@ class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
        def init_axis(self):
            self.axis = 0

    class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(2, 100, 3).astype(self.dtype)
            self.y = np.random.rand(100).astype(self.dtype)
@@ -141,19 +131,13 @@ class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
        def init_axis(self):
            self.axis = 1

    class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(2, 3, 100).astype(self.dtype)
            self.y = np.random.rand(100).astype(self.dtype)
            self.out = self.x + self.y.reshape(1, 1, 100)

    class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
            self.y = np.random.rand(10, 12).astype(self.dtype)
@@ -162,10 +146,7 @@ class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
        def init_axis(self):
            self.axis = 1

    class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype)
            self.y = np.random.rand(100, 1).astype(self.dtype)
@@ -174,37 +155,25 @@ class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
        def init_axis(self):
            self.axis = 0

    class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(10, 3, 12).astype(self.dtype)
            self.y = np.random.rand(10, 1, 12).astype(self.dtype)
            self.out = self.x + self.y

    class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
            self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
            self.out = self.x + self.y

    class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
            self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
            self.out = self.x + self.y

    class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(2, 10, 12).astype(self.dtype)
            self.y = np.random.rand(10, 12).astype(self.dtype)
@@ -213,12 +182,9 @@ class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
        def init_axis(self):
            self.axis = 1

    @skip_check_grad_ci(
        reason="[skip shape check] Use y_shape(1) to test broadcast.")
    class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(100, 1).astype(self.dtype)
            self.y = np.random.rand(1).astype(self.dtype)
@@ -227,10 +193,7 @@ class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
        def init_axis(self):
            self.axis = 1

    class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(100, 2, 3).astype(self.dtype)
            self.y = np.random.rand(100, 1, 1).astype(self.dtype)
@@ -239,10 +202,7 @@ class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
        def init_axis(self):
            self.axis = -1

    class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(2, 3, 100).astype(self.dtype)
            self.y = np.random.rand(1, 1, 100).astype(self.dtype)
@@ -251,10 +211,7 @@ class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
        def init_axis(self):
            self.axis = -1

    class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
            self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
@@ -263,10 +220,7 @@ class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
        def init_axis(self):
            self.axis = -1

    class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
        def init_input_output(self):
            self.x = np.random.rand(10, 12).astype(self.dtype)
            self.y = np.random.rand(2, 3, 10, 12).astype(self.dtype)
@@ -275,10 +229,7 @@ class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
        def init_axis(self):
            self.axis = 2

    class TestElementwiseAddOpError(unittest.TestCase):
        def test_errors(self):
            with program_guard(Program(), Program()):
                # the input of elementwise_add must be Variable.
@@ -286,18 +237,19 @@ class TestElementwiseAddOpError(unittest.TestCase):
                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
                y1 = fluid.create_lod_tensor(
                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
                self.assertRaises(TypeError, fluid.layers.elementwise_add, x1,
                                  y1)

                # the input dtype of elementwise_add must be float16 or float32 or float64 or int32 or int64
                # float16 only can be set on GPU place
                x2 = fluid.layers.data(
                    name='x2', shape=[3, 4, 5, 6], dtype="uint8")
                y2 = fluid.layers.data(
                    name='y2', shape=[3, 4, 5, 6], dtype="uint8")
                self.assertRaises(TypeError, fluid.layers.elementwise_add, x2,
                                  y2)

    class TestAddOp(unittest.TestCase):
        def test_name(self):
            with fluid.program_guard(fluid.Program()):
                x = fluid.data(name="x", shape=[2, 3], dtype="float32")
@@ -337,170 +289,9 @@ class TestAddOp(unittest.TestCase):
                self.assertEqual((np_z == z_expected).all(), True)


support_types = get_xpu_op_support_types('elementwise_add')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseAddOp, stype)

if __name__ == '__main__':
    unittest.main()
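The broadcast_* cases above all encode the same rule: with axis = k, y's dimensions are aligned to x's dimensions starting at index k, which the expected outputs emulate with an explicit reshape that inserts trailing singleton dimensions. The same reference computation in plain numpy, as a standalone sketch:

    import numpy as np

    x = np.random.rand(100, 2, 3).astype(np.float32)
    y = np.random.rand(100).astype(np.float32)

    # elementwise_add(x, y, axis=0) aligns y with x's dim 0, so the
    # expected output is computed as x + y.reshape(100, 1, 1)
    ref = x + y.reshape(100, 1, 1)
    print(ref.shape)  # (100, 2, 3)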
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,27 +20,37 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestElementwiseDivOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'elementwise_div'
        self.use_dynamic_create_class = False

    class ElementwiseDivOp(XPUOpTest):
        def setUp(self):
            self.op_type = "elementwise_div"
            self.dtype = self.in_type
            self.init_dtype()
            self.use_xpu = True
            self.init_input_output()
            """ Warning
            CPU gradient check error!
            'X': np.random.random((32,84)).astype("float32"),
            'Y': np.random.random((32,84)).astype("float32")
            """

        def init_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
            }
            self.outputs = {
                'Out': np.divide(self.inputs['X'], self.inputs['Y'])
            }

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
@@ -74,41 +84,31 @@ class ElementwiseDivOp(XPUOpTest):
        def init_dtype(self):
            pass

    @skip_check_grad_ci(
        reason="[skip shape check] Use y_shape(1) to test broadcast.")
    class TestElementwiseDivOp_scalar(ElementwiseDivOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype)
            }
            self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']}

    class TestElementwiseDivOp_Vector(ElementwiseDivOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
            }
            self.outputs = {
                'Out': np.divide(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
            }
            self.attrs = {'axis': 0}
@@ -117,15 +117,11 @@ class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp):
                np.divide(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
            }

    class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
            }
            self.attrs = {'axis': 1}
@@ -134,15 +130,11 @@ class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp):
                np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
            }

    class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
            }
            self.outputs = {
@@ -150,90 +142,78 @@ class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp):
                np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
            }

    class TestElementwiseDivOp_broadcast_3(ElementwiseDivOp):
        def init_input_output(self):
            self.inputs = {
                'X':
                np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype)
            }
            self.attrs = {'axis': 1}
            self.outputs = {
                'Out': np.divide(self.inputs['X'],
                                 self.inputs['Y'].reshape(1, 10, 12, 1))
            }

    class TestElementwiseDivOp_broadcast_4(ElementwiseDivOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype(self.dtype)
            }
            self.outputs = {
                'Out': np.divide(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseDivOp_broadcast_5(ElementwiseDivOp):
        def init_input_output(self):
            self.inputs = {
                'X':
                np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype(self.dtype)
            }
            self.outputs = {
                'Out': np.divide(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseDivOp_commonuse_1(ElementwiseDivOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [1, 1, 100]).astype(self.dtype),
            }
            self.outputs = {
                'Out': np.divide(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseDivOp_commonuse_2(ElementwiseDivOp):
        def init_input_output(self):
            self.inputs = {
                'X':
                np.random.uniform(0.1, 1, [30, 3, 1, 5]).astype(self.dtype),
                'Y':
                np.random.uniform(0.1, 1, [30, 1, 4, 1]).astype(self.dtype),
            }
            self.outputs = {
                'Out': np.divide(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseDivOp_xsize_lessthan_ysize(ElementwiseDivOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype),
                'Y':
                np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype(self.dtype),
            }
            self.attrs = {'axis': 2}
            self.outputs = {
                'Out': np.divide(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseDivBroadcast(unittest.TestCase):
        def test_shape_with_batch_sizes(self):
            with fluid.program_guard(fluid.Program()):
                x_var = fluid.data(
@@ -241,10 +221,15 @@ class TestElementwiseDivBroadcast(unittest.TestCase):
                one = 2.
                out = one / x_var
                exe = fluid.Executor(fluid.XPUPlace(0))
                x = np.random.uniform(0.1, 0.6,
                                      (1, 3, 32, 32)).astype('float32')
                out_result, = exe.run(feed={'x': x}, fetch_list=[out])
                self.assertEqual((out_result == (2 / x)).all(), True)


support_types = get_xpu_op_support_types('elementwise_div')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseDivOp, stype)

if __name__ == '__main__':
    unittest.main()
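One detail worth noting in these tests: operands are drawn from uniform(0.1, 1) rather than the full unit interval. Since d(x/y)/dy = -x/y**2, a divisor near zero inflates both the quotient and its gradient, which makes tolerance-based gradient checks flaky; the retained "CPU gradient check error" warning most likely refers to exactly that. A standalone numpy illustration:

    import numpy as np

    x = np.float32(0.5)
    for y in [np.float32(0.5), np.float32(1e-3), np.float32(1e-6)]:
        # the gradient magnitude x / y**2 grows quadratically as y -> 0
        print(float(y), float(x / y), float(x / y**2))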
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,21 +20,24 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
import random


class XPUTestElementwiseModOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'elementwise_floordiv'
        self.use_dynamic_create_class = False

    class TestElementwiseModOp(XPUOpTest):
        def init_kernel_type(self):
            self.use_mkldnn = False

        def setUp(self):
            self.op_type = "elementwise_floordiv"
            self.dtype = self.in_type
            self.axis = -1
            self.init_input_output()
            self.init_kernel_type()
            self.init_axis()
@@ -53,35 +56,30 @@ class TestElementwiseModOp(XPUOpTest):
        def init_input_output(self):
            self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
            self.y = np.random.uniform(1, 1000, [10, 10]).astype(self.dtype)
            self.out = np.floor_divide(self.x, self.y)

        def init_axis(self):
            pass

    class TestElementwiseModOp_scalar(TestElementwiseModOp):
        def init_input_output(self):
            scale_x = random.randint(0, 100000)
            scale_y = random.randint(1, 100000)
            self.x = (np.random.rand(2, 3, 4) * scale_x).astype(self.dtype)
            self.y = (np.random.rand(1) * scale_y + 1).astype(self.dtype)
            self.out = np.floor_divide(self.x, self.y)

    class TestElementwiseModOpInverse(TestElementwiseModOp):
        def init_input_output(self):
            self.x = np.random.uniform(0, 10000, [10]).astype(self.dtype)
            self.y = np.random.uniform(1, 1000, [10, 10]).astype(self.dtype)
            self.out = np.floor_divide(self.x, self.y)


support_types = get_xpu_op_support_types('elementwise_floordiv')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseModOp, stype)

if __name__ == '__main__':
    unittest.main()
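Two small behavior notes on the floordiv tests: the divisor is now drawn from uniform(1, 1000) instead of uniform(0, 1000), keeping it safely away from zero, and numpy's floor_divide serves as the reference, which rounds toward negative infinity exactly like Python's // operator:

    import numpy as np

    print(np.floor_divide(7, 2))      # 3
    print(np.floor_divide(-7, 2))     # -4, rounds toward negative infinity
    print(np.floor_divide(7.5, 2.0))  # 3.0, floats behave the same way
    print(7 // 2, -7 // 2)            # Python's // agrees: 3 -4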
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,23 +18,33 @@ import numpy as np
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
import paddle
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestElementwiseMaxOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'elementwise_max'
        self.use_dynamic_create_class = False

    class TestElementwiseOp(XPUOpTest):
        def setUp(self):
            self.use_xpu = True
            self.op_type = "elementwise_max"
            self.dtype = self.in_type
            self.init_input_output()

        # If x and y have the same value, the max() is not differentiable.
        # So we generate test data by the following method
        # to avoid them being too close to each other.
        def init_input_output(self):
            x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
            sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype)
            y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
            }

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
@@ -64,116 +74,98 @@ class TestElementwiseOp(XPUOpTest):
                    max_relative_error=0.006,
                    no_grad_set=set('Y'))

    @skip_check_grad_ci(
        reason="[skip shape check] Use y_shape(1) to test broadcast.")
    class TestElementwiseMaxOp_scalar(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.random_integers(-5, 5, [2, 3, 20]).astype(self.dtype)
            y = np.array([0.5]).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseMaxOp_Vector(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.random((100, )).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
            y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
            y = x[:, 0, 0] + sgn * \
                np.random.uniform(1, 2, (100, )).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.attrs = {'axis': 0}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'],
                                  self.inputs['Y'].reshape(100, 1, 1))
            }

    class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
            y = x[0, :, 0] + sgn * \
                np.random.uniform(1, 2, (100, )).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.attrs = {'axis': 1}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'],
                                  self.inputs['Y'].reshape(1, 100, 1))
            }

    class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
            y = x[0, 0, :] + sgn * \
                np.random.uniform(1, 2, (100, )).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'],
                                  self.inputs['Y'].reshape(1, 1, 100))
            }

    class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (50, 2)).astype(self.dtype)
            y = x[0, :, :, 0] + sgn * \
                np.random.uniform(1, 2, (50, 2)).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.attrs = {'axis': 1}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'],
                                  self.inputs['Y'].reshape(1, 50, 2, 1))
            }

    class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(self.dtype)
            y = x + sgn * \
                np.random.uniform(1, 2, (2, 3, 1, 5)).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
            }


support_types = get_xpu_op_support_types('elementwise_max')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseMaxOp, stype)

if __name__ == '__main__':
    unittest.main()
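The data recipe spelled out in the comment above is shared by the min tests that follow: where x equals y, max(x, y) is not differentiable, so y is built as x plus a random sign times an offset drawn from uniform(0.1, 1), which keeps every pair of operands at least 0.1 apart. A standalone check of that invariant:

    import numpy as np

    x = np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)
    sgn = np.random.choice([-1, 1], [13, 17]).astype(np.float32)
    y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(np.float32)

    # the offset magnitude is at least 0.1 (up to float32 rounding),
    # so x and y never coincide and max/min stay differentiable there
    assert (np.abs(x - y) >= 0.1 - 1e-6).all()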
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,22 +20,33 @@ import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestElementwiseMinOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'elementwise_min'
        self.use_dynamic_create_class = False

    class TestElementwiseOp(XPUOpTest):
        def setUp(self):
            self.op_type = "elementwise_min"
            # If x and y have the same value, the min() is not differentiable.
            # So we generate test data by the following method
            # to avoid them being too close to each other.
            self.dtype = self.in_type
            self.init_input_output()

        def init_input_output(self):
            x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
            sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype)
            y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
            }

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
@@ -65,116 +76,93 @@ class TestElementwiseOp(XPUOpTest):
                    max_relative_error=0.005,
                    no_grad_set=set('Y'))

    @skip_check_grad_ci(
        reason="[skip shape check] Use y_shape(1) to test broadcast.")
    class TestElementwiseMinOp_scalar(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.random_integers(-5, 5, [10, 3, 4]).astype(self.dtype)
            y = np.array([0.5]).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseMinOp_Vector(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.random((100, )).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
            y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
            y = x[:, 0, 0] + sgn * \
                np.random.uniform(1, 2, (100, )).astype(self.dtype)
            self.attrs = {'axis': 0}
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.minimum(self.inputs['X'],
                                  self.inputs['Y'].reshape(100, 1, 1))
            }

    class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
            y = x[0, :, 0] + sgn * \
                np.random.uniform(1, 2, (100, )).astype(self.dtype)
            self.attrs = {'axis': 1}
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.minimum(self.inputs['X'],
                                  self.inputs['Y'].reshape(1, 100, 1))
            }

    class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
            y = x[0, 0, :] + sgn * \
                np.random.uniform(1, 2, (100, )).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.minimum(self.inputs['X'],
                                  self.inputs['Y'].reshape(1, 1, 100))
            }

    class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (25, 4)).astype(self.dtype)
            y = x[0, :, :, 0] + sgn * \
                np.random.uniform(1, 2, (25, 4)).astype(self.dtype)
            self.attrs = {'axis': 1}
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.minimum(self.inputs['X'],
                                  self.inputs['Y'].reshape(1, 25, 4, 1))
            }

    class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(self.dtype)
            y = x + sgn * \
                np.random.uniform(1, 2, (2, 10, 1, 5)).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
            }


support_types = get_xpu_op_support_types('elementwise_min')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseMinOp, stype)

if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
...@@ -20,32 +20,30 @@ import paddle.fluid as fluid ...@@ -20,32 +20,30 @@ import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard from paddle.fluid import compiler, Program, program_guard
import paddle import paddle
from op_test_xpu import XPUOpTest from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static() paddle.enable_static()
class XPUTestElementwiseMulOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'elementwise_mul'
        self.use_dynamic_create_class = False

    class ElementwiseMulOp(XPUOpTest):
        def init_kernel_type(self):
            self.use_mkldnn = False

        def setUp(self):
            self.op_type = 'elementwise_mul'
            self.use_xpu = True
            self.dtype = self.in_type
            self.axis = -1
            self.init_dtype()
            self.init_input_output()
            self.init_kernel_type()
            self.init_axis()

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
@@ -81,6 +79,12 @@ class ElementwiseMulOp(XPUOpTest):
            self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
            self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
            self.out = np.multiply(self.x, self.y)
            self.inputs = {
                'X': OpTest.np_dtype_to_fluid_dtype(self.x),
                'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
            }
            self.outputs = {'Out': self.out}
            self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
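        # The tensor setup above now lives in init_input_output() instead of
        # setUp(); each subclass below overrides only this hook, and the
        # dtype arrives through self.in_type, which create_test_class
        # supplies for each supported XPU dtype.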
        def init_dtype(self):
            pass

@@ -88,157 +92,109 @@ class ElementwiseMulOp(XPUOpTest):
        def init_axis(self):
            pass
    @skip_check_grad_ci(
        reason="[skip shape check] Use y_shape(1) to test broadcast.")
    class TestElementwiseMulOp_scalar(ElementwiseMulOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(10, 3, 4).astype(self.dtype),
                'Y': np.random.rand(1).astype(self.dtype)
            }
            self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
    class TestElementwiseMulOp_Vector(ElementwiseMulOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.random((100, )).astype(self.dtype),
                'Y': np.random.random((100, )).astype(self.dtype)
            }
            self.outputs = {
                'Out': np.multiply(self.inputs['X'], self.inputs['Y'])
            }
    class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(100, 2, 3).astype(self.dtype),
                'Y': np.random.rand(100).astype(self.dtype)
            }
            self.outputs = {
                'Out': self.inputs['X'] * self.inputs['Y'].reshape(100, 1, 1)
            }
            self.attrs = {'axis': 0}
    class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(2, 100, 3).astype(self.dtype),
                'Y': np.random.rand(100).astype(self.dtype)
            }
            self.attrs = {'axis': 1}
            self.outputs = {
                'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 100, 1)
            }
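    # With axis=1, Y's dimensions align against X starting at dimension 1,
    # which is why the expected value reshapes Y(100) to (1, 100, 1) against
    # X(2, 100, 3); broadcast_0 above does the same at axis=0.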
    class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(2, 3, 100).astype(self.dtype),
                'Y': np.random.rand(100).astype(self.dtype)
            }
            self.outputs = {
                'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 100)
            }
    class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(2, 10, 12, 3).astype(self.dtype),
                'Y': np.random.rand(10, 12).astype(self.dtype)
            }
            self.attrs = {'axis': 1}
            self.outputs = {
                'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1)
            }
    class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(10, 2, 11).astype(self.dtype),
                'Y': np.random.rand(10, 1, 11).astype(self.dtype)
            }
            self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
    class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(10, 4, 2, 3).astype(self.dtype),
                'Y': np.random.rand(10, 4, 1, 3).astype(self.dtype)
            }
            self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
    class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(2, 3, 100).astype(self.dtype),
                'Y': np.random.rand(1, 1, 100).astype(self.dtype)
            }
            self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
    class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(30, 3, 1, 5).astype(self.dtype),
                'Y': np.random.rand(30, 1, 4, 1).astype(self.dtype)
            }
            self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
    class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(10, 10).astype(self.dtype),
                'Y': np.random.rand(2, 2, 10, 10).astype(self.dtype)
            }
            self.attrs = {'axis': 2}

@@ -246,12 +202,8 @@ class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
            self.outputs = {
                'Out': self.inputs['X'].reshape(1, 1, 10, 10) * self.inputs['Y']
            }
    class TestElementwiseMulOpError(unittest.TestCase):
        def test_errors(self):
            with program_guard(Program(), Program()):
                # the input of elementwise_mul must be Variable.

@@ -259,13 +211,21 @@ class TestElementwiseMulOpError(unittest.TestCase):
                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
                y1 = fluid.create_lod_tensor(
                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
                self.assertRaises(TypeError, fluid.layers.elementwise_mul, x1,
                                  y1)

                # the input dtype of elementwise_mul must be float32
                x2 = fluid.layers.data(
                    name='x2', shape=[3, 4, 5, 6], dtype="uint8")
                y2 = fluid.layers.data(
                    name='y2', shape=[3, 4, 5, 6], dtype="uint8")
                self.assertRaises(TypeError, fluid.layers.elementwise_mul, x2,
                                  y2)
support_types = get_xpu_op_support_types('elementwise_mul')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseMulOp, stype)
if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -20,17 +20,28 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
@skip_check_grad_ci(reason="XPU does not support grad op currently")
class XPUTestElementwisePowOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'elementwise_pow'
        self.use_dynamic_create_class = False

    class TestElementwisePowOp(XPUOpTest):
        def setUp(self):
            self.op_type = "elementwise_pow"
            self.dtype = self.in_type
            self.__class__.no_need_check_grad = True
            self.compute_input_output()

        def compute_input_output(self):
            self.inputs = {
                'X': np.random.uniform(1, 2, [20, 5]).astype(self.dtype),
                'Y': np.random.uniform(1, 2, [20, 5]).astype(self.dtype)
            }
            self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
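        # Grad checking is switched off twice here: the skip_check_grad_ci
        # decorator records why (no XPU grad kernel for elementwise_pow yet,
        # per its reason string), and no_need_check_grad makes the OpTest
        # machinery skip the gradient pass for every generated dtype variant.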
@@ -39,97 +50,65 @@ class TestElementwisePowOp(XPUOpTest):
                place = paddle.XPUPlace(0)
                self.check_output_with_place(place)
    class TestElementwisePowOp_big_shape_1(TestElementwisePowOp):
        def compute_input_output(self):
            self.inputs = {
                'X': np.random.uniform(1, 2, [10, 10]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [10, 10]).astype(self.dtype)
            }
            self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
    class TestElementwisePowOp_big_shape_2(TestElementwisePowOp):
        def compute_input_output(self):
            self.inputs = {
                'X': np.random.uniform(1, 2, [10, 10]).astype(self.dtype),
                'Y': np.random.uniform(0.2, 2, [10, 10]).astype(self.dtype)
            }
            self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
    @skip_check_grad_ci(
        reason="[skip shape check] Use y_shape(1) to test broadcast.")
    class TestElementwisePowOp_scalar(TestElementwisePowOp):
        def compute_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype)
            }
            self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
    class TestElementwisePowOp_tensor(TestElementwisePowOp):
        def compute_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
                'Y': np.random.uniform(1, 3, [100]).astype(self.dtype)
            }
            self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
    class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
        def compute_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
            }
            self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
    class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
        def compute_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
            }
            self.attrs = {'axis': 1}
            self.outputs = {
                'Out': np.power(self.inputs['X'],
                                self.inputs['Y'].reshape(100, 1))
            }
    class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
        def compute_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
            }
            self.attrs = {'axis': 0}
            self.outputs = {

@@ -137,46 +116,44 @@ class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
                np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
            }
    class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
        def compute_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [20, 5]).astype(self.dtype)
            }
            self.attrs = {'axis': 1}
            self.outputs = {
                'Out': np.power(self.inputs['X'],
                                self.inputs['Y'].reshape(1, 20, 5, 1))
            }
    class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
        def compute_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype(self.dtype)
            }
            self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
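    # The integer case below stays on the plain OpTest/CPU path (note the
    # OpTest base class and the place-less check_output call); it is not
    # parameterized over XPU dtypes like the classes above.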
    class TestElementwisePowOpInt(OpTest):
        def setUp(self):
            self.op_type = "elementwise_pow"
            self.inputs = {
                'X': np.asarray([1, 3, 6]),
                'Y': np.asarray([1, 1, 1])
            }
            self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}

        def test_check_output(self):
            self.check_output()
support_types = get_xpu_op_support_types('elementwise_pow')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwisePowOp, stype)
if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -19,18 +19,27 @@ import paddle
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
import unittest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
class XPUTestElementwiseSubOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'elementwise_sub'
        self.use_dynamic_create_class = False

    class TestElementwiseOp(XPUOpTest):
        def setUp(self):
            self.op_type = "elementwise_sub"
            self.use_xpu = True
            self.dtype = self.in_type
            self.init_input_output()

        def init_input_output(self):
            self.inputs = {
                'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype),
                'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype)
            }
            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@@ -62,41 +71,29 @@ class TestElementwiseOp(XPUOpTest):
                    max_relative_error=0.005,
                    no_grad_set=set('Y'))
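            # no_grad_set=set('Y') keeps Y out of the finite-difference
            # gradient check, and max_relative_error=0.005 loosens the
            # tolerance of that check.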
    @skip_check_grad_ci(
        reason="[skip shape check] Use y_shape(1) to test broadcast.")
    class TestElementwiseSubOp_scalar(TestElementwiseOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(10, 3, 4).astype(self.dtype),
                'Y': np.random.rand(1).astype(self.dtype)
            }
            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
    class TestElementwiseSubOp_Vector(TestElementwiseOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.random((100, )).astype(self.dtype),
                'Y': np.random.random((100, )).astype(self.dtype)
            }
            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
    class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(100, 3, 2).astype(self.dtype),
                'Y': np.random.rand(100).astype(self.dtype)
            }
            self.attrs = {'axis': 0}

@@ -104,15 +101,11 @@ class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
                'Out': self.inputs['X'] - self.inputs['Y'].reshape(100, 1, 1)
            }
    class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(2, 100, 3).astype(self.dtype),
                'Y': np.random.rand(100).astype(self.dtype)
            }
            self.attrs = {'axis': 1}

@@ -120,30 +113,22 @@ class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
                'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 100, 1)
            }
    class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(2, 3, 100).astype(self.dtype),
                'Y': np.random.rand(100).astype(self.dtype)
            }
            self.outputs = {
                'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 100)
            }
    class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(2, 10, 12, 3).astype(self.dtype),
                'Y': np.random.rand(10, 12).astype(self.dtype)
            }
            self.attrs = {'axis': 1}

@@ -151,51 +136,35 @@ class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
                'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 10, 12, 1)
            }
    class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(2, 5, 3, 12).astype(self.dtype),
                'Y': np.random.rand(2, 5, 1, 12).astype(self.dtype)
            }
            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
    class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(2, 3, 100).astype(self.dtype),
                'Y': np.random.rand(1, 1, 100).astype(self.dtype)
            }
            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
    class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(10, 3, 1, 4).astype(self.dtype),
                'Y': np.random.rand(10, 1, 12, 1).astype(self.dtype)
            }
            self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
    class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
        def init_input_output(self):
            self.inputs = {
                'X': np.random.rand(10, 12).astype(self.dtype),
                'Y': np.random.rand(2, 3, 10, 12).astype(self.dtype)
            }
            self.attrs = {'axis': 2}

@@ -205,5 +174,9 @@ class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
            }
support_types = get_xpu_op_support_types('elementwise_sub')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseSubOp, stype)
if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -18,9 +18,10 @@ import unittest
import numpy as np
import sys
sys.path.append("..")
from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid.core as core
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
@@ -41,19 +42,28 @@ def numpy_topk(x, k=1, axis=-1, largest=True):
    return value, indices
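The body of numpy_topk is folded away by the diff; a minimal sketch consistent with its signature and with how the tests below call it (illustrative only, not necessarily the real reference implementation at the top of this file):

    import numpy as np

    def numpy_topk_sketch(x, k=1, axis=-1, largest=True):
        # Order indices along `axis` (descending when largest=True), keep
        # the first k, then gather the matching values.
        indices = np.argsort(-x if largest else x, axis=axis)
        indices = np.take(indices, np.arange(k), axis=axis)
        value = np.take_along_axis(x, indices, axis=axis)
        return value, indices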
class XPUTestTopKV2Op(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'top_k_v2'
        self.use_dynamic_create_class = False

    class TestTopkOp(XPUOpTest):
        def init_args(self):
            self.k = 3
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 20).astype(self.dtype)

        def setUp(self):
            self.op_type = "top_k_v2"
            self.dtype = self.in_type
            self.init_args()
            self.inputs = {'X': self.input_data}
            self.attrs = {
                'k': self.k,
                'axis': self.axis,
                'largest': self.largest
            }
            output, indices = numpy_topk(
                self.input_data, axis=self.axis, k=self.k, largest=self.largest)
            self.outputs = {'Out': output, 'Indices': indices}
@@ -68,222 +78,94 @@ class TestTopkOp(XPUOpTest):
                place = paddle.XPUPlace(0)
                self.check_grad(set(['X']), 'Out')
    class TestTopkOp1(TestTopkOp):
        def init_args(self):
            self.k = 3
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(100, 155).astype(self.dtype)

    class TestTopkOp2(TestTopkOp):
        def init_args(self):
            self.k = 3
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp3(TestTopkOp):
        def init_args(self):
            self.k = 5
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp4(TestTopkOp):
        def init_args(self):
            self.k = 1
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp5(TestTopkOp):
        def init_args(self):
            self.k = 3
            self.axis = 2
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp6(TestTopkOp):
        def init_args(self):
            self.k = 5
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(8, 32, 64).astype(self.dtype)

    class TestTopkOp7(TestTopkOp):
        def init_args(self):
            self.k = 10
            self.axis = 2
            self.largest = True
            self.input_data = np.random.rand(8, 5, 10, 16).astype(self.dtype)

    class TestTopkOp8(TestTopkOp):
        def init_args(self):
            self.k = 1
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(8, 32, 64).astype(self.dtype)

    class TestTopkOp9(TestTopkOp):
        def init_args(self):
            self.k = 3
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp10(TestTopkOp):
        def init_args(self):
            self.k = 3
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp11(TestTopkOp):
        def init_args(self):
            self.k = 5
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp12(TestTopkOp):
        def init_args(self):
            self.k = 1
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
support_types = get_xpu_op_support_types('top_k_v2')
for stype in support_types:
    create_test_class(globals(), XPUTestTopKV2Op, stype)
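# If support_types came back as, say, ['float32', 'float16'] (illustrative --
# the real list depends on the registered XPU top_k_v2 kernels), the thirteen
# TestTopkOp* classes above would each be generated twice, once per dtype.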
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()