Unverified commit 23a69bc7, authored by ykkk2333, committed by GitHub

update elementwise unittest style, *test=kunlun (#40779)

Parent bdef57cd
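Every file in this commit applies the same refactor: the per-class @unittest.skipIf(... XPU ...) guards and the hand-written FP16 variants are dropped, each operator's test classes are nested inside an XPUOpTestWrapper subclass, dtypes come from self.in_type, and concrete unittest classes are generated once per supported dtype. A condensed sketch of the pattern follows; 'some_op' is a placeholder, and the behavior of the xpu.get_test_cover_info harness is inferred from its usage in the diff below, not from its documentation.

from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (create_test_class,
                                     get_xpu_op_support_types,
                                     XPUOpTestWrapper)


class XPUTestSomeOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'some_op'              # operator under test
        self.use_dynamic_create_class = False

    class TestSomeOp(XPUOpTest):
        def setUp(self):
            self.op_type = 'some_op'
            # in_type is injected per supported dtype by the harness
            self.dtype = self.in_type


# register one concrete test class per dtype the XPU kernel supports
support_types = get_xpu_op_support_types('some_op')
for stype in support_types:
    create_test_class(globals(), XPUTestSomeOp, stype)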
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,12 +22,17 @@ from op_test_xpu import XPUOpTest
import unittest
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
-class TestElementwiseAddOp(XPUOpTest):
+class XPUTestElementwiseAddOp(XPUOpTestWrapper):
+def __init__(self):
+self.op_name = 'elementwise_add'
+self.use_dynamic_create_class = False
+class TestElementwiseAddOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_add"
self.init_dtype()
@@ -78,7 +83,7 @@ class TestElementwiseAddOp(XPUOpTest):
self.out = np.add(self.x, self.y)
def init_dtype(self):
-self.dtype = np.float32
+self.dtype = self.in_type
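# note: self.in_type is set on each generated test class by the XPU test
# harness (create_test_class), once per supported dtype; see the
# registration loop at the end of the file.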
def init_axis(self):
self.axis = -1
@@ -86,41 +91,29 @@ class TestElementwiseAddOp(XPUOpTest):
def init_max_relative_error(self):
self.max_relative_error = 0.006
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(1).astype(self.dtype)
self.out = self.x + self.y
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1,1) to test broadcast.")
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4).astype(self.dtype)
self.y = np.random.rand(1, 1).astype(self.dtype)
self.out = self.x + self.y
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.random((100, )).astype(self.dtype)
self.y = np.random.random((100, )).astype(self.dtype)
self.out = np.add(self.x, self.y)
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
@@ -129,10 +122,7 @@ class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
def init_axis(self):
self.axis = 0
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 100, 3).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
@@ -141,19 +131,13 @@ class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
def init_axis(self):
self.axis = 1
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 100).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 1, 100)
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
self.y = np.random.rand(10, 12).astype(self.dtype)
@@ -162,10 +146,7 @@ class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
def init_axis(self):
self.axis = 1
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype)
self.y = np.random.rand(100, 1).astype(self.dtype)
@@ -174,37 +155,25 @@ class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
def init_axis(self):
self.axis = 0
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(10, 3, 12).astype(self.dtype)
self.y = np.random.rand(10, 1, 12).astype(self.dtype)
self.out = self.x + self.y
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
self.out = self.x + self.y
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
self.out = self.x + self.y
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 10, 12).astype(self.dtype)
self.y = np.random.rand(10, 12).astype(self.dtype)
@@ -213,12 +182,9 @@ class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
def init_axis(self):
self.axis = 1
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 1).astype(self.dtype)
self.y = np.random.rand(1).astype(self.dtype)
@@ -227,10 +193,7 @@ class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
def init_axis(self):
self.axis = 1
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3).astype(self.dtype)
self.y = np.random.rand(100, 1, 1).astype(self.dtype)
@@ -239,10 +202,7 @@ class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp):
def init_axis(self):
self.axis = -1
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(2, 3, 100).astype(self.dtype)
self.y = np.random.rand(1, 1, 100).astype(self.dtype)
@@ -251,10 +211,7 @@ class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp):
def init_axis(self):
self.axis = -1
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
@@ -263,10 +220,7 @@ class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp):
def init_axis(self):
self.axis = -1
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.rand(10, 12).astype(self.dtype)
self.y = np.random.rand(2, 3, 10, 12).astype(self.dtype)
@@ -275,10 +229,7 @@ class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp):
def init_axis(self):
self.axis = 2
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseAddOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# the input of elementwise_add must be Variable.
@@ -286,18 +237,19 @@ class TestElementwiseAddOpError(unittest.TestCase):
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
y1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1)
# the input dtype of elementwise_add must be float16 or float32 or float64 or int32 or int64
# float16 only can be set on GPU place
x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8")
y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8")
self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2)
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestAddOp(unittest.TestCase):
def test_name(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name="x", shape=[2, 3], dtype="float32")
@@ -337,170 +289,9 @@ class TestAddOp(unittest.TestCase):
self.assertEqual((np_z == z_expected).all(), True)
-######## fp16 test
-class TestElementwiseAddFP16Op(TestElementwiseAddOp):
-def init_dtype(self):
-self.dtype = np.float16
-def init_max_relative_error(self):
-self.max_relative_error = 0.01
-class TestElementwiseAddOp_scalarFP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(2, 3, 4).astype(self.dtype)
-self.y = np.random.rand(1).astype(self.dtype)
-self.out = self.x + self.y
-class TestElementwiseAddOp_scalar2FP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(2, 3, 4).astype(self.dtype)
-self.y = np.random.rand(1, 1).astype(self.dtype)
-self.out = self.x + self.y
-class TestElementwiseAddOp_VectorFP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.random((100, )).astype(self.dtype)
-self.y = np.random.random((100, )).astype(self.dtype)
-self.out = np.add(self.x, self.y)
-class TestElementwiseAddOp_broadcast_0FP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(100, 2, 3).astype(self.dtype)
-self.y = np.random.rand(100).astype(self.dtype)
-self.out = self.x + self.y.reshape(100, 1, 1)
-def init_axis(self):
-self.axis = 0
-class TestElementwiseAddOp_broadcast_1FP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(2, 100, 3).astype(self.dtype)
-self.y = np.random.rand(100).astype(self.dtype)
-self.out = self.x + self.y.reshape(1, 100, 1)
-def init_axis(self):
-self.axis = 1
-class TestElementwiseAddOp_broadcast_2FP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(2, 3, 100).astype(self.dtype)
-self.y = np.random.rand(100).astype(self.dtype)
-self.out = self.x + self.y.reshape(1, 1, 100)
-class TestElementwiseAddOp_broadcast_3FP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype)
-self.y = np.random.rand(10, 12).astype(self.dtype)
-self.out = self.x + self.y.reshape(1, 10, 12, 1)
-def init_axis(self):
-self.axis = 1
-class TestElementwiseAddOp_broadcast_4FP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype)
-self.y = np.random.rand(100, 1).astype(self.dtype)
-self.out = self.x + self.y.reshape(100, 1, 1, 1)
-def init_axis(self):
-self.axis = 0
-class TestElementwiseAddOp_broadcast_5FP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(10, 3, 12).astype(self.dtype)
-self.y = np.random.rand(10, 1, 12).astype(self.dtype)
-self.out = self.x + self.y
-def init_dtype(self):
-self.dtype = np.float16
-class TestElementwiseAddOp_broadcast_6FP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype)
-self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype)
-self.out = self.x + self.y
-class TestElementwiseAddOp_broadcast_7FP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype)
-self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype)
-self.out = self.x + self.y
-def init_dtype(self):
-self.dtype = np.float16
-class TestElementwiseAddOp_rowwise_add_0FP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(2, 10, 12).astype(self.dtype)
-self.y = np.random.rand(10, 12).astype(self.dtype)
-self.out = self.x + self.y.reshape(1, 10, 12)
-def init_axis(self):
-self.axis = 1
-class TestElementwiseAddOp_rowwise_add_1FP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(100, 1).astype(self.dtype)
-self.y = np.random.rand(1).astype(self.dtype)
-self.out = self.x + self.y.reshape(1, 1)
-def init_axis(self):
-self.axis = 1
-class TestElementwiseAddOp_channelwise_addFP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(100, 2, 3).astype(self.dtype)
-self.y = np.random.rand(100, 1, 1).astype(self.dtype)
-self.out = self.x + self.y
-def init_axis(self):
-self.axis = -1
-class TestElementwiseAddOp_commonuse_add1FP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(2, 3, 100).astype(self.dtype)
-self.y = np.random.rand(1, 1, 100).astype(self.dtype)
-self.out = self.x + self.y
-def init_axis(self):
-self.axis = -1
-class TestElementwiseAddOp_commonuse_add2FP16(TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype)
-self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype)
-self.out = self.x + self.y
-def init_axis(self):
-self.axis = -1
-class TestElementwiseAddOp_xsize_lessthan_ysize_addFP16(
-TestElementwiseAddFP16Op):
-def init_input_output(self):
-self.x = np.random.rand(10, 12).astype(self.dtype)
-self.y = np.random.rand(2, 3, 10, 12).astype(self.dtype)
-self.out = self.x + self.y
-def init_axis(self):
-self.axis = 2
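# note: the dtype-specific FP16 classes removed above are superseded by the
# registration loop below, which creates one concrete test class per dtype
# reported by get_xpu_op_support_types.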
+support_types = get_xpu_op_support_types('elementwise_add')
+for stype in support_types:
+create_test_class(globals(), XPUTestElementwiseAddOp, stype)
if __name__ == '__main__':
unittest.main()
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,27 +20,37 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
-class ElementwiseDivOp(XPUOpTest):
+class XPUTestElementwiseDivOp(XPUOpTestWrapper):
+def __init__(self):
+self.op_name = 'elementwise_div'
+self.use_dynamic_create_class = False
+class ElementwiseDivOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_div"
-self.dtype = np.float32
+self.dtype = self.in_type
self.init_dtype()
self.use_xpu = True
+self.init_input_output()
""" Warning
CPU gradient check error!
'X': np.random.random((32,84)).astype("float32"),
'Y': np.random.random((32,84)).astype("float32")
"""
+def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
@@ -74,41 +84,31 @@ class ElementwiseDivOp(XPUOpTest):
def init_dtype(self):
pass
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseDivOp_scalar(ElementwiseDivOp):
-def setUp(self):
-self.op_type = "elementwise_div"
+def init_input_output(self):
self.inputs = {
-'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(np.float32),
-'Y': np.random.uniform(0.1, 1, [1]).astype(np.float32)
+'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(self.dtype),
+'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseDivOp_Vector(ElementwiseDivOp):
-def setUp(self):
-self.op_type = "elementwise_div"
+def init_input_output(self):
self.inputs = {
-'X': np.random.uniform(0.1, 1, [100]).astype("float32"),
-'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
+'X': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
+'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp):
-def setUp(self):
-self.op_type = "elementwise_div"
+def init_input_output(self):
self.inputs = {
-'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype("float32"),
-'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
+'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype(self.dtype),
+'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.attrs = {'axis': 0}
@@ -117,15 +117,11 @@ class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp):
np.divide(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp):
-def setUp(self):
-self.op_type = "elementwise_div"
+def init_input_output(self):
self.inputs = {
-'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype("float32"),
-'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
+'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype(self.dtype),
+'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.attrs = {'axis': 1}
@@ -134,15 +130,11 @@ class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp):
np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp):
-def setUp(self):
-self.op_type = "elementwise_div"
+def init_input_output(self):
self.inputs = {
-'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype("float32"),
-'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
+'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(self.dtype),
+'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.outputs = {
@@ -150,90 +142,78 @@ class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp):
np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_3(ElementwiseDivOp):
-def setUp(self):
-self.op_type = "elementwise_div"
+def init_input_output(self):
self.inputs = {
-'X': np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype("float32"),
-'Y': np.random.uniform(0.1, 1, [10, 12]).astype("float32")
+'X': np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype(self.dtype),
+'Y': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 10, 12, 1))
}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_4(ElementwiseDivOp):
-def setUp(self):
-self.op_type = "elementwise_div"
+def init_input_output(self):
self.inputs = {
-'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype("float32"),
-'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype("float32")
+'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype(self.dtype),
+'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype(self.dtype)
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_5(ElementwiseDivOp):
-def setUp(self):
-self.op_type = "elementwise_div"
+def init_input_output(self):
self.inputs = {
-'X': np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype("float32"),
-'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype("float32")
+'X': np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype(self.dtype),
+'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype(self.dtype)
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseDivOp_commonuse_1(ElementwiseDivOp):
-def setUp(self):
-self.op_type = "elementwise_div"
+def init_input_output(self):
self.inputs = {
-'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype("float32"),
-'Y': np.random.uniform(0.1, 1, [1, 1, 100]).astype("float32"),
+'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(self.dtype),
+'Y': np.random.uniform(0.1, 1, [1, 1, 100]).astype(self.dtype),
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseDivOp_commonuse_2(ElementwiseDivOp):
-def setUp(self):
-self.op_type = "elementwise_div"
+def init_input_output(self):
self.inputs = {
-'X': np.random.uniform(0.1, 1, [30, 3, 1, 5]).astype("float32"),
-'Y': np.random.uniform(0.1, 1, [30, 1, 4, 1]).astype("float32"),
+'X': np.random.uniform(0.1, 1, [30, 3, 1, 5]).astype(self.dtype),
+'Y': np.random.uniform(0.1, 1, [30, 1, 4, 1]).astype(self.dtype),
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseDivOp_xsize_lessthan_ysize(ElementwiseDivOp):
-def setUp(self):
-self.op_type = "elementwise_div"
+def init_input_output(self):
self.inputs = {
-'X': np.random.uniform(0.1, 1, [10, 12]).astype("float32"),
-'Y': np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype("float32"),
+'X': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype),
+'Y': np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype(self.dtype),
}
self.attrs = {'axis': 2}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseDivBroadcast(unittest.TestCase):
def test_shape_with_batch_sizes(self):
with fluid.program_guard(fluid.Program()):
x_var = fluid.data(
@@ -241,10 +221,15 @@ class TestElementwiseDivBroadcast(unittest.TestCase):
one = 2.
out = one / x_var
exe = fluid.Executor(fluid.XPUPlace(0))
x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype('float32')
out_result, = exe.run(feed={'x': x}, fetch_list=[out])
self.assertEqual((out_result == (2 / x)).all(), True)
+support_types = get_xpu_op_support_types('elementwise_div')
+for stype in support_types:
+create_test_class(globals(), XPUTestElementwiseDivOp, stype)
if __name__ == '__main__':
unittest.main()
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,21 +20,24 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
import random
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
-class TestElementwiseModOp(XPUOpTest):
+class XPUTestElementwiseModOp(XPUOpTestWrapper):
+def __init__(self):
+self.op_name = 'elementwise_floordiv'
+self.use_dynamic_create_class = False
+class TestElementwiseModOp(XPUOpTest):
def init_kernel_type(self):
self.use_mkldnn = False
def setUp(self):
self.op_type = "elementwise_floordiv"
-self.dtype = np.float32
+self.dtype = self.in_type
self.axis = -1
self.init_dtype()
self.init_input_output()
self.init_kernel_type()
self.init_axis()
@@ -53,35 +56,30 @@ class TestElementwiseModOp(XPUOpTest):
def init_input_output(self):
self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
-self.y = np.random.uniform(0, 1000, [10, 10]).astype(self.dtype)
+self.y = np.random.uniform(1, 1000, [10, 10]).astype(self.dtype)
self.out = np.floor_divide(self.x, self.y)
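# note: the divisor is now sampled from [1, 1000) rather than [0, 1000),
# so floor_divide never sees a zero (or near-zero) denominator.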
def init_dtype(self):
pass
def init_axis(self):
pass
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseModOp_scalar(TestElementwiseModOp):
def init_input_output(self):
-scale_x = random.randint(0, 100000000)
-scale_y = random.randint(1, 100000000)
+scale_x = random.randint(0, 100000)
+scale_y = random.randint(1, 100000)
self.x = (np.random.rand(2, 3, 4) * scale_x).astype(self.dtype)
self.y = (np.random.rand(1) * scale_y + 1).astype(self.dtype)
self.out = np.floor_divide(self.x, self.y)
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseModOpInverse(TestElementwiseModOp):
def init_input_output(self):
self.x = np.random.uniform(0, 10000, [10]).astype(self.dtype)
-self.y = np.random.uniform(0, 1000, [10, 10]).astype(self.dtype)
+self.y = np.random.uniform(1, 1000, [10, 10]).astype(self.dtype)
self.out = np.floor_divide(self.x, self.y)
+support_types = get_xpu_op_support_types('elementwise_floordiv')
+for stype in support_types:
+create_test_class(globals(), XPUTestElementwiseModOp, stype)
if __name__ == '__main__':
unittest.main()
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,23 +18,33 @@ import numpy as np
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
import paddle
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
-class TestElementwiseOp(XPUOpTest):
+class XPUTestElementwiseMaxOp(XPUOpTestWrapper):
+def __init__(self):
+self.op_name = 'elementwise_max'
+self.use_dynamic_create_class = False
+class TestElementwiseOp(XPUOpTest):
def setUp(self):
self.use_xpu = True
self.op_type = "elementwise_max"
+self.dtype = self.in_type
+self.init_input_output()
# If x and y have the same value, the max() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
-x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
-sgn = np.random.choice([-1, 1], [13, 17]).astype("float32")
-y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float32")
+def init_input_output(self):
+x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
+sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype)
+y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
@@ -64,116 +74,98 @@ class TestElementwiseOp(XPUOpTest):
max_relative_error=0.006,
no_grad_set=set('Y'))
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMaxOp_scalar(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_max"
-x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32")
-y = np.array([0.5]).astype("float32")
+def init_input_output(self):
+x = np.random.random_integers(-5, 5, [2, 3, 20]).astype(self.dtype)
+y = np.array([0.5]).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMaxOp_Vector(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_max"
-x = np.random.random((100, )).astype("float32")
-sgn = np.random.choice([-1, 1], (100, )).astype("float32")
-y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float32")
+def init_input_output(self):
+x = np.random.random((100, )).astype(self.dtype)
+sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
+y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_max"
-x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float32)
-sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
+def init_input_output(self):
+x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(self.dtype)
+sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[:, 0, 0] + sgn * \
-np.random.uniform(1, 2, (100, )).astype(np.float32)
+np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 0}
self.outputs = {
'Out': np.maximum(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_max"
-x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float32)
-sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
+def init_input_output(self):
+x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype)
+sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[0, :, 0] + sgn * \
-np.random.uniform(1, 2, (100, )).astype(np.float32)
+np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_max"
-x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float32)
-sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
+def init_input_output(self):
+x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(self.dtype)
+sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[0, 0, :] + sgn * \
-np.random.uniform(1, 2, (100, )).astype(np.float32)
+np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_max"
-x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float32)
-sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float32)
+def init_input_output(self):
+x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(self.dtype)
+sgn = np.random.choice([-1, 1], (50, 2)).astype(self.dtype)
y = x[0, :, :, 0] + sgn * \
-np.random.uniform(1, 2, (50, 2)).astype(np.float32)
+np.random.uniform(1, 2, (50, 2)).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1))
}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_max"
-x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32)
-sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float32)
+def init_input_output(self):
+x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(self.dtype)
+sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(self.dtype)
y = x + sgn * \
-np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float32)
+np.random.uniform(1, 2, (2, 3, 1, 5)).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
+support_types = get_xpu_op_support_types('elementwise_max')
+for stype in support_types:
+create_test_class(globals(), XPUTestElementwiseMaxOp, stype)
if __name__ == '__main__':
unittest.main()
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,22 +20,33 @@ import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
-class TestElementwiseOp(XPUOpTest):
+class XPUTestElementwiseMinOp(XPUOpTestWrapper):
+def __init__(self):
+self.op_name = 'elementwise_min'
+self.use_dynamic_create_class = False
+class TestElementwiseOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_min"
# If x and y have the same value, the min() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
-x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
-sgn = np.random.choice([-1, 1], [13, 17]).astype("float32")
-y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float32")
+self.dtype = self.in_type
+self.init_input_output()
+def init_input_output(self):
+x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
+sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype)
+y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
@@ -65,116 +76,93 @@ class TestElementwiseOp(XPUOpTest):
max_relative_error=0.005,
no_grad_set=set('Y'))
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseMinOp_scalar(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_min"
-x = np.random.random_integers(-5, 5, [10, 3, 4]).astype("float32")
-y = np.array([0.5]).astype("float32")
+def init_input_output(self):
+x = np.random.random_integers(-5, 5, [10, 3, 4]).astype(self.dtype)
+y = np.array([0.5]).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMinOp_Vector(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_min"
-x = np.random.random((100, )).astype("float32")
-sgn = np.random.choice([-1, 1], (100, )).astype("float32")
-y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float32")
+def init_input_output(self):
+x = np.random.random((100, )).astype(self.dtype)
+sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
+y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_min"
-x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(np.float32)
-sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
+def init_input_output(self):
+x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(self.dtype)
+sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[:, 0, 0] + sgn * \
-np.random.uniform(1, 2, (100, )).astype(np.float32)
+np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.attrs = {'axis': 0}
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_min"
-x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float32)
-sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
+def init_input_output(self):
+x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype)
+sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[0, :, 0] + sgn * \
-np.random.uniform(1, 2, (100, )).astype(np.float32)
+np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.attrs = {'axis': 1}
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_min"
-x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(np.float32)
-sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
+def init_input_output(self):
+x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(self.dtype)
+sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[0, 0, :] + sgn * \
-np.random.uniform(1, 2, (100, )).astype(np.float32)
+np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_min"
-x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(np.float32)
-sgn = np.random.choice([-1, 1], (25, 4)).astype(np.float32)
+def init_input_output(self):
+x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(self.dtype)
+sgn = np.random.choice([-1, 1], (25, 4)).astype(self.dtype)
y = x[0, :, :, 0] + sgn * \
-np.random.uniform(1, 2, (25, 4)).astype(np.float32)
+np.random.uniform(1, 2, (25, 4)).astype(self.dtype)
self.attrs = {'axis': 1}
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 25, 4, 1))
}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
-def setUp(self):
-self.op_type = "elementwise_min"
-x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(np.float32)
-sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(np.float32)
+def init_input_output(self):
+x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(self.dtype)
+sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(self.dtype)
y = x + sgn * \
-np.random.uniform(1, 2, (2, 10, 1, 5)).astype(np.float32)
+np.random.uniform(1, 2, (2, 10, 1, 5)).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
+support_types = get_xpu_op_support_types('elementwise_min')
+for stype in support_types:
+create_test_class(globals(), XPUTestElementwiseMinOp, stype)
if __name__ == '__main__':
unittest.main()
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,32 +20,30 @@ import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
-class ElementwiseMulOp(XPUOpTest):
+class XPUTestElementwiseMulOp(XPUOpTestWrapper):
+def __init__(self):
+self.op_name = 'elementwise_mul'
+self.use_dynamic_create_class = False
+class ElementwiseMulOp(XPUOpTest):
def init_kernel_type(self):
self.use_mkldnn = False
def setUp(self):
-self.op_type = 'elementwise_mul'
+self.use_xpu = True
+self.op_type = "elementwise_mul"
-self.dtype = np.float32
+self.dtype = self.in_type
self.axis = -1
self.init_dtype()
self.init_input_output()
self.init_kernel_type()
self.init_axis()
-self.inputs = {
-'X': OpTest.np_dtype_to_fluid_dtype(self.x),
-'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
-}
-self.outputs = {'Out': self.out}
-self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
@@ -81,6 +79,12 @@ class ElementwiseMulOp(XPUOpTest):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.out = np.multiply(self.x, self.y)
+self.inputs = {
+'X': OpTest.np_dtype_to_fluid_dtype(self.x),
+'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
+}
+self.outputs = {'Out': self.out}
+self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
def init_dtype(self):
pass
@@ -88,157 +92,109 @@ class ElementwiseMulOp(XPUOpTest):
def init_axis(self):
pass
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMulOp_scalar(ElementwiseMulOp):
-def setUp(self):
-self.op_type = "elementwise_mul"
+def init_input_output(self):
self.inputs = {
-'X': np.random.rand(10, 3, 4).astype(np.float32),
-'Y': np.random.rand(1).astype(np.float32)
+'X': np.random.rand(10, 3, 4).astype(self.dtype),
+'Y': np.random.rand(1).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
-self.init_kernel_type()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMulOp_Vector(ElementwiseMulOp):
-def setUp(self):
-self.op_type = "elementwise_mul"
+def init_input_output(self):
self.inputs = {
-'X': np.random.random((100, )).astype("float32"),
-'Y': np.random.random((100, )).astype("float32")
+'X': np.random.random((100, )).astype(self.dtype),
+'Y': np.random.random((100, )).astype(self.dtype)
}
self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])}
-self.init_kernel_type()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp):
def init_input_output(self):
-self.x = np.random.rand(100, 2, 3).astype(self.dtype)
-self.y = np.random.rand(100).astype(self.dtype)
-self.out = self.x * self.y.reshape(100, 1, 1)
-def init_axis(self):
-self.axis = 0
+self.inputs = {
+'X': np.random.rand(100, 2, 3).astype(self.dtype),
+'Y': np.random.rand(100).astype(self.dtype)
+}
+self.outputs = {
+'Out': self.inputs['X'] * self.inputs['Y'].reshape(100, 1, 1)
+}
+self.attrs = {'axis': 0}
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp):
-def setUp(self):
-self.op_type = "elementwise_mul"
+def init_input_output(self):
self.inputs = {
-'X': np.random.rand(2, 100, 3).astype(np.float32),
-'Y': np.random.rand(100).astype(np.float32)
+'X': np.random.rand(2, 100, 3).astype(self.dtype),
+'Y': np.random.rand(100).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 100, 1)
}
-self.init_kernel_type()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp):
-def setUp(self):
-self.op_type = "elementwise_mul"
+def init_input_output(self):
self.inputs = {
-'X': np.random.rand(2, 3, 100).astype(np.float32),
-'Y': np.random.rand(100).astype(np.float32)
+'X': np.random.rand(2, 3, 100).astype(self.dtype),
+'Y': np.random.rand(100).astype(self.dtype)
}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 100)
}
-self.init_kernel_type()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp):
-def setUp(self):
-self.op_type = "elementwise_mul"
+def init_input_output(self):
self.inputs = {
-'X': np.random.rand(2, 10, 12, 3).astype(np.float32),
-'Y': np.random.rand(10, 12).astype(np.float32)
+'X': np.random.rand(2, 10, 12, 3).astype(self.dtype),
+'Y': np.random.rand(10, 12).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1)
}
-self.init_kernel_type()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp):
-def setUp(self):
-self.op_type = "elementwise_mul"
+def init_input_output(self):
self.inputs = {
-'X': np.random.rand(10, 2, 11).astype(np.float32),
-'Y': np.random.rand(10, 1, 11).astype(np.float32)
+'X': np.random.rand(10, 2, 11).astype(self.dtype),
+'Y': np.random.rand(10, 1, 11).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
-self.init_kernel_type()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp):
-def setUp(self):
-self.op_type = "elementwise_mul"
+def init_input_output(self):
self.inputs = {
-'X': np.random.rand(10, 4, 2, 3).astype(np.float32),
-'Y': np.random.rand(10, 4, 1, 3).astype(np.float32)
+'X': np.random.rand(10, 4, 2, 3).astype(self.dtype),
+'Y': np.random.rand(10, 4, 1, 3).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
-self.init_kernel_type()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
-def setUp(self):
-self.op_type = "elementwise_mul"
+def init_input_output(self):
self.inputs = {
-'X': np.random.rand(2, 3, 100).astype(np.float32),
-'Y': np.random.rand(1, 1, 100).astype(np.float32)
+'X': np.random.rand(2, 3, 100).astype(self.dtype),
+'Y': np.random.rand(1, 1, 100).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
-self.init_kernel_type()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp):
-def setUp(self):
-self.op_type = "elementwise_mul"
+def init_input_output(self):
self.inputs = {
-'X': np.random.rand(30, 3, 1, 5).astype(np.float32),
-'Y': np.random.rand(30, 1, 4, 1).astype(np.float32)
+'X': np.random.rand(30, 3, 1, 5).astype(self.dtype),
+'Y': np.random.rand(30, 1, 4, 1).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
-self.init_kernel_type()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
-def setUp(self):
-self.op_type = "elementwise_mul"
+def init_input_output(self):
self.inputs = {
-'X': np.random.rand(10, 10).astype(np.float32),
-'Y': np.random.rand(2, 2, 10, 10).astype(np.float32)
+'X': np.random.rand(10, 10).astype(self.dtype),
+'Y': np.random.rand(2, 2, 10, 10).astype(self.dtype)
}
self.attrs = {'axis': 2}
@@ -246,12 +202,8 @@ class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
self.outputs = {
'Out': self.inputs['X'].reshape(1, 1, 10, 10) * self.inputs['Y']
}
-self.init_kernel_type()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwiseMulOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# the input of elementwise_mul must be Variable.
@@ -259,13 +211,21 @@ class TestElementwiseMulOpError(unittest.TestCase):
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
y1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
self.assertRaises(TypeError, fluid.layers.elementwise_mul, x1, y1)
# the input dtype of elementwise_mul must be float32
x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8")
y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8")
self.assertRaises(TypeError, fluid.layers.elementwise_mul, x2, y2)
+support_types = get_xpu_op_support_types('elementwise_mul')
+for stype in support_types:
+create_test_class(globals(), XPUTestElementwiseMulOp, stype)
if __name__ == '__main__':
unittest.main()
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,17 +20,28 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
-class TestElementwisePowOp(XPUOpTest):
+@skip_check_grad_ci(reason="XPU does not support grad op currently")
+class XPUTestElementwisePowOp(XPUOpTestWrapper):
+def __init__(self):
+self.op_name = 'elementwise_pow'
+self.use_dynamic_create_class = False
+class TestElementwisePowOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_pow"
+self.dtype = self.in_type
+self.__class__.no_need_check_grad = True
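# note: no_need_check_grad tells the XPU test harness to skip gradient
# checks, consistent with the skip_check_grad_ci reason above
# ("XPU does not support grad op currently").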
+self.compute_input_output()
+def compute_input_output(self):
self.inputs = {
-'X': np.random.uniform(1, 2, [20, 5]).astype("float32"),
-'Y': np.random.uniform(1, 2, [20, 5]).astype("float32")
+'X': np.random.uniform(1, 2, [20, 5]).astype(self.dtype),
+'Y': np.random.uniform(1, 2, [20, 5]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@@ -39,97 +50,65 @@ class TestElementwisePowOp(XPUOpTest):
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
-def test_check_grad_normal(self):
-if paddle.is_compiled_with_xpu():
-place = paddle.XPUPlace(0)
-self.check_grad_with_place(place, ['X', 'Y'], 'Out')
-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-"core is not compiled with XPU")
class TestElementwisePowOp_big_shape_1(TestElementwisePowOp):
-def setUp(self):
-self.op_type = "elementwise_pow"
+def compute_input_output(self):
self.inputs = {
-'X': np.random.uniform(1, 2, [10, 10]).astype("float32"),
-'Y': np.random.uniform(0.1, 1, [10, 10]).astype("float32")
+'X': np.random.uniform(1, 2, [10, 10]).astype(self.dtype),
+'Y': np.random.uniform(0.1, 1, [10, 10]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_big_shape_2(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
class TestElementwisePowOp_big_shape_2(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(1, 2, [10, 10]).astype("float32"),
'Y': np.random.uniform(0.2, 2, [10, 10]).astype("float32")
'X': np.random.uniform(1, 2, [10, 10]).astype(self.dtype),
'Y': np.random.uniform(0.2, 2, [10, 10]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
@skip_check_grad_ci(
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwisePowOp_scalar(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
class TestElementwisePowOp_scalar(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(np.float32),
'Y': np.random.uniform(0.1, 1, [1]).astype(np.float32)
'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_tensor(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
class TestElementwisePowOp_tensor(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [100]).astype("float32"),
'Y': np.random.uniform(1, 3, [100]).astype("float32")
'X': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
'Y': np.random.uniform(1, 3, [100]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1))
'Out':
np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.attrs = {'axis': 0}
self.outputs = {
......@@ -137,46 +116,44 @@ class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [20, 5]).astype("float32")
'X':
np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [20, 5]).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(1, 20, 5,
1))
'Out': np.power(self.inputs['X'],
self.inputs['Y'].reshape(1, 20, 5, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype("float32")
'X':
np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOpInt(OpTest):
class TestElementwisePowOpInt(OpTest):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {'X': np.asarray([1, 3, 6]), 'Y': np.asarray([1, 1, 1])}
self.inputs = {
'X': np.asarray([1, 3, 6]),
'Y': np.asarray([1, 1, 1])
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
self.check_output()
support_types = get_xpu_op_support_types('elementwise_pow')
for stype in support_types:
create_test_class(globals(), XPUTestElementwisePowOp, stype)
if __name__ == '__main__':
unittest.main()
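# A quick, Paddle-free sanity check of the numpy reference the pow tests lean
# on: np.power broadcasts a rank-1 Y against the trailing axis of X, which is
# exactly what the default-axis cases above encode. Shapes follow
# TestElementwisePowOp_broadcast_0; the snippet is illustrative only.
import numpy as np
x = np.random.uniform(0.1, 1, [2, 1, 100]).astype(np.float32)
y = np.random.uniform(0.1, 1, [100]).astype(np.float32)
assert np.power(x, y).shape == (2, 1, 100)
assert np.allclose(np.power(x, y), x ** y)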
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -19,18 +19,27 @@ import paddle
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
import unittest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseOp(OpTest):
class XPUTestElementwiseSubOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'elementwise_sub'
self.use_dynamic_create_class = False
class TestElementwiseOp(XPUOpTest):
def setUp(self):
self.use_xpu = True
self.op_type = "elementwise_sub"
self.use_xpu = True
self.dtype = self.in_type
self.init_input_output()
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32")
'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
......@@ -62,41 +71,29 @@ class TestElementwiseOp(OpTest):
max_relative_error=0.005,
no_grad_set=set('Y'))
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
@skip_check_grad_ci(
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseSubOp_scalar(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
class TestElementwiseSubOp_scalar(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 3, 4).astype(np.float32),
'Y': np.random.rand(1).astype(np.float32)
'X': np.random.rand(10, 3, 4).astype(self.dtype),
'Y': np.random.rand(1).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_Vector(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
class TestElementwiseSubOp_Vector(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.random((100, )).astype("float32"),
'Y': np.random.random((100, )).astype("float32")
'X': np.random.random((100, )).astype(self.dtype),
'Y': np.random.random((100, )).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(100, 3, 2).astype(np.float32),
'Y': np.random.rand(100).astype(np.float32)
'X': np.random.rand(100, 3, 2).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.attrs = {'axis': 0}
......@@ -104,15 +101,11 @@ class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
'Out': self.inputs['X'] - self.inputs['Y'].reshape(100, 1, 1)
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 100, 3).astype(np.float32),
'Y': np.random.rand(100).astype(np.float32)
'X': np.random.rand(2, 100, 3).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.attrs = {'axis': 1}
......@@ -120,30 +113,22 @@ class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 100, 1)
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float32),
'Y': np.random.rand(100).astype(np.float32)
'X': np.random.rand(2, 3, 100).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 100)
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 10, 12, 3).astype(np.float32),
'Y': np.random.rand(10, 12).astype(np.float32)
'X': np.random.rand(2, 10, 12, 3).astype(self.dtype),
'Y': np.random.rand(10, 12).astype(self.dtype)
}
self.attrs = {'axis': 1}
......@@ -151,51 +136,35 @@ class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 10, 12, 1)
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 5, 3, 12).astype(np.float32),
'Y': np.random.rand(2, 5, 1, 12).astype(np.float32)
'X': np.random.rand(2, 5, 3, 12).astype(self.dtype),
'Y': np.random.rand(2, 5, 1, 12).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float32),
'Y': np.random.rand(1, 1, 100).astype(np.float32)
'X': np.random.rand(2, 3, 100).astype(self.dtype),
'Y': np.random.rand(1, 1, 100).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 3, 1, 4).astype(np.float32),
'Y': np.random.rand(10, 1, 12, 1).astype(np.float32)
'X': np.random.rand(10, 3, 1, 4).astype(self.dtype),
'Y': np.random.rand(10, 1, 12, 1).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 12).astype(np.float32),
'Y': np.random.rand(2, 3, 10, 12).astype(np.float32)
'X': np.random.rand(10, 12).astype(self.dtype),
'Y': np.random.rand(2, 3, 10, 12).astype(self.dtype)
}
self.attrs = {'axis': 2}
......@@ -205,5 +174,9 @@ class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
}
support_types = get_xpu_op_support_types('elementwise_sub')
for stype in support_types:
create_test_class(globals(), XPUTestElementwiseSubOp, stype)
if __name__ == '__main__':
unittest.main()
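# The reshape targets in the broadcast cases above all follow one rule, shown
# here with plain numpy (shapes from TestElementwiseSubOp_broadcast_3): with
# axis=1, Y of shape (10, 12) is aligned to X's dimensions starting at index 1,
# i.e. padded to (1, 10, 12, 1) before the elementwise subtract. Illustrative
# only, not part of the test suite.
import numpy as np
x = np.random.rand(2, 10, 12, 3).astype(np.float32)
y = np.random.rand(10, 12).astype(np.float32)
ref = x - y.reshape(1, 10, 12, 1)  # matches the test's expected output
assert ref.shape == x.shape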
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -18,9 +18,10 @@ import unittest
import numpy as np
import sys
sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid.core as core
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
......@@ -41,19 +42,28 @@ def numpy_topk(x, k=1, axis=-1, largest=True):
return value, indices
class TestTopkOp(OpTest):
class XPUTestTopKV2Op(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'top_k_v2'
self.use_dynamic_create_class = False
class TestTopkOp(XPUOpTest):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 20).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 20)
self.init_args()
self.dtype = self.in_type
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
self.attrs = {
'k': self.k,
'axis': self.axis,
'largest': self.largest
}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
......@@ -68,222 +78,94 @@ class TestTopkOp(OpTest):
place = paddle.XPUPlace(0)
self.check_grad(set(['X']), 'Out')
class TestTopkOp1(TestTopkOp):
class TestTopkOp1(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
self.input_data = np.random.rand(100, 155).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp2(TestTopkOp):
class TestTopkOp2(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp3(TestTopkOp):
class TestTopkOp3(TestTopkOp):
def init_args(self):
self.k = 5
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp4(TestTopkOp):
class TestTopkOp4(TestTopkOp):
def init_args(self):
self.k = 1
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp5(TestTopkOp):
class TestTopkOp5(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 2
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp6(TestTopkOp):
class TestTopkOp6(TestTopkOp):
def init_args(self):
self.k = 5
self.axis = 1
self.largest = True
self.input_data = np.random.rand(8, 32, 64).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(8, 32, 64)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp7(TestTopkOp):
class TestTopkOp7(TestTopkOp):
def init_args(self):
self.k = 10
self.axis = 2
self.largest = True
self.input_data = np.random.rand(8, 5, 10, 16).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(8, 5, 10, 16)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp8(TestTopkOp):
class TestTopkOp8(TestTopkOp):
def init_args(self):
self.k = 1
self.axis = 1
self.largest = True
self.input_data = np.random.rand(8, 32, 64).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(8, 32, 64)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp9(TestTopkOp):
class TestTopkOp9(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp10(TestTopkOp):
class TestTopkOp10(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp11(TestTopkOp):
class TestTopkOp11(TestTopkOp):
def init_args(self):
self.k = 5
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp12(TestTopkOp):
class TestTopkOp12(TestTopkOp):
def init_args(self):
self.k = 1
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
support_types = get_xpu_op_support_types('top_k_v2')
for stype in support_types:
create_test_class(globals(), XPUTestTopKV2Op, stype)
if __name__ == "__main__":
unittest.main()
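# A minimal, self-contained check of the semantics numpy_topk is expected to
# follow for largest=True (top-k values along `axis`, descending, plus their
# indices); argsort-based and illustrative only:
import numpy as np
x = np.array([[1., 9., 3., 7.]])
idx = np.argsort(-x, axis=1)[:, :2]       # indices of the 2 largest entries
val = np.take_along_axis(x, idx, axis=1)  # their values, in descending order
assert (idx == [[1, 3]]).all() and (val == [[9., 7.]]).all()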