diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py index 9ef8cc1e02790ca19475df985ddf09ca72092781..2fc3a42df12646994304fc5a25d14eba80fce9da 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_add_op_xpu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,485 +22,276 @@ from op_test_xpu import XPUOpTest import unittest import paddle.fluid as fluid from paddle.fluid import compiler, Program, program_guard -paddle.enable_static() - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp(XPUOpTest): - def setUp(self): - self.op_type = "elementwise_add" - self.init_dtype() - self.init_input_output() - self.init_axis() - self.init_max_relative_error() - self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) - } - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} - self.outputs = {'Out': self.out} - - def test_check_output(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_output_with_place(place) - - def test_check_grad_normal(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['X', 'Y'], - 'Out', - max_relative_error=self.max_relative_error) - - def test_check_grad_ingore_x(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['Y'], - 'Out', - no_grad_set=set("X"), - max_relative_error=self.max_relative_error) - - def test_check_grad_ingore_y(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['X'], - 'Out', - no_grad_set=set("Y"), - max_relative_error=self.max_relative_error) - - def init_input_output(self): - self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) - self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) - self.out = np.add(self.x, self.y) - - def init_dtype(self): - self.dtype = np.float32 - - def init_axis(self): - self.axis = -1 - - def init_max_relative_error(self): - self.max_relative_error = 0.006 - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -@skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") -class TestElementwiseAddOp_scalar(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(2, 3, 4).astype(self.dtype) - self.y = np.random.rand(1).astype(self.dtype) - self.out = self.x + self.y - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -@skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1,1) to test broadcast.") -class TestElementwiseAddOp_scalar2(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(2, 3, 4).astype(self.dtype) - self.y = np.random.rand(1, 1).astype(self.dtype) - self.out = self.x + self.y - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_Vector(TestElementwiseAddOp): - def init_input_output(self): - self.x = 
np.random.random((100, )).astype(self.dtype) - self.y = np.random.random((100, )).astype(self.dtype) - self.out = np.add(self.x, self.y) - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(100, 2, 3).astype(self.dtype) - self.y = np.random.rand(100).astype(self.dtype) - self.out = self.x + self.y.reshape(100, 1, 1) - - def init_axis(self): - self.axis = 0 - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(2, 100, 3).astype(self.dtype) - self.y = np.random.rand(100).astype(self.dtype) - self.out = self.x + self.y.reshape(1, 100, 1) - - def init_axis(self): - self.axis = 1 - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(2, 3, 100).astype(self.dtype) - self.y = np.random.rand(100).astype(self.dtype) - self.out = self.x + self.y.reshape(1, 1, 100) - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype) - self.y = np.random.rand(10, 12).astype(self.dtype) - self.out = self.x + self.y.reshape(1, 10, 12, 1) - - def init_axis(self): - self.axis = 1 - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype) - self.y = np.random.rand(100, 1).astype(self.dtype) - self.out = self.x + self.y.reshape(100, 1, 1, 1) - - def init_axis(self): - self.axis = 0 - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(10, 3, 12).astype(self.dtype) - self.y = np.random.rand(10, 1, 12).astype(self.dtype) - self.out = self.x + self.y - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype) - self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype) - self.out = self.x + self.y - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype) - self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype) - self.out = self.x + self.y - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(2, 10, 12).astype(self.dtype) - self.y = np.random.rand(10, 12).astype(self.dtype) - self.out = self.x + self.y.reshape(1, 10, 12) - - def init_axis(self): - self.axis = 1 - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -@skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to 
test broadcast.") -class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(100, 1).astype(self.dtype) - self.y = np.random.rand(1).astype(self.dtype) - self.out = self.x + self.y.reshape(1, 1) - - def init_axis(self): - self.axis = 1 - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(100, 2, 3).astype(self.dtype) - self.y = np.random.rand(100, 1, 1).astype(self.dtype) - self.out = self.x + self.y - - def init_axis(self): - self.axis = -1 - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(2, 3, 100).astype(self.dtype) - self.y = np.random.rand(1, 1, 100).astype(self.dtype) - self.out = self.x + self.y - - def init_axis(self): - self.axis = -1 - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype) - self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype) - self.out = self.x + self.y - - def init_axis(self): - self.axis = -1 - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp): - def init_input_output(self): - self.x = np.random.rand(10, 12).astype(self.dtype) - self.y = np.random.rand(2, 3, 10, 12).astype(self.dtype) - self.out = self.x + self.y - - def init_axis(self): - self.axis = 2 - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseAddOpError(unittest.TestCase): - def test_errors(self): - with program_guard(Program(), Program()): - # the input of elementwise_add must be Variable. 
- x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0)) - y1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0)) - self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1) - - # the input dtype of elementwise_add must be float16 or float32 or float64 or int32 or int64 - # float16 only can be set on GPU place - x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8") - y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8") - self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2) - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestAddOp(unittest.TestCase): - def test_name(self): - with fluid.program_guard(fluid.Program()): - x = fluid.data(name="x", shape=[2, 3], dtype="float32") - y = fluid.data(name='y', shape=[2, 3], dtype='float32') - - y_1 = paddle.add(x, y, name='add_res') - self.assertEqual(('add_res' in y_1.name), True) - - def test_declarative(self): - with fluid.program_guard(fluid.Program()): - - def gen_data(): - return { - "x": np.array([2, 3, 4]).astype('float32'), - "y": np.array([1, 5, 2]).astype('float32') - } - - x = fluid.data(name="x", shape=[3], dtype='float32') - y = fluid.data(name="y", shape=[3], dtype='float32') - z = paddle.add(x, y) - - place = fluid.XPUPlace(0) - exe = fluid.Executor(place) - z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) - z_expected = np.array([3., 8., 6.]) - self.assertEqual((z_value == z_expected).all(), True) - - def test_dygraph(self): - with fluid.dygraph.guard(): - np_x = np.array([2, 3, 4]).astype('float32') - np_y = np.array([1, 5, 2]).astype('float32') - x = fluid.dygraph.to_variable(np_x) - y = fluid.dygraph.to_variable(np_y) - z = paddle.add(x, y) - np_z = z.numpy() - z_expected = np.array([3., 8., 6.]) - self.assertEqual((np_z == z_expected).all(), True) - - -######## fp16 test -class TestElementwiseAddFP16Op(TestElementwiseAddOp): - def init_dtype(self): - self.dtype = np.float16 - - def init_max_relative_error(self): - self.max_relative_error = 0.01 - - -class TestElementwiseAddOp_scalarFP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(2, 3, 4).astype(self.dtype) - self.y = np.random.rand(1).astype(self.dtype) - self.out = self.x + self.y - - -class TestElementwiseAddOp_scalar2FP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(2, 3, 4).astype(self.dtype) - self.y = np.random.rand(1, 1).astype(self.dtype) - self.out = self.x + self.y - - -class TestElementwiseAddOp_VectorFP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.random((100, )).astype(self.dtype) - self.y = np.random.random((100, )).astype(self.dtype) - self.out = np.add(self.x, self.y) - - -class TestElementwiseAddOp_broadcast_0FP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(100, 2, 3).astype(self.dtype) - self.y = np.random.rand(100).astype(self.dtype) - self.out = self.x + self.y.reshape(100, 1, 1) - - def init_axis(self): - self.axis = 0 - - -class TestElementwiseAddOp_broadcast_1FP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(2, 100, 3).astype(self.dtype) - self.y = np.random.rand(100).astype(self.dtype) - self.out = self.x + self.y.reshape(1, 100, 1) - - def init_axis(self): - self.axis = 1 - - -class TestElementwiseAddOp_broadcast_2FP16(TestElementwiseAddFP16Op): - def init_input_output(self): - 
self.x = np.random.rand(2, 3, 100).astype(self.dtype) - self.y = np.random.rand(100).astype(self.dtype) - self.out = self.x + self.y.reshape(1, 1, 100) - - -class TestElementwiseAddOp_broadcast_3FP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(2, 10, 12, 3).astype(self.dtype) - self.y = np.random.rand(10, 12).astype(self.dtype) - self.out = self.x + self.y.reshape(1, 10, 12, 1) - - def init_axis(self): - self.axis = 1 - - -class TestElementwiseAddOp_broadcast_4FP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype) - self.y = np.random.rand(100, 1).astype(self.dtype) - self.out = self.x + self.y.reshape(100, 1, 1, 1) +from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper - def init_axis(self): - self.axis = 0 - - -class TestElementwiseAddOp_broadcast_5FP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(10, 3, 12).astype(self.dtype) - self.y = np.random.rand(10, 1, 12).astype(self.dtype) - self.out = self.x + self.y - - def init_dtype(self): - self.dtype = np.float16 - - -class TestElementwiseAddOp_broadcast_6FP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype) - self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype) - self.out = self.x + self.y - - -class TestElementwiseAddOp_broadcast_7FP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype) - self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype) - self.out = self.x + self.y - - def init_dtype(self): - self.dtype = np.float16 - - -class TestElementwiseAddOp_rowwise_add_0FP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(2, 10, 12).astype(self.dtype) - self.y = np.random.rand(10, 12).astype(self.dtype) - self.out = self.x + self.y.reshape(1, 10, 12) - - def init_axis(self): - self.axis = 1 - - -class TestElementwiseAddOp_rowwise_add_1FP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(100, 1).astype(self.dtype) - self.y = np.random.rand(1).astype(self.dtype) - self.out = self.x + self.y.reshape(1, 1) - - def init_axis(self): - self.axis = 1 - - -class TestElementwiseAddOp_channelwise_addFP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(100, 2, 3).astype(self.dtype) - self.y = np.random.rand(100, 1, 1).astype(self.dtype) - self.out = self.x + self.y - - def init_axis(self): - self.axis = -1 - - -class TestElementwiseAddOp_commonuse_add1FP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(2, 3, 100).astype(self.dtype) - self.y = np.random.rand(1, 1, 100).astype(self.dtype) - self.out = self.x + self.y - - def init_axis(self): - self.axis = -1 - - -class TestElementwiseAddOp_commonuse_add2FP16(TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype) - self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype) - self.out = self.x + self.y - - def init_axis(self): - self.axis = -1 - - -class TestElementwiseAddOp_xsize_lessthan_ysize_addFP16( - TestElementwiseAddFP16Op): - def init_input_output(self): - self.x = np.random.rand(10, 12).astype(self.dtype) - self.y = np.random.rand(2, 3, 10, 12).astype(self.dtype) - self.out = self.x + self.y +paddle.enable_static() - def init_axis(self): - self.axis = 2 +class 
XPUTestElementwiseAddOp(XPUOpTestWrapper): + def __init__(self): + self.op_name = 'elementwise_add' + self.use_dynamic_create_class = False + + class TestElementwiseAddOp(XPUOpTest): + def setUp(self): + self.op_type = "elementwise_add" + self.init_dtype() + self.init_input_output() + self.init_axis() + self.init_max_relative_error() + self.inputs = { + 'X': OpTest.np_dtype_to_fluid_dtype(self.x), + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + } + self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} + self.outputs = {'Out': self.out} + + def test_check_output(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_output_with_place(place) + + def test_check_grad_normal(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['X', 'Y'], + 'Out', + max_relative_error=self.max_relative_error) + + def test_check_grad_ingore_x(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['Y'], + 'Out', + no_grad_set=set("X"), + max_relative_error=self.max_relative_error) + + def test_check_grad_ingore_y(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['X'], + 'Out', + no_grad_set=set("Y"), + max_relative_error=self.max_relative_error) + + def init_input_output(self): + self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + self.out = np.add(self.x, self.y) + + def init_dtype(self): + self.dtype = self.in_type + + def init_axis(self): + self.axis = -1 + + def init_max_relative_error(self): + self.max_relative_error = 0.006 + + @skip_check_grad_ci( + reason="[skip shape check] Use y_shape(1) to test broadcast.") + class TestElementwiseAddOp_scalar(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(1).astype(self.dtype) + self.out = self.x + self.y + + @skip_check_grad_ci( + reason="[skip shape check] Use y_shape(1,1) to test broadcast.") + class TestElementwiseAddOp_scalar2(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(1, 1).astype(self.dtype) + self.out = self.x + self.y + + class TestElementwiseAddOp_Vector(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.random((100, )).astype(self.dtype) + self.y = np.random.random((100, )).astype(self.dtype) + self.out = np.add(self.x, self.y) + + class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(100, 2, 3).astype(self.dtype) + self.y = np.random.rand(100).astype(self.dtype) + self.out = self.x + self.y.reshape(100, 1, 1) + + def init_axis(self): + self.axis = 0 + + class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 100, 3).astype(self.dtype) + self.y = np.random.rand(100).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 100, 1) + + def init_axis(self): + self.axis = 1 + + class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 100).astype(self.dtype) + self.y = np.random.rand(100).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 1, 100) + + class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp): + def init_input_output(self): + self.x = 
np.random.rand(2, 10, 12, 3).astype(self.dtype) + self.y = np.random.rand(10, 12).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 10, 12, 1) + + def init_axis(self): + self.axis = 1 + + class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(100, 2, 3, 4).astype(self.dtype) + self.y = np.random.rand(100, 1).astype(self.dtype) + self.out = self.x + self.y.reshape(100, 1, 1, 1) + + def init_axis(self): + self.axis = 0 + + class TestElementwiseAddOp_broadcast_5(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(10, 3, 12).astype(self.dtype) + self.y = np.random.rand(10, 1, 12).astype(self.dtype) + self.out = self.x + self.y + + class TestElementwiseAddOp_broadcast_6(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 12, 3, 5).astype(self.dtype) + self.y = np.random.rand(2, 12, 1, 5).astype(self.dtype) + self.out = self.x + self.y + + class TestElementwiseAddOp_broadcast_7(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(1, 1, 20, 5).astype(self.dtype) + self.y = np.random.rand(20, 5, 1, 1).astype(self.dtype) + self.out = self.x + self.y + + class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 10, 12).astype(self.dtype) + self.y = np.random.rand(10, 12).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 10, 12) + + def init_axis(self): + self.axis = 1 + + @skip_check_grad_ci( + reason="[skip shape check] Use y_shape(1) to test broadcast.") + class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(100, 1).astype(self.dtype) + self.y = np.random.rand(1).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 1) + + def init_axis(self): + self.axis = 1 + + class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(100, 2, 3).astype(self.dtype) + self.y = np.random.rand(100, 1, 1).astype(self.dtype) + self.out = self.x + self.y + + def init_axis(self): + self.axis = -1 + + class TestElementwiseAddOp_commonuse_add1(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(2, 3, 100).astype(self.dtype) + self.y = np.random.rand(1, 1, 100).astype(self.dtype) + self.out = self.x + self.y + + def init_axis(self): + self.axis = -1 + + class TestElementwiseAddOp_commonuse_add2(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(10, 3, 1, 4).astype(self.dtype) + self.y = np.random.rand(10, 1, 12, 1).astype(self.dtype) + self.out = self.x + self.y + + def init_axis(self): + self.axis = -1 + + class TestElementwiseAddOp_xsize_lessthan_ysize_add(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(10, 12).astype(self.dtype) + self.y = np.random.rand(2, 3, 10, 12).astype(self.dtype) + self.out = self.x + self.y + + def init_axis(self): + self.axis = 2 + + class TestElementwiseAddOpError(unittest.TestCase): + def test_errors(self): + with program_guard(Program(), Program()): + # the input of elementwise_add must be Variable. 
+ x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0)) + y1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0)) + self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, + y1) + + # the input dtype of elementwise_add must be float16 or float32 or float64 or int32 or int64 + # float16 only can be set on GPU place + x2 = fluid.layers.data( + name='x2', shape=[3, 4, 5, 6], dtype="uint8") + y2 = fluid.layers.data( + name='y2', shape=[3, 4, 5, 6], dtype="uint8") + self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, + y2) + + class TestAddOp(unittest.TestCase): + def test_name(self): + with fluid.program_guard(fluid.Program()): + x = fluid.data(name="x", shape=[2, 3], dtype="float32") + y = fluid.data(name='y', shape=[2, 3], dtype='float32') + + y_1 = paddle.add(x, y, name='add_res') + self.assertEqual(('add_res' in y_1.name), True) + + def test_declarative(self): + with fluid.program_guard(fluid.Program()): + + def gen_data(): + return { + "x": np.array([2, 3, 4]).astype('float32'), + "y": np.array([1, 5, 2]).astype('float32') + } + + x = fluid.data(name="x", shape=[3], dtype='float32') + y = fluid.data(name="y", shape=[3], dtype='float32') + z = paddle.add(x, y) + + place = fluid.XPUPlace(0) + exe = fluid.Executor(place) + z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) + z_expected = np.array([3., 8., 6.]) + self.assertEqual((z_value == z_expected).all(), True) + + def test_dygraph(self): + with fluid.dygraph.guard(): + np_x = np.array([2, 3, 4]).astype('float32') + np_y = np.array([1, 5, 2]).astype('float32') + x = fluid.dygraph.to_variable(np_x) + y = fluid.dygraph.to_variable(np_y) + z = paddle.add(x, y) + np_z = z.numpy() + z_expected = np.array([3., 8., 6.]) + self.assertEqual((np_z == z_expected).all(), True) + + +support_types = get_xpu_op_support_types('elementwise_add') +for stype in support_types: + create_test_class(globals(), XPUTestElementwiseAddOp, stype) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py index 0fd35d7a4576638836d964f15d6a4643921674a7..3b593818b4e9c5bfb496257399687e948669e197 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_div_op_xpu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,231 +20,216 @@ import paddle.fluid as fluid import paddle.fluid.core as core from op_test import OpTest, skip_check_grad_ci from op_test_xpu import XPUOpTest -paddle.enable_static() +from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class ElementwiseDivOp(XPUOpTest): - def setUp(self): - self.op_type = "elementwise_div" - self.dtype = np.float32 - self.init_dtype() - self.use_xpu = True - """ Warning - CPU gradient check error! 
- 'X': np.random.random((32,84)).astype("float32"), - 'Y': np.random.random((32,84)).astype("float32") - """ - self.inputs = { - 'X': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype), - 'Y': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) - } - self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} - - def test_check_output(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_output_with_place(place) - - def test_check_grad_normal(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['X', 'Y'], 'Out', max_relative_error=0.05) - - def test_check_grad_ingore_x(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['Y'], - 'Out', - max_relative_error=0.05, - no_grad_set=set("X")) - - def test_check_grad_ingore_y(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['X'], - 'Out', - max_relative_error=0.05, - no_grad_set=set('Y')) - - def init_dtype(self): - pass - - -@skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseDivOp_scalar(ElementwiseDivOp): - def setUp(self): - self.op_type = "elementwise_div" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(np.float32), - 'Y': np.random.uniform(0.1, 1, [1]).astype(np.float32) - } - self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseDivOp_Vector(ElementwiseDivOp): - def setUp(self): - self.op_type = "elementwise_div" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [100]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float32") - } - self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp): - def setUp(self): - self.op_type = "elementwise_div" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float32") - } - - self.attrs = {'axis': 0} - self.outputs = { - 'Out': - np.divide(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp): - def setUp(self): - self.op_type = "elementwise_div" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float32") - } - - self.attrs = {'axis': 1} - self.outputs = { - 'Out': - np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp): - def setUp(self): - self.op_type = "elementwise_div" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float32") - } - - self.outputs = { - 'Out': - np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class 
TestElementwiseDivOp_broadcast_3(ElementwiseDivOp): - def setUp(self): - self.op_type = "elementwise_div" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [10, 12]).astype("float32") - } - - self.attrs = {'axis': 1} - self.outputs = { - 'Out': - np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 10, 12, 1)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseDivOp_broadcast_4(ElementwiseDivOp): - def setUp(self): - self.op_type = "elementwise_div" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype("float32") - } - self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseDivOp_broadcast_5(ElementwiseDivOp): - def setUp(self): - self.op_type = "elementwise_div" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype("float32") - } - self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseDivOp_commonuse_1(ElementwiseDivOp): - def setUp(self): - self.op_type = "elementwise_div" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [1, 1, 100]).astype("float32"), - } - self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseDivOp_commonuse_2(ElementwiseDivOp): - def setUp(self): - self.op_type = "elementwise_div" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [30, 3, 1, 5]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [30, 1, 4, 1]).astype("float32"), - } - self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseDivOp_xsize_lessthan_ysize(ElementwiseDivOp): - def setUp(self): - self.op_type = "elementwise_div" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [10, 12]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype("float32"), - } - - self.attrs = {'axis': 2} - - self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseDivBroadcast(unittest.TestCase): - def test_shape_with_batch_sizes(self): - with fluid.program_guard(fluid.Program()): - x_var = fluid.data( - name='x', dtype='float32', shape=[None, 3, None, None]) - one = 2. - out = one / x_var - exe = fluid.Executor(fluid.XPUPlace(0)) - x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype("float32") - out_result, = exe.run(feed={'x': x}, fetch_list=[out]) - self.assertEqual((out_result == (2 / x)).all(), True) +class XPUTestElementwiseDivOp(XPUOpTestWrapper): + def __init__(self): + self.op_name = 'elementwise_div' + self.use_dynamic_create_class = False + + class ElementwiseDivOp(XPUOpTest): + def setUp(self): + self.op_type = "elementwise_div" + self.dtype = self.in_type + self.init_dtype() + self.use_xpu = True + self.init_input_output() + """ Warning + CPU gradient check error! 
+ 'X': np.random.random((32,84)).astype("float32"), + 'Y': np.random.random((32,84)).astype("float32") + """ + + def init_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + } + self.outputs = { + 'Out': np.divide(self.inputs['X'], self.inputs['Y']) + } + + def test_check_output(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_output_with_place(place) + + def test_check_grad_normal(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['X', 'Y'], 'Out', max_relative_error=0.05) + + def test_check_grad_ingore_x(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['Y'], + 'Out', + max_relative_error=0.05, + no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['X'], + 'Out', + max_relative_error=0.05, + no_grad_set=set('Y')) + + def init_dtype(self): + pass + + @skip_check_grad_ci( + reason="[skip shape check] Use y_shape(1) to test broadcast.") + class TestElementwiseDivOp_scalar(ElementwiseDivOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype) + } + self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']} + + class TestElementwiseDivOp_Vector(ElementwiseDivOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [100]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + } + self.outputs = { + 'Out': np.divide(self.inputs['X'], self.inputs['Y']) + } + + class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + } + + self.attrs = {'axis': 0} + self.outputs = { + 'Out': + np.divide(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)) + } + + class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + } + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': + np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)) + } + + class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + } + + self.outputs = { + 'Out': + np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)) + } + + class TestElementwiseDivOp_broadcast_3(ElementwiseDivOp): + def init_input_output(self): + self.inputs = { + 'X': + np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) + } + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': np.divide(self.inputs['X'], + self.inputs['Y'].reshape(1, 10, 12, 1)) + } + + class TestElementwiseDivOp_broadcast_4(ElementwiseDivOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype(self.dtype) + } + self.outputs = { + 
'Out': np.divide(self.inputs['X'], self.inputs['Y']) + } + + class TestElementwiseDivOp_broadcast_5(ElementwiseDivOp): + def init_input_output(self): + self.inputs = { + 'X': + np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype(self.dtype) + } + self.outputs = { + 'Out': np.divide(self.inputs['X'], self.inputs['Y']) + } + + class TestElementwiseDivOp_commonuse_1(ElementwiseDivOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [1, 1, 100]).astype(self.dtype), + } + self.outputs = { + 'Out': np.divide(self.inputs['X'], self.inputs['Y']) + } + + class TestElementwiseDivOp_commonuse_2(ElementwiseDivOp): + def init_input_output(self): + self.inputs = { + 'X': + np.random.uniform(0.1, 1, [30, 3, 1, 5]).astype(self.dtype), + 'Y': + np.random.uniform(0.1, 1, [30, 1, 4, 1]).astype(self.dtype), + } + self.outputs = { + 'Out': np.divide(self.inputs['X'], self.inputs['Y']) + } + + class TestElementwiseDivOp_xsize_lessthan_ysize(ElementwiseDivOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype), + 'Y': + np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype(self.dtype), + } + + self.attrs = {'axis': 2} + + self.outputs = { + 'Out': np.divide(self.inputs['X'], self.inputs['Y']) + } + + class TestElementwiseDivBroadcast(unittest.TestCase): + def test_shape_with_batch_sizes(self): + with fluid.program_guard(fluid.Program()): + x_var = fluid.data( + name='x', dtype='float32', shape=[None, 3, None, None]) + one = 2. + out = one / x_var + exe = fluid.Executor(fluid.XPUPlace(0)) + x = np.random.uniform(0.1, 0.6, + (1, 3, 32, 32)).astype('float32') + out_result, = exe.run(feed={'x': x}, fetch_list=[out]) + self.assertEqual((out_result == (2 / x)).all(), True) + + +support_types = get_xpu_op_support_types('elementwise_div') +for stype in support_types: + create_test_class(globals(), XPUTestElementwiseDivOp, stype) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_floordiv_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_floordiv_op_xpu.py index cc8ec3cac2c96b6544a9ae2ad2af3f39d62bc1aa..ea01a38f4b38d97eb1ff79c8083dd365f0c64ea9 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_floordiv_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_floordiv_op_xpu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -20,68 +20,66 @@ import paddle.fluid as fluid import paddle.fluid.core as core from op_test import OpTest, skip_check_grad_ci from op_test_xpu import XPUOpTest +from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper paddle.enable_static() import random -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseModOp(XPUOpTest): - def init_kernel_type(self): - self.use_mkldnn = False +class XPUTestElementwiseModOp(XPUOpTestWrapper): + def __init__(self): + self.op_name = 'elementwise_floordiv' + self.use_dynamic_create_class = False - def setUp(self): - self.op_type = "elementwise_floordiv" - self.dtype = np.float32 - self.axis = -1 - self.init_dtype() - self.init_input_output() - self.init_kernel_type() - self.init_axis() + class TestElementwiseModOp(XPUOpTest): + def init_kernel_type(self): + self.use_mkldnn = False - self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) - } - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} - self.outputs = {'Out': self.out} + def setUp(self): + self.op_type = "elementwise_floordiv" + self.dtype = self.in_type + self.axis = -1 + self.init_input_output() + self.init_kernel_type() + self.init_axis() - def test_check_output(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_output_with_place(place) + self.inputs = { + 'X': OpTest.np_dtype_to_fluid_dtype(self.x), + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + } + self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} + self.outputs = {'Out': self.out} - def init_input_output(self): - self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype) - self.y = np.random.uniform(0, 1000, [10, 10]).astype(self.dtype) - self.out = np.floor_divide(self.x, self.y) + def test_check_output(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_output_with_place(place) - def init_dtype(self): - pass + def init_input_output(self): + self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype) + self.y = np.random.uniform(1, 1000, [10, 10]).astype(self.dtype) + self.out = np.floor_divide(self.x, self.y) - def init_axis(self): - pass + def init_axis(self): + pass + class TestElementwiseModOp_scalar(TestElementwiseModOp): + def init_input_output(self): + scale_x = random.randint(0, 100000) + scale_y = random.randint(1, 100000) + self.x = (np.random.rand(2, 3, 4) * scale_x).astype(self.dtype) + self.y = (np.random.rand(1) * scale_y + 1).astype(self.dtype) + self.out = np.floor_divide(self.x, self.y) -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseModOp_scalar(TestElementwiseModOp): - def init_input_output(self): - scale_x = random.randint(0, 100000000) - scale_y = random.randint(1, 100000000) - self.x = (np.random.rand(2, 3, 4) * scale_x).astype(self.dtype) - self.y = (np.random.rand(1) * scale_y + 1).astype(self.dtype) - self.out = np.floor_divide(self.x, self.y) + class TestElementwiseModOpInverse(TestElementwiseModOp): + def init_input_output(self): + self.x = np.random.uniform(0, 10000, [10]).astype(self.dtype) + self.y = np.random.uniform(1, 1000, [10, 10]).astype(self.dtype) + self.out = np.floor_divide(self.x, self.y) -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseModOpInverse(TestElementwiseModOp): - def init_input_output(self): - self.x = 
np.random.uniform(0, 10000, [10]).astype(self.dtype) - self.y = np.random.uniform(0, 1000, [10, 10]).astype(self.dtype) - self.out = np.floor_divide(self.x, self.y) - +support_types = get_xpu_op_support_types('elementwise_floordiv') +for stype in support_types: + create_test_class(globals(), XPUTestElementwiseModOp, stype) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py index dbe575d406a0a6985995c81e2a04d7a9268892db..3d9566dc71d425a9d1a64388cbc1a893646ea665 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_max_op_xpu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,162 +18,154 @@ import numpy as np from op_test import OpTest, skip_check_grad_ci from op_test_xpu import XPUOpTest import paddle +from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseOp(XPUOpTest): - def setUp(self): - self.use_xpu = True - self.op_type = "elementwise_max" - # If x and y have the same value, the max() is not differentiable. - # So we generate test data by the following method - # to avoid them being too close to each other. - x = np.random.uniform(0.1, 1, [13, 17]).astype("float32") - sgn = np.random.choice([-1, 1], [13, 17]).astype("float32") - y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float32") - self.inputs = {'X': x, 'Y': y} - self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])} - - def test_check_output(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_output_with_place(place) - - def test_check_grad_normal(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X', 'Y'], 'Out') - - def test_check_grad_ingore_x(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['Y'], - 'Out', - max_relative_error=0.006, - no_grad_set=set("X")) - - def test_check_grad_ingore_y(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['X'], - 'Out', - max_relative_error=0.006, - no_grad_set=set('Y')) - - -@skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMaxOp_scalar(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_max" - x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32") - y = np.array([0.5]).astype("float32") - self.inputs = {'X': x, 'Y': y} - self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMaxOp_Vector(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_max" - x = np.random.random((100, )).astype("float32") - sgn = np.random.choice([-1, 1], (100, )).astype("float32") - y = x + sgn * 
np.random.uniform(0.1, 1, (100, )).astype("float32") - self.inputs = {'X': x, 'Y': y} - self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_max" - x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float32) - sgn = np.random.choice([-1, 1], (100, )).astype(np.float32) - y = x[:, 0, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(np.float32) - self.inputs = {'X': x, 'Y': y} - - self.attrs = {'axis': 0} - self.outputs = { - 'Out': - np.maximum(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_max" - x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float32) - sgn = np.random.choice([-1, 1], (100, )).astype(np.float32) - y = x[0, :, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(np.float32) - self.inputs = {'X': x, 'Y': y} - - self.attrs = {'axis': 1} - self.outputs = { - 'Out': - np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_max" - x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float32) - sgn = np.random.choice([-1, 1], (100, )).astype(np.float32) - y = x[0, 0, :] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(np.float32) - self.inputs = {'X': x, 'Y': y} - - self.outputs = { - 'Out': - np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_max" - x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float32) - sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float32) - y = x[0, :, :, 0] + sgn * \ - np.random.uniform(1, 2, (50, 2)).astype(np.float32) - self.inputs = {'X': x, 'Y': y} - - self.attrs = {'axis': 1} - self.outputs = { - 'Out': - np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_max" - x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32) - sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float32) - y = x + sgn * \ - np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float32) - self.inputs = {'X': x, 'Y': y} - - self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])} - +class XPUTestElementwiseMaxOp(XPUOpTestWrapper): + def __init__(self): + self.op_name = 'elementwise_max' + self.use_dynamic_create_class = False + + class TestElementwiseOp(XPUOpTest): + def setUp(self): + self.use_xpu = True + self.op_type = "elementwise_max" + self.dtype = self.in_type + self.init_input_output() + # If x and y have the same value, the max() is not differentiable. + # So we generate test data by the following method + # to avoid them being too close to each other. 
+ + def init_input_output(self): + x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype) + y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + self.inputs = {'X': x, 'Y': y} + self.outputs = { + 'Out': np.maximum(self.inputs['X'], self.inputs['Y']) + } + + def test_check_output(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_output_with_place(place) + + def test_check_grad_normal(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place(place, ['X', 'Y'], 'Out') + + def test_check_grad_ingore_x(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['Y'], + 'Out', + max_relative_error=0.006, + no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['X'], + 'Out', + max_relative_error=0.006, + no_grad_set=set('Y')) + + @skip_check_grad_ci( + reason="[skip shape check] Use y_shape(1) to test broadcast.") + class TestElementwiseMaxOp_scalar(TestElementwiseOp): + def init_input_output(self): + x = np.random.random_integers(-5, 5, [2, 3, 20]).astype(self.dtype) + y = np.array([0.5]).astype(self.dtype) + self.inputs = {'X': x, 'Y': y} + self.outputs = { + 'Out': np.maximum(self.inputs['X'], self.inputs['Y']) + } + + class TestElementwiseMaxOp_Vector(TestElementwiseOp): + def init_input_output(self): + x = np.random.random((100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) + y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype(self.dtype) + self.inputs = {'X': x, 'Y': y} + self.outputs = { + 'Out': np.maximum(self.inputs['X'], self.inputs['Y']) + } + + class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp): + def init_input_output(self): + x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) + y = x[:, 0, 0] + sgn * \ + np.random.uniform(1, 2, (100, )).astype(self.dtype) + self.inputs = {'X': x, 'Y': y} + + self.attrs = {'axis': 0} + self.outputs = { + 'Out': np.maximum(self.inputs['X'], + self.inputs['Y'].reshape(100, 1, 1)) + } + + class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp): + def init_input_output(self): + x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) + y = x[0, :, 0] + sgn * \ + np.random.uniform(1, 2, (100, )).astype(self.dtype) + self.inputs = {'X': x, 'Y': y} + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': np.maximum(self.inputs['X'], + self.inputs['Y'].reshape(1, 100, 1)) + } + + class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp): + def init_input_output(self): + x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) + y = x[0, 0, :] + sgn * \ + np.random.uniform(1, 2, (100, )).astype(self.dtype) + self.inputs = {'X': x, 'Y': y} + + self.outputs = { + 'Out': np.maximum(self.inputs['X'], + self.inputs['Y'].reshape(1, 1, 100)) + } + + class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp): + def init_input_output(self): + x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (50, 2)).astype(self.dtype) + y = x[0, :, :, 0] + sgn * \ + np.random.uniform(1, 2, (50, 2)).astype(self.dtype) + self.inputs = {'X': x, 'Y': y} + + 
self.attrs = {'axis': 1} + self.outputs = { + 'Out': np.maximum(self.inputs['X'], + self.inputs['Y'].reshape(1, 50, 2, 1)) + } + + class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp): + def init_input_output(self): + x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(self.dtype) + y = x + sgn * \ + np.random.uniform(1, 2, (2, 3, 1, 5)).astype(self.dtype) + self.inputs = {'X': x, 'Y': y} + + self.outputs = { + 'Out': np.maximum(self.inputs['X'], self.inputs['Y']) + } + + +support_types = get_xpu_op_support_types('elementwise_max') +for stype in support_types: + create_test_class(globals(), XPUTestElementwiseMaxOp, stype) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py index ebe2004c3f4a80fcd5e6a24173c6cae7ad08b174..9233097b3add1ca496a37fab19a65db831807863 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_min_op_xpu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,161 +20,149 @@ import paddle.fluid as fluid from paddle.fluid import compiler, Program, program_guard import paddle from op_test_xpu import XPUOpTest -paddle.enable_static() +from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseOp(XPUOpTest): - def setUp(self): - self.op_type = "elementwise_min" - # If x and y have the same value, the min() is not differentiable. - # So we generate test data by the following method - # to avoid them being too close to each other. 
- x = np.random.uniform(0.1, 1, [13, 17]).astype("float32") - sgn = np.random.choice([-1, 1], [13, 17]).astype("float32") - y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float32") - self.inputs = {'X': x, 'Y': y} - self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])} - - def test_check_output(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_output_with_place(place) - - def test_check_grad_normal(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X', 'Y'], 'Out') - - def test_check_grad_ingore_x(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['Y'], - 'Out', - max_relative_error=0.005, - no_grad_set=set("X")) - - def test_check_grad_ingore_y(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['X'], - 'Out', - max_relative_error=0.005, - no_grad_set=set('Y')) - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -@skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") -class TestElementwiseMinOp_scalar(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_min" - x = np.random.random_integers(-5, 5, [10, 3, 4]).astype("float32") - y = np.array([0.5]).astype("float32") - self.inputs = {'X': x, 'Y': y} - self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMinOp_Vector(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_min" - x = np.random.random((100, )).astype("float32") - sgn = np.random.choice([-1, 1], (100, )).astype("float32") - y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float32") - self.inputs = {'X': x, 'Y': y} - self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMinOp_broadcast_0(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_min" - x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(np.float32) - sgn = np.random.choice([-1, 1], (100, )).astype(np.float32) - y = x[:, 0, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(np.float32) - self.inputs = {'X': x, 'Y': y} - - self.attrs = {'axis': 0} - self.outputs = { - 'Out': - np.minimum(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMinOp_broadcast_1(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_min" - x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float32) - sgn = np.random.choice([-1, 1], (100, )).astype(np.float32) - y = x[0, :, 0] + sgn * \ - np.random.uniform(1, 2, (100, )).astype(np.float32) - self.inputs = {'X': x, 'Y': y} - - self.attrs = {'axis': 1} - self.outputs = { - 'Out': - np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMinOp_broadcast_2(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_min" - x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(np.float32) - sgn = np.random.choice([-1, 1], (100, )).astype(np.float32) - y = x[0, 0, :] + sgn * \ - np.random.uniform(1, 2, (100, 
)).astype(np.float32) - self.inputs = {'X': x, 'Y': y} - - self.outputs = { - 'Out': - np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMinOp_broadcast_3(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_min" - x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(np.float32) - sgn = np.random.choice([-1, 1], (25, 4)).astype(np.float32) - y = x[0, :, :, 0] + sgn * \ - np.random.uniform(1, 2, (25, 4)).astype(np.float32) - self.inputs = {'X': x, 'Y': y} - - self.attrs = {'axis': 1} - self.outputs = { - 'Out': - np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 25, 4, 1)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMinOp_broadcast_4(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_min" - x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(np.float32) - sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(np.float32) - y = x + sgn * \ - np.random.uniform(1, 2, (2, 10, 1, 5)).astype(np.float32) - self.inputs = {'X': x, 'Y': y} - - self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])} +class XPUTestElementwiseMinOp(XPUOpTestWrapper): + def __init__(self): + self.op_name = 'elementwise_min' + self.use_dynamic_create_class = False + + class TestElementwiseOp(XPUOpTest): + def setUp(self): + self.op_type = "elementwise_min" + # If x and y have the same value, the min() is not differentiable. + # So we generate test data by the following method + # to avoid them being too close to each other. + self.dtype = self.in_type + self.init_input_output() + + def init_input_output(self): + x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype) + y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + self.inputs = {'X': x, 'Y': y} + self.outputs = { + 'Out': np.minimum(self.inputs['X'], self.inputs['Y']) + } + + def test_check_output(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_output_with_place(place) + + def test_check_grad_normal(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place(place, ['X', 'Y'], 'Out') + + def test_check_grad_ingore_x(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['Y'], + 'Out', + max_relative_error=0.005, + no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['X'], + 'Out', + max_relative_error=0.005, + no_grad_set=set('Y')) + + @skip_check_grad_ci( + reason="[skip shape check] Use y_shape(1) to test broadcast.") + class TestElementwiseMinOp_scalar(TestElementwiseOp): + def init_input_output(self): + x = np.random.random_integers(-5, 5, [10, 3, 4]).astype(self.dtype) + y = np.array([0.5]).astype(self.dtype) + self.inputs = {'X': x, 'Y': y} + self.outputs = { + 'Out': np.minimum(self.inputs['X'], self.inputs['Y']) + } + + class TestElementwiseMinOp_Vector(TestElementwiseOp): + def init_input_output(self): + x = np.random.random((100, )).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) + y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype(self.dtype) + self.inputs = {'X': x, 'Y': y} + self.outputs = { + 'Out': np.minimum(self.inputs['X'], 
self.inputs['Y']) + } + + class TestElementwiseMinOp_broadcast_0(TestElementwiseOp): + def init_input_output(self): + x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) + y = x[:, 0, 0] + sgn * \ + np.random.uniform(1, 2, (100, )).astype(self.dtype) + self.attrs = {'axis': 0} + self.inputs = {'X': x, 'Y': y} + self.outputs = { + 'Out': np.minimum(self.inputs['X'], + self.inputs['Y'].reshape(100, 1, 1)) + } + + class TestElementwiseMinOp_broadcast_1(TestElementwiseOp): + def init_input_output(self): + x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) + y = x[0, :, 0] + sgn * \ + np.random.uniform(1, 2, (100, )).astype(self.dtype) + self.attrs = {'axis': 1} + self.inputs = {'X': x, 'Y': y} + self.outputs = { + 'Out': np.minimum(self.inputs['X'], + self.inputs['Y'].reshape(1, 100, 1)) + } + + class TestElementwiseMinOp_broadcast_2(TestElementwiseOp): + def init_input_output(self): + x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype) + y = x[0, 0, :] + sgn * \ + np.random.uniform(1, 2, (100, )).astype(self.dtype) + self.inputs = {'X': x, 'Y': y} + self.outputs = { + 'Out': np.minimum(self.inputs['X'], + self.inputs['Y'].reshape(1, 1, 100)) + } + + class TestElementwiseMinOp_broadcast_3(TestElementwiseOp): + def init_input_output(self): + x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (25, 4)).astype(self.dtype) + y = x[0, :, :, 0] + sgn * \ + np.random.uniform(1, 2, (25, 4)).astype(self.dtype) + self.attrs = {'axis': 1} + self.inputs = {'X': x, 'Y': y} + self.outputs = { + 'Out': np.minimum(self.inputs['X'], + self.inputs['Y'].reshape(1, 25, 4, 1)) + } + + class TestElementwiseMinOp_broadcast_4(TestElementwiseOp): + def init_input_output(self): + x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(self.dtype) + sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(self.dtype) + y = x + sgn * \ + np.random.uniform(1, 2, (2, 10, 1, 5)).astype(self.dtype) + self.inputs = {'X': x, 'Y': y} + self.outputs = { + 'Out': np.minimum(self.inputs['X'], self.inputs['Y']) + } + + +support_types = get_xpu_op_support_types('elementwise_min') +for stype in support_types: + create_test_class(globals(), XPUTestElementwiseMinOp, stype) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py index 39fd07cb7a9c1e4305dade31ae366ee0d8c93e1b..b4dbb7cf0455250c107a28a8e78670ba831a3a2e 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -20,252 +20,212 @@ import paddle.fluid as fluid from paddle.fluid import compiler, Program, program_guard import paddle from op_test_xpu import XPUOpTest -paddle.enable_static() +from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class ElementwiseMulOp(XPUOpTest): - def init_kernel_type(self): - self.use_mkldnn = False - - def setUp(self): - self.use_xpu = True - self.op_type = "elementwise_mul" - self.dtype = np.float32 - self.axis = -1 - self.init_dtype() - self.init_input_output() - self.init_kernel_type() - self.init_axis() - - self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) - } - self.outputs = {'Out': self.out} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} - - def test_check_output(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_output_with_place(place) - - def test_check_grad_normal(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['X', 'Y'], - 'Out', - check_dygraph=(self.use_mkldnn == False)) - - def test_check_grad_ingore_x(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['Y'], - 'Out', - no_grad_set=set("X"), - check_dygraph=(self.use_mkldnn == False)) - - def test_check_grad_ingore_y(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['X'], - 'Out', - no_grad_set=set('Y'), - check_dygraph=(self.use_mkldnn == False)) - - def init_input_output(self): - self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) - self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) - self.out = np.multiply(self.x, self.y) - - def init_dtype(self): - pass - - def init_axis(self): - pass - - -@skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMulOp_scalar(ElementwiseMulOp): - def setUp(self): - self.op_type = "elementwise_mul" - self.inputs = { - 'X': np.random.rand(10, 3, 4).astype(np.float32), - 'Y': np.random.rand(1).astype(np.float32) - } - self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} - self.init_kernel_type() - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMulOp_Vector(ElementwiseMulOp): - def setUp(self): - self.op_type = "elementwise_mul" - self.inputs = { - 'X': np.random.random((100, )).astype("float32"), - 'Y': np.random.random((100, )).astype("float32") - } - self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])} - self.init_kernel_type() - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp): - def init_input_output(self): - self.x = np.random.rand(100, 2, 3).astype(self.dtype) - self.y = np.random.rand(100).astype(self.dtype) - self.out = self.x * self.y.reshape(100, 1, 1) - - def init_axis(self): - self.axis = 0 - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp): - def setUp(self): - self.op_type = "elementwise_mul" - self.inputs = { - 'X': np.random.rand(2, 100, 
3).astype(np.float32), - 'Y': np.random.rand(100).astype(np.float32) - } - - self.attrs = {'axis': 1} - self.outputs = { - 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 100, 1) - } - self.init_kernel_type() - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp): - def setUp(self): - self.op_type = "elementwise_mul" - self.inputs = { - 'X': np.random.rand(2, 3, 100).astype(np.float32), - 'Y': np.random.rand(100).astype(np.float32) - } - - self.outputs = { - 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 100) - } - self.init_kernel_type() - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp): - def setUp(self): - self.op_type = "elementwise_mul" - self.inputs = { - 'X': np.random.rand(2, 10, 12, 3).astype(np.float32), - 'Y': np.random.rand(10, 12).astype(np.float32) - } - - self.attrs = {'axis': 1} - self.outputs = { - 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1) - } - self.init_kernel_type() - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp): - def setUp(self): - self.op_type = "elementwise_mul" - self.inputs = { - 'X': np.random.rand(10, 2, 11).astype(np.float32), - 'Y': np.random.rand(10, 1, 11).astype(np.float32) - } - self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} - self.init_kernel_type() - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp): - def setUp(self): - self.op_type = "elementwise_mul" - self.inputs = { - 'X': np.random.rand(10, 4, 2, 3).astype(np.float32), - 'Y': np.random.rand(10, 4, 1, 3).astype(np.float32) - } - self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} - self.init_kernel_type() - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp): - def setUp(self): - self.op_type = "elementwise_mul" - self.inputs = { - 'X': np.random.rand(2, 3, 100).astype(np.float32), - 'Y': np.random.rand(1, 1, 100).astype(np.float32) - } - self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} - self.init_kernel_type() - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp): - def setUp(self): - self.op_type = "elementwise_mul" - self.inputs = { - 'X': np.random.rand(30, 3, 1, 5).astype(np.float32), - 'Y': np.random.rand(30, 1, 4, 1).astype(np.float32) - } - self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} - self.init_kernel_type() - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp): - def setUp(self): - self.op_type = "elementwise_mul" - self.inputs = { - 'X': np.random.rand(10, 10).astype(np.float32), - 'Y': np.random.rand(2, 2, 10, 10).astype(np.float32) - } - - self.attrs = {'axis': 2} - - self.outputs = { - 'Out': self.inputs['X'].reshape(1, 1, 10, 10) * self.inputs['Y'] - } - self.init_kernel_type() - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseMulOpError(unittest.TestCase): - def test_errors(self): - with program_guard(Program(), Program()): - # the input of 
elementwise_mul must be Variable. - x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0)) - y1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0)) - self.assertRaises(TypeError, fluid.layers.elementwise_mul, x1, y1) - - # the input dtype of elementwise_mul must be float32 - x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8") - y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8") - self.assertRaises(TypeError, fluid.layers.elementwise_mul, x2, y2) +class XPUTestElementwiseMulOp(XPUOpTestWrapper): + def __init__(self): + self.op_name = 'elementwise_mul' + self.use_dynamic_create_class = False + + class ElementwiseMulOp(XPUOpTest): + def init_kernel_type(self): + self.use_mkldnn = False + + def setUp(self): + self.op_type = 'elementwise_mul' + self.use_xpu = True + self.dtype = self.in_type + self.axis = -1 + self.init_dtype() + self.init_input_output() + self.init_kernel_type() + self.init_axis() + + def test_check_output(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_output_with_place(place) + + def test_check_grad_normal(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['X', 'Y'], + 'Out', + check_dygraph=(self.use_mkldnn == False)) + + def test_check_grad_ingore_x(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['Y'], + 'Out', + no_grad_set=set("X"), + check_dygraph=(self.use_mkldnn == False)) + + def test_check_grad_ingore_y(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['X'], + 'Out', + no_grad_set=set('Y'), + check_dygraph=(self.use_mkldnn == False)) + + def init_input_output(self): + self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + self.out = np.multiply(self.x, self.y) + self.inputs = { + 'X': OpTest.np_dtype_to_fluid_dtype(self.x), + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + } + self.outputs = {'Out': self.out} + self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} + + def init_dtype(self): + pass + + def init_axis(self): + pass + + @skip_check_grad_ci( + reason="[skip shape check] Use y_shape(1) to test broadcast.") + class TestElementwiseMulOp_scalar(ElementwiseMulOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(10, 3, 4).astype(self.dtype), + 'Y': np.random.rand(1).astype(self.dtype) + } + self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} + + class TestElementwiseMulOp_Vector(ElementwiseMulOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.random((100, )).astype(self.dtype), + 'Y': np.random.random((100, )).astype(self.dtype) + } + self.outputs = { + 'Out': np.multiply(self.inputs['X'], self.inputs['Y']) + } + + class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(100, 2, 3).astype(self.dtype), + 'Y': np.random.rand(100).astype(self.dtype) + } + self.outputs = { + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(100, 1, 1) + } + self.attrs = {'axis': 0} + + class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(2, 100, 3).astype(self.dtype), + 'Y': np.random.rand(100).astype(self.dtype) + } + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': 
self.inputs['X'] * self.inputs['Y'].reshape(1, 100, 1) + } + + class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(2, 3, 100).astype(self.dtype), + 'Y': np.random.rand(100).astype(self.dtype) + } + + self.outputs = { + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 100) + } + + class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(2, 10, 12, 3).astype(self.dtype), + 'Y': np.random.rand(10, 12).astype(self.dtype) + } + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1) + } + + class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(10, 2, 11).astype(self.dtype), + 'Y': np.random.rand(10, 1, 11).astype(self.dtype) + } + self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} + + class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(10, 4, 2, 3).astype(self.dtype), + 'Y': np.random.rand(10, 4, 1, 3).astype(self.dtype) + } + self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} + + class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(2, 3, 100).astype(self.dtype), + 'Y': np.random.rand(1, 1, 100).astype(self.dtype) + } + self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} + + class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(30, 3, 1, 5).astype(self.dtype), + 'Y': np.random.rand(30, 1, 4, 1).astype(self.dtype) + } + self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} + + class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(10, 10).astype(self.dtype), + 'Y': np.random.rand(2, 2, 10, 10).astype(self.dtype) + } + + self.attrs = {'axis': 2} + + self.outputs = { + 'Out': self.inputs['X'].reshape(1, 1, 10, 10) * self.inputs['Y'] + } + + class TestElementwiseMulOpError(unittest.TestCase): + def test_errors(self): + with program_guard(Program(), Program()): + # the input of elementwise_mul must be Variable. 
+ x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0)) + y1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0)) + self.assertRaises(TypeError, fluid.layers.elementwise_mul, x1, + y1) + + # the input dtype of elementwise_mul must be float32 + x2 = fluid.layers.data( + name='x2', shape=[3, 4, 5, 6], dtype="uint8") + y2 = fluid.layers.data( + name='y2', shape=[3, 4, 5, 6], dtype="uint8") + self.assertRaises(TypeError, fluid.layers.elementwise_mul, x2, + y2) + + +support_types = get_xpu_op_support_types('elementwise_mul') +for stype in support_types: + create_test_class(globals(), XPUTestElementwiseMulOp, stype) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py index cbad3761196a7584333cad29775a2dd413258755..59c5dd685e1766ce835367fa52401a810e1aab77 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_pow_op_xpu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,163 +20,140 @@ import paddle.fluid as fluid import paddle.fluid.core as core from op_test import OpTest, skip_check_grad_ci from op_test_xpu import XPUOpTest -paddle.enable_static() +from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwisePowOp(XPUOpTest): - def setUp(self): - self.op_type = "elementwise_pow" - self.inputs = { - 'X': np.random.uniform(1, 2, [20, 5]).astype("float32"), - 'Y': np.random.uniform(1, 2, [20, 5]).astype("float32") - } - self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} - - def test_check_output(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_output_with_place(place) - - def test_check_grad_normal(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X', 'Y'], 'Out') - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwisePowOp_big_shape_1(TestElementwisePowOp): - def setUp(self): - self.op_type = "elementwise_pow" - self.inputs = { - 'X': np.random.uniform(1, 2, [10, 10]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [10, 10]).astype("float32") - } - self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwisePowOp_big_shape_2(TestElementwisePowOp): - def setUp(self): - self.op_type = "elementwise_pow" - self.inputs = { - 'X': np.random.uniform(1, 2, [10, 10]).astype("float32"), - 'Y': np.random.uniform(0.2, 2, [10, 10]).astype("float32") - } - self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -@skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") -class TestElementwisePowOp_scalar(TestElementwisePowOp): - def setUp(self): - self.op_type = 
"elementwise_pow" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(np.float32), - 'Y': np.random.uniform(0.1, 1, [1]).astype(np.float32) - } - self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwisePowOp_tensor(TestElementwisePowOp): - def setUp(self): - self.op_type = "elementwise_pow" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [100]).astype("float32"), - 'Y': np.random.uniform(1, 3, [100]).astype("float32") - } - self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwisePowOp_broadcast_0(TestElementwisePowOp): - def setUp(self): - self.op_type = "elementwise_pow" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float32") - } - self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwisePowOp_broadcast_1(TestElementwisePowOp): - def setUp(self): - self.op_type = "elementwise_pow" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float32") - } - self.attrs = {'axis': 1} - self.outputs = { - 'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwisePowOp_broadcast_2(TestElementwisePowOp): - def setUp(self): - self.op_type = "elementwise_pow" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [100]).astype("float32") - } - self.attrs = {'axis': 0} - self.outputs = { - 'Out': - np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwisePowOp_broadcast_3(TestElementwisePowOp): - def setUp(self): - self.op_type = "elementwise_pow" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [20, 5]).astype("float32") - } - self.attrs = {'axis': 1} - self.outputs = { - 'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(1, 20, 5, - 1)) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwisePowOp_broadcast_4(TestElementwisePowOp): - def setUp(self): - self.op_type = "elementwise_pow" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype("float32") - } - self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwisePowOpInt(OpTest): - def setUp(self): - self.op_type = "elementwise_pow" - self.inputs = {'X': np.asarray([1, 3, 6]), 'Y': np.asarray([1, 1, 1])} - self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} - - def test_check_output(self): - self.check_output() +@skip_check_grad_ci(reason="XPU does not support grad op currently") +class XPUTestElementwisePowOp(XPUOpTestWrapper): + def __init__(self): + self.op_name = 'elementwise_pow' + self.use_dynamic_create_class = False + + class 
TestElementwisePowOp(XPUOpTest): + def setUp(self): + self.op_type = "elementwise_pow" + self.dtype = self.in_type + self.__class__.no_need_check_grad = True + self.compute_input_output() + + def compute_input_output(self): + self.inputs = { + 'X': np.random.uniform(1, 2, [20, 5]).astype(self.dtype), + 'Y': np.random.uniform(1, 2, [20, 5]).astype(self.dtype) + } + self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} + + def test_check_output(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_output_with_place(place) + + class TestElementwisePowOp_big_shape_1(TestElementwisePowOp): + def compute_input_output(self): + self.inputs = { + 'X': np.random.uniform(1, 2, [10, 10]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [10, 10]).astype(self.dtype) + } + self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} + + class TestElementwisePowOp_big_shape_2(TestElementwisePowOp): + def compute_input_output(self): + self.inputs = { + 'X': np.random.uniform(1, 2, [10, 10]).astype(self.dtype), + 'Y': np.random.uniform(0.2, 2, [10, 10]).astype(self.dtype) + } + self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} + + @skip_check_grad_ci( + reason="[skip shape check] Use y_shape(1) to test broadcast.") + class TestElementwisePowOp_scalar(TestElementwisePowOp): + def compute_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype) + } + self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} + + class TestElementwisePowOp_tensor(TestElementwisePowOp): + def compute_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [100]).astype(self.dtype), + 'Y': np.random.uniform(1, 3, [100]).astype(self.dtype) + } + self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} + + class TestElementwisePowOp_broadcast_0(TestElementwisePowOp): + def compute_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + } + self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} + + class TestElementwisePowOp_broadcast_1(TestElementwisePowOp): + def compute_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + } + self.attrs = {'axis': 1} + self.outputs = { + 'Out': + np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1)) + } + + class TestElementwisePowOp_broadcast_2(TestElementwisePowOp): + def compute_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype) + } + self.attrs = {'axis': 0} + self.outputs = { + 'Out': + np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)) + } + + class TestElementwisePowOp_broadcast_3(TestElementwisePowOp): + def compute_input_output(self): + self.inputs = { + 'X': + np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [20, 5]).astype(self.dtype) + } + self.attrs = {'axis': 1} + self.outputs = { + 'Out': np.power(self.inputs['X'], + self.inputs['Y'].reshape(1, 20, 5, 1)) + } + + class TestElementwisePowOp_broadcast_4(TestElementwisePowOp): + def compute_input_output(self): + self.inputs = { + 'X': + np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype(self.dtype), + 'Y': 
np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype(self.dtype) + } + self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} + + class TestElementwisePowOpInt(OpTest): + def setUp(self): + self.op_type = "elementwise_pow" + self.inputs = { + 'X': np.asarray([1, 3, 6]), + 'Y': np.asarray([1, 1, 1]) + } + self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])} + + def test_check_output(self): + self.check_output() + + +support_types = get_xpu_op_support_types('elementwise_pow') +for stype in support_types: + create_test_class(globals(), XPUTestElementwisePowOp, stype) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_sub_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_sub_op_xpu.py index 3bc9fa067a6eed4b6a3614e3bd9417adadca1ff7..204485f3432dd06d10d4cd58fc218e5e233b25bd 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_sub_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_sub_op_xpu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,191 +19,164 @@ import paddle from op_test import OpTest, skip_check_grad_ci from op_test_xpu import XPUOpTest import unittest -paddle.enable_static() +from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper +paddle.enable_static() -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseOp(OpTest): - def setUp(self): - self.use_xpu = True - self.op_type = "elementwise_sub" - self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32") - } - self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} - - def test_check_output(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_output_with_place(place, atol=1e-3) - - def test_check_grad_normal(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place(place, ['X', 'Y'], 'Out') - - def test_check_grad_ingore_x(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['Y'], - 'Out', - max_relative_error=0.005, - no_grad_set=set("X")) - - def test_check_grad_ingore_y(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad_with_place( - place, ['X'], - 'Out', - max_relative_error=0.005, - no_grad_set=set('Y')) - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -@skip_check_grad_ci( - reason="[skip shape check] Use y_shape(1) to test broadcast.") -class TestElementwiseSubOp_scalar(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_sub" - self.inputs = { - 'X': np.random.rand(10, 3, 4).astype(np.float32), - 'Y': np.random.rand(1).astype(np.float32) - } - self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseSubOp_Vector(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_sub" - self.inputs = { - 'X': np.random.random((100, )).astype("float32"), - 'Y': np.random.random((100, )).astype("float32") - } - 
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseSubOp_broadcast_0(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_sub" - self.inputs = { - 'X': np.random.rand(100, 3, 2).astype(np.float32), - 'Y': np.random.rand(100).astype(np.float32) - } - - self.attrs = {'axis': 0} - self.outputs = { - 'Out': self.inputs['X'] - self.inputs['Y'].reshape(100, 1, 1) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseSubOp_broadcast_1(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_sub" - self.inputs = { - 'X': np.random.rand(2, 100, 3).astype(np.float32), - 'Y': np.random.rand(100).astype(np.float32) - } - - self.attrs = {'axis': 1} - self.outputs = { - 'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 100, 1) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseSubOp_broadcast_2(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_sub" - self.inputs = { - 'X': np.random.rand(2, 3, 100).astype(np.float32), - 'Y': np.random.rand(100).astype(np.float32) - } - - self.outputs = { - 'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 100) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseSubOp_broadcast_3(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_sub" - self.inputs = { - 'X': np.random.rand(2, 10, 12, 3).astype(np.float32), - 'Y': np.random.rand(10, 12).astype(np.float32) - } - - self.attrs = {'axis': 1} - self.outputs = { - 'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 10, 12, 1) - } - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseSubOp_broadcast_4(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_sub" - self.inputs = { - 'X': np.random.rand(2, 5, 3, 12).astype(np.float32), - 'Y': np.random.rand(2, 5, 1, 12).astype(np.float32) - } - self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseSubOp_commonuse_1(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_sub" - self.inputs = { - 'X': np.random.rand(2, 3, 100).astype(np.float32), - 'Y': np.random.rand(1, 1, 100).astype(np.float32) - } - self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseSubOp_commonuse_2(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_sub" - self.inputs = { - 'X': np.random.rand(10, 3, 1, 4).astype(np.float32), - 'Y': np.random.rand(10, 1, 12, 1).astype(np.float32) - } - self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} - - -@unittest.skipIf(not paddle.is_compiled_with_xpu(), - "core is not compiled with XPU") -class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp): - def setUp(self): - self.op_type = "elementwise_sub" - self.inputs = { - 'X': np.random.rand(10, 12).astype(np.float32), - 'Y': np.random.rand(2, 3, 10, 12).astype(np.float32) - } - - self.attrs = {'axis': 2} - - self.outputs = { - 'Out': self.inputs['X'].reshape(1, 1, 10, 12) - self.inputs['Y'] - } +class XPUTestElementwiseSubOp(XPUOpTestWrapper): + def __init__(self): + 
self.op_name = 'elementwise_sub' + self.use_dynamic_create_class = False + + class TestElementwiseOp(XPUOpTest): + def setUp(self): + self.op_type = "elementwise_sub" + self.use_xpu = True + self.dtype = self.in_type + self.init_input_output() + + def init_input_output(self): + self.inputs = { + 'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype), + 'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype) + } + self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} + + def test_check_output(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_output_with_place(place, atol=1e-3) + + def test_check_grad_normal(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place(place, ['X', 'Y'], 'Out') + + def test_check_grad_ingore_x(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['Y'], + 'Out', + max_relative_error=0.005, + no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + if paddle.is_compiled_with_xpu(): + place = paddle.XPUPlace(0) + self.check_grad_with_place( + place, ['X'], + 'Out', + max_relative_error=0.005, + no_grad_set=set('Y')) + + @skip_check_grad_ci( + reason="[skip shape check] Use y_shape(1) to test broadcast.") + class TestElementwiseSubOp_scalar(TestElementwiseOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(10, 3, 4).astype(self.dtype), + 'Y': np.random.rand(1).astype(self.dtype) + } + self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} + + class TestElementwiseSubOp_Vector(TestElementwiseOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.random((100, )).astype(self.dtype), + 'Y': np.random.random((100, )).astype(self.dtype) + } + self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} + + class TestElementwiseSubOp_broadcast_0(TestElementwiseOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(100, 3, 2).astype(self.dtype), + 'Y': np.random.rand(100).astype(self.dtype) + } + + self.attrs = {'axis': 0} + self.outputs = { + 'Out': self.inputs['X'] - self.inputs['Y'].reshape(100, 1, 1) + } + + class TestElementwiseSubOp_broadcast_1(TestElementwiseOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(2, 100, 3).astype(self.dtype), + 'Y': np.random.rand(100).astype(self.dtype) + } + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 100, 1) + } + + class TestElementwiseSubOp_broadcast_2(TestElementwiseOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(2, 3, 100).astype(self.dtype), + 'Y': np.random.rand(100).astype(self.dtype) + } + + self.outputs = { + 'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 100) + } + + class TestElementwiseSubOp_broadcast_3(TestElementwiseOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(2, 10, 12, 3).astype(self.dtype), + 'Y': np.random.rand(10, 12).astype(self.dtype) + } + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 10, 12, 1) + } + + class TestElementwiseSubOp_broadcast_4(TestElementwiseOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(2, 5, 3, 12).astype(self.dtype), + 'Y': np.random.rand(2, 5, 1, 12).astype(self.dtype) + } + self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} + + class TestElementwiseSubOp_commonuse_1(TestElementwiseOp): + def init_input_output(self): + 
self.inputs = { + 'X': np.random.rand(2, 3, 100).astype(self.dtype), + 'Y': np.random.rand(1, 1, 100).astype(self.dtype) + } + self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} + + class TestElementwiseSubOp_commonuse_2(TestElementwiseOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(10, 3, 1, 4).astype(self.dtype), + 'Y': np.random.rand(10, 1, 12, 1).astype(self.dtype) + } + self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} + + class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp): + def init_input_output(self): + self.inputs = { + 'X': np.random.rand(10, 12).astype(self.dtype), + 'Y': np.random.rand(2, 3, 10, 12).astype(self.dtype) + } + + self.attrs = {'axis': 2} + + self.outputs = { + 'Out': self.inputs['X'].reshape(1, 1, 10, 12) - self.inputs['Y'] + } + + +support_types = get_xpu_op_support_types('elementwise_sub') +for stype in support_types: + create_test_class(globals(), XPUTestElementwiseSubOp, stype) if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_top_k_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_top_k_v2_op_xpu.py index a0f4b4244355ba68c1f28683831332019a556064..71895db4ae9bf622367665b1a24fbd15c1c21268 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_top_k_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_top_k_v2_op_xpu.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,9 +18,10 @@ import unittest import numpy as np import sys sys.path.append("..") -from op_test import OpTest +from op_test_xpu import XPUOpTest import paddle import paddle.fluid.core as core +from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper paddle.enable_static() @@ -41,249 +42,130 @@ def numpy_topk(x, k=1, axis=-1, largest=True): return value, indices -class TestTopkOp(OpTest): - def init_args(self): - self.k = 3 - self.axis = 1 - self.largest = True - - def setUp(self): - self.op_type = "top_k_v2" - self.dtype = np.float32 - self.input_data = np.random.rand(10, 20) - self.init_args() - self.inputs = {'X': self.input_data} - self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk( - self.input_data, axis=self.axis, k=self.k, largest=self.largest) - self.outputs = {'Out': output, 'Indices': indices} - - def test_check_output(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_output_with_place(place) - - def test_check_grad(self): - if paddle.is_compiled_with_xpu(): - place = paddle.XPUPlace(0) - self.check_grad(set(['X']), 'Out') - - -class TestTopkOp1(TestTopkOp): - def init_args(self): - self.k = 3 - self.axis = 1 - self.largest = True - - def setUp(self): - self.op_type = "top_k_v2" - self.dtype = np.float32 - self.input_data = np.random.rand(10, 10, 5) - self.init_args() - self.inputs = {'X': self.input_data} - self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk( - self.input_data, axis=self.axis, k=self.k, largest=self.largest) - self.outputs = {'Out': output, 'Indices': indices} - - -class TestTopkOp2(TestTopkOp): - def init_args(self): - self.k = 3 - self.axis = 1 - self.largest = True - - def setUp(self): - self.op_type = "top_k_v2" - self.dtype = 
np.float32 - self.input_data = np.random.rand(10, 10, 5) - self.init_args() - self.inputs = {'X': self.input_data} - self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk( - self.input_data, axis=self.axis, k=self.k, largest=self.largest) - self.outputs = {'Out': output, 'Indices': indices} - - -class TestTopkOp3(TestTopkOp): - def init_args(self): - self.k = 5 - self.axis = 1 - self.largest = True - - def setUp(self): - self.op_type = "top_k_v2" - self.dtype = np.float32 - self.input_data = np.random.rand(10, 10, 5) - self.init_args() - self.inputs = {'X': self.input_data} - self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk( - self.input_data, axis=self.axis, k=self.k, largest=self.largest) - self.outputs = {'Out': output, 'Indices': indices} - - -class TestTopkOp4(TestTopkOp): - def init_args(self): - self.k = 1 - self.axis = 1 - self.largest = True - - def setUp(self): - self.op_type = "top_k_v2" - self.dtype = np.float32 - self.input_data = np.random.rand(10, 10, 5) - self.init_args() - self.inputs = {'X': self.input_data} - self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk( - self.input_data, axis=self.axis, k=self.k, largest=self.largest) - self.outputs = {'Out': output, 'Indices': indices} - - -class TestTopkOp5(TestTopkOp): - def init_args(self): - self.k = 3 - self.axis = 2 - self.largest = True - - def setUp(self): - self.op_type = "top_k_v2" - self.dtype = np.float32 - self.input_data = np.random.rand(10, 10, 5) - self.init_args() - self.inputs = {'X': self.input_data} - self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk( - self.input_data, axis=self.axis, k=self.k, largest=self.largest) - self.outputs = {'Out': output, 'Indices': indices} - - -class TestTopkOp6(TestTopkOp): - def init_args(self): - self.k = 5 - self.axis = 1 - self.largest = True - - def setUp(self): - self.op_type = "top_k_v2" - self.dtype = np.float32 - self.input_data = np.random.rand(8, 32, 64) - self.init_args() - self.inputs = {'X': self.input_data} - self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk( - self.input_data, axis=self.axis, k=self.k, largest=self.largest) - self.outputs = {'Out': output, 'Indices': indices} - - -class TestTopkOp7(TestTopkOp): - def init_args(self): - self.k = 10 - self.axis = 2 - self.largest = True - - def setUp(self): - self.op_type = "top_k_v2" - self.dtype = np.float32 - self.input_data = np.random.rand(8, 5, 10, 16) - self.init_args() - self.inputs = {'X': self.input_data} - self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk( - self.input_data, axis=self.axis, k=self.k, largest=self.largest) - self.outputs = {'Out': output, 'Indices': indices} - - -class TestTopkOp8(TestTopkOp): - def init_args(self): - self.k = 1 - self.axis = 1 - self.largest = True - - def setUp(self): - self.op_type = "top_k_v2" - self.dtype = np.float32 - self.input_data = np.random.rand(8, 32, 64) - self.init_args() - self.inputs = {'X': self.input_data} - self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest} - output, indices = numpy_topk( - self.input_data, axis=self.axis, k=self.k, largest=self.largest) - self.outputs = {'Out': output, 'Indices': indices} - - -class TestTopkOp9(TestTopkOp): - def init_args(self): - self.k = 3 - self.axis = 1 - self.largest = True - - def 
setUp(self):
-        self.op_type = "top_k_v2"
-        self.dtype = np.float32
-        self.input_data = np.random.rand(10, 10, 5)
-        self.init_args()
-        self.inputs = {'X': self.input_data}
-        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
-        output, indices = numpy_topk(
-            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
-        self.outputs = {'Out': output, 'Indices': indices}
-
-
-class TestTopkOp10(TestTopkOp):
-    def init_args(self):
-        self.k = 3
-        self.axis = 1
-        self.largest = True
-
-    def setUp(self):
-        self.op_type = "top_k_v2"
-        self.dtype = np.float32
-        self.input_data = np.random.rand(10, 10, 5)
-        self.init_args()
-        self.inputs = {'X': self.input_data}
-        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
-        output, indices = numpy_topk(
-            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
-        self.outputs = {'Out': output, 'Indices': indices}
-
-
-class TestTopkOp11(TestTopkOp):
-    def init_args(self):
-        self.k = 5
-        self.axis = 1
-        self.largest = True
-
-    def setUp(self):
-        self.op_type = "top_k_v2"
-        self.dtype = np.float32
-        self.input_data = np.random.rand(10, 10, 5)
-        self.init_args()
-        self.inputs = {'X': self.input_data}
-        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
-        output, indices = numpy_topk(
-            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
-        self.outputs = {'Out': output, 'Indices': indices}
-
-
-class TestTopkOp12(TestTopkOp):
-    def init_args(self):
-        self.k = 1
-        self.axis = 1
-        self.largest = True
-
-    def setUp(self):
-        self.op_type = "top_k_v2"
-        self.dtype = np.float32
-        self.input_data = np.random.rand(10, 10, 5)
-        self.init_args()
-        self.inputs = {'X': self.input_data}
-        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
-        output, indices = numpy_topk(
-            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
-        self.outputs = {'Out': output, 'Indices': indices}
-
+class XPUTestTopKV2Op(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'top_k_v2'
+        self.use_dynamic_create_class = False
+
+    class TestTopkOp(XPUOpTest):
+        def init_args(self):
+            self.k = 3
+            self.axis = 1
+            self.largest = True
+            self.input_data = np.random.rand(10, 20).astype(self.dtype)
+
+        def setUp(self):
+            self.op_type = "top_k_v2"
+            # Set dtype from in_type before init_args() so the input data
+            # is generated with the dtype under test.
+            self.dtype = self.in_type
+            self.init_args()
+            self.inputs = {'X': self.input_data}
+            self.attrs = {
+                'k': self.k,
+                'axis': self.axis,
+                'largest': self.largest
+            }
+            output, indices = numpy_topk(
+                self.input_data, axis=self.axis, k=self.k, largest=self.largest)
+            self.outputs = {'Out': output, 'Indices': indices}
+
+        def test_check_output(self):
+            if paddle.is_compiled_with_xpu():
+                place = paddle.XPUPlace(0)
+                self.check_output_with_place(place)
+
+        def test_check_grad(self):
+            if paddle.is_compiled_with_xpu():
+                place = paddle.XPUPlace(0)
+                self.check_grad(set(['X']), 'Out')
+
+    class TestTopkOp1(TestTopkOp):
+        def init_args(self):
+            self.k = 3
+            self.axis = 1
+            self.largest = True
+            self.input_data = np.random.rand(100, 155).astype(self.dtype)
+
+    class TestTopkOp2(TestTopkOp):
+        def init_args(self):
+            self.k = 3
+            self.axis = 1
+            self.largest = True
+            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
+
+    class TestTopkOp3(TestTopkOp):
+        def init_args(self):
+            self.k = 5
+            self.axis = 1
+            self.largest = True
+            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
+
+    class TestTopkOp4(TestTopkOp):
+        def init_args(self):
+            self.k = 1
+            self.axis = 1
+            self.largest = True
+            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
+
+    class TestTopkOp5(TestTopkOp):
+        def init_args(self):
+            self.k = 3
+            self.axis = 2
+            self.largest = True
+            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
+
+    class TestTopkOp6(TestTopkOp):
+        def init_args(self):
+            self.k = 5
+            self.axis = 1
+            self.largest = True
+            self.input_data = np.random.rand(8, 32, 64).astype(self.dtype)
+
+    class TestTopkOp7(TestTopkOp):
+        def init_args(self):
+            self.k = 10
+            self.axis = 2
+            self.largest = True
+            self.input_data = np.random.rand(8, 5, 10, 16).astype(self.dtype)
+
+    class TestTopkOp8(TestTopkOp):
+        def init_args(self):
+            self.k = 1
+            self.axis = 1
+            self.largest = True
+            self.input_data = np.random.rand(8, 32, 64).astype(self.dtype)
+
+    class TestTopkOp9(TestTopkOp):
+        def init_args(self):
+            self.k = 3
+            self.axis = 1
+            self.largest = True
+            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
+
+    class TestTopkOp10(TestTopkOp):
+        def init_args(self):
+            self.k = 3
+            self.axis = 1
+            self.largest = True
+            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
+
+    class TestTopkOp11(TestTopkOp):
+        def init_args(self):
+            self.k = 5
+            self.axis = 1
+            self.largest = True
+            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
+
+    class TestTopkOp12(TestTopkOp):
+        def init_args(self):
+            self.k = 1
+            self.axis = 1
+            self.largest = True
+            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
+
+
+support_types = get_xpu_op_support_types('top_k_v2')
+for stype in support_types:
+    create_test_class(globals(), XPUTestTopKV2Op, stype)
if __name__ == "__main__":
    unittest.main()