Unverified commit 23a69bc7, authored by ykkk2333, committed by GitHub

update elementwise unittest style, *test=kunlun (#40779)

Parent bdef57cd
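This commit replaces the per-class `@unittest.skipIf(not paddle.is_compiled_with_xpu(), ...)` style with the `XPUOpTestWrapper` registration pattern: a wrapper class names the op, its nested `XPUOpTest` subclasses read their dtype from `self.in_type`, and `create_test_class` generates one concrete test class per dtype returned by `get_xpu_op_support_types`. A condensed sketch of that pattern, mirroring the elementwise_div file in the diff below (illustrative only, no APIs beyond those used in the diff are assumed):

# Condensed sketch of the new test style introduced by this commit; it mirrors
# the elementwise_div diff below and is not an additional test file.
import unittest

import numpy as np
import paddle
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (XPUOpTestWrapper, create_test_class,
                                     get_xpu_op_support_types)

paddle.enable_static()


class XPUTestElementwiseDivOp(XPUOpTestWrapper):
    def __init__(self):
        # The wrapper only declares which op it covers; concrete test classes
        # are generated from its nested XPUOpTest subclasses.
        self.op_name = 'elementwise_div'
        self.use_dynamic_create_class = False

    class ElementwiseDivOp(XPUOpTest):
        def setUp(self):
            self.op_type = 'elementwise_div'
            # self.in_type is injected per supported dtype by create_test_class.
            self.dtype = self.in_type
            x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
            y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {'Out': np.divide(x, y)}

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
                self.check_output_with_place(paddle.XPUPlace(0))


# One concrete unittest class is registered per dtype the XPU kernel supports.
support_types = get_xpu_op_support_types('elementwise_div')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseDivOp, stype)

if __name__ == '__main__':
    unittest.main()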
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,231 +20,216 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
paddle.enable_static()
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class ElementwiseDivOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_div"
self.dtype = np.float32
self.init_dtype()
self.use_xpu = True
""" Warning
CPU gradient check error!
'X': np.random.random((32,84)).astype("float32"),
'Y': np.random.random((32,84)).astype("float32")
"""
self.inputs = {
'X': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X', 'Y'], 'Out', max_relative_error=0.05)
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.05,
no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.05,
no_grad_set=set('Y'))
def init_dtype(self):
pass
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_scalar(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(np.float32),
'Y': np.random.uniform(0.1, 1, [1]).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_Vector(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [100]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.attrs = {'axis': 0}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_3(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [10, 12]).astype("float32")
}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 10, 12, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_4(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype("float32")
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_5(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype("float32")
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_commonuse_1(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [1, 1, 100]).astype("float32"),
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_commonuse_2(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [30, 3, 1, 5]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [30, 1, 4, 1]).astype("float32"),
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_xsize_lessthan_ysize(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [10, 12]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype("float32"),
}
self.attrs = {'axis': 2}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivBroadcast(unittest.TestCase):
def test_shape_with_batch_sizes(self):
with fluid.program_guard(fluid.Program()):
x_var = fluid.data(
name='x', dtype='float32', shape=[None, 3, None, None])
one = 2.
out = one / x_var
exe = fluid.Executor(fluid.XPUPlace(0))
x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype("float32")
out_result, = exe.run(feed={'x': x}, fetch_list=[out])
self.assertEqual((out_result == (2 / x)).all(), True)
class XPUTestElementwiseDivOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'elementwise_div'
self.use_dynamic_create_class = False
class ElementwiseDivOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_div"
self.dtype = self.in_type
self.init_dtype()
self.use_xpu = True
self.init_input_output()
""" Warning
CPU gradient check error!
'X': np.random.random((32,84)).astype("float32"),
'Y': np.random.random((32,84)).astype("float32")
"""
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X', 'Y'], 'Out', max_relative_error=0.05)
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.05,
no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.05,
no_grad_set=set('Y'))
def init_dtype(self):
pass
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseDivOp_scalar(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']}
class TestElementwiseDivOp_Vector(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.attrs = {'axis': 0}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
}
class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
}
class TestElementwiseDivOp_broadcast_3(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X':
np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.divide(self.inputs['X'],
self.inputs['Y'].reshape(1, 10, 12, 1))
}
class TestElementwiseDivOp_broadcast_4(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype(self.dtype)
}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseDivOp_broadcast_5(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X':
np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype(self.dtype)
}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseDivOp_commonuse_1(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [1, 1, 100]).astype(self.dtype),
}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseDivOp_commonuse_2(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X':
np.random.uniform(0.1, 1, [30, 3, 1, 5]).astype(self.dtype),
'Y':
np.random.uniform(0.1, 1, [30, 1, 4, 1]).astype(self.dtype),
}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseDivOp_xsize_lessthan_ysize(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype),
'Y':
np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype(self.dtype),
}
self.attrs = {'axis': 2}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseDivBroadcast(unittest.TestCase):
def test_shape_with_batch_sizes(self):
with fluid.program_guard(fluid.Program()):
x_var = fluid.data(
name='x', dtype='float32', shape=[None, 3, None, None])
one = 2.
out = one / x_var
exe = fluid.Executor(fluid.XPUPlace(0))
x = np.random.uniform(0.1, 0.6,
(1, 3, 32, 32)).astype('float32')
out_result, = exe.run(feed={'x': x}, fetch_list=[out])
self.assertEqual((out_result == (2 / x)).all(), True)
support_types = get_xpu_op_support_types('elementwise_div')
for stype in support_types:
create_test_class(globals(), XPUTestElementwiseDivOp, stype)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,68 +20,66 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
import random
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseModOp(XPUOpTest):
def init_kernel_type(self):
self.use_mkldnn = False
class XPUTestElementwiseModOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'elementwise_floordiv'
self.use_dynamic_create_class = False
def setUp(self):
self.op_type = "elementwise_floordiv"
self.dtype = np.float32
self.axis = -1
self.init_dtype()
self.init_input_output()
self.init_kernel_type()
self.init_axis()
class TestElementwiseModOp(XPUOpTest):
def init_kernel_type(self):
self.use_mkldnn = False
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
}
self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
self.outputs = {'Out': self.out}
def setUp(self):
self.op_type = "elementwise_floordiv"
self.dtype = self.in_type
self.axis = -1
self.init_input_output()
self.init_kernel_type()
self.init_axis()
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
}
self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
self.outputs = {'Out': self.out}
def init_input_output(self):
self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
self.y = np.random.uniform(0, 1000, [10, 10]).astype(self.dtype)
self.out = np.floor_divide(self.x, self.y)
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def init_dtype(self):
pass
def init_input_output(self):
self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
self.y = np.random.uniform(1, 1000, [10, 10]).astype(self.dtype)
self.out = np.floor_divide(self.x, self.y)
def init_axis(self):
pass
def init_axis(self):
pass
class TestElementwiseModOp_scalar(TestElementwiseModOp):
def init_input_output(self):
scale_x = random.randint(0, 100000)
scale_y = random.randint(1, 100000)
self.x = (np.random.rand(2, 3, 4) * scale_x).astype(self.dtype)
self.y = (np.random.rand(1) * scale_y + 1).astype(self.dtype)
self.out = np.floor_divide(self.x, self.y)
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseModOp_scalar(TestElementwiseModOp):
def init_input_output(self):
scale_x = random.randint(0, 100000000)
scale_y = random.randint(1, 100000000)
self.x = (np.random.rand(2, 3, 4) * scale_x).astype(self.dtype)
self.y = (np.random.rand(1) * scale_y + 1).astype(self.dtype)
self.out = np.floor_divide(self.x, self.y)
class TestElementwiseModOpInverse(TestElementwiseModOp):
def init_input_output(self):
self.x = np.random.uniform(0, 10000, [10]).astype(self.dtype)
self.y = np.random.uniform(1, 1000, [10, 10]).astype(self.dtype)
self.out = np.floor_divide(self.x, self.y)
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseModOpInverse(TestElementwiseModOp):
def init_input_output(self):
self.x = np.random.uniform(0, 10000, [10]).astype(self.dtype)
self.y = np.random.uniform(0, 1000, [10, 10]).astype(self.dtype)
self.out = np.floor_divide(self.x, self.y)
support_types = get_xpu_op_support_types('elementwise_floordiv')
for stype in support_types:
create_test_class(globals(), XPUTestElementwiseModOp, stype)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,162 +18,154 @@ import numpy as np
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
import paddle
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseOp(XPUOpTest):
def setUp(self):
self.use_xpu = True
self.op_type = "elementwise_max"
# If x and y have the same value, the max() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
sgn = np.random.choice([-1, 1], [13, 17]).astype("float32")
y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float32")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out')
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.006,
no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.006,
no_grad_set=set('Y'))
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMaxOp_scalar(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32")
y = np.array([0.5]).astype("float32")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMaxOp_Vector(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
x = np.random.random((100, )).astype("float32")
sgn = np.random.choice([-1, 1], (100, )).astype("float32")
y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float32")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float32)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
y = x[:, 0, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 0}
self.outputs = {
'Out':
np.maximum(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float32)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
y = x[0, :, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float32)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
y = x[0, 0, :] + sgn * \
np.random.uniform(1, 2, (100, )).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out':
np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float32)
sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float32)
y = x[0, :, :, 0] + sgn * \
np.random.uniform(1, 2, (50, 2)).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_max"
x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32)
sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float32)
y = x + sgn * \
np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
class XPUTestElementwiseMaxOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'elementwise_max'
self.use_dynamic_create_class = False
class TestElementwiseOp(XPUOpTest):
def setUp(self):
self.use_xpu = True
self.op_type = "elementwise_max"
self.dtype = self.in_type
self.init_input_output()
# If x and y have the same value, the max() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
def init_input_output(self):
x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype)
y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out')
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.006,
no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.006,
no_grad_set=set('Y'))
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseMaxOp_scalar(TestElementwiseOp):
def init_input_output(self):
x = np.random.random_integers(-5, 5, [2, 3, 20]).astype(self.dtype)
y = np.array([0.5]).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseMaxOp_Vector(TestElementwiseOp):
def init_input_output(self):
x = np.random.random((100, )).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[:, 0, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 0}
self.outputs = {
'Out': np.maximum(self.inputs['X'],
self.inputs['Y'].reshape(100, 1, 1))
}
class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[0, :, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.maximum(self.inputs['X'],
self.inputs['Y'].reshape(1, 100, 1))
}
class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[0, 0, :] + sgn * \
np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.maximum(self.inputs['X'],
self.inputs['Y'].reshape(1, 1, 100))
}
class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (50, 2)).astype(self.dtype)
y = x[0, :, :, 0] + sgn * \
np.random.uniform(1, 2, (50, 2)).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.maximum(self.inputs['X'],
self.inputs['Y'].reshape(1, 50, 2, 1))
}
class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(self.dtype)
y = x + sgn * \
np.random.uniform(1, 2, (2, 3, 1, 5)).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
}
support_types = get_xpu_op_support_types('elementwise_max')
for stype in support_types:
create_test_class(globals(), XPUTestElementwiseMaxOp, stype)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,161 +20,149 @@ import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
from op_test_xpu import XPUOpTest
paddle.enable_static()
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_min"
# If x and y have the same value, the min() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
sgn = np.random.choice([-1, 1], [13, 17]).astype("float32")
y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float32")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out')
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.005,
no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.005,
no_grad_set=set('Y'))
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseMinOp_scalar(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.random_integers(-5, 5, [10, 3, 4]).astype("float32")
y = np.array([0.5]).astype("float32")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMinOp_Vector(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.random((100, )).astype("float32")
sgn = np.random.choice([-1, 1], (100, )).astype("float32")
y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float32")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(np.float32)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
y = x[:, 0, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 0}
self.outputs = {
'Out':
np.minimum(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float32)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
y = x[0, :, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(np.float32)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
y = x[0, 0, :] + sgn * \
np.random.uniform(1, 2, (100, )).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out':
np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(np.float32)
sgn = np.random.choice([-1, 1], (25, 4)).astype(np.float32)
y = x[0, :, :, 0] + sgn * \
np.random.uniform(1, 2, (25, 4)).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 25, 4, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(np.float32)
sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(np.float32)
y = x + sgn * \
np.random.uniform(1, 2, (2, 10, 1, 5)).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
class XPUTestElementwiseMinOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'elementwise_min'
self.use_dynamic_create_class = False
class TestElementwiseOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_min"
# If x and y have the same value, the min() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
self.dtype = self.in_type
self.init_input_output()
def init_input_output(self):
x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype)
y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out')
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.005,
no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.005,
no_grad_set=set('Y'))
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseMinOp_scalar(TestElementwiseOp):
def init_input_output(self):
x = np.random.random_integers(-5, 5, [10, 3, 4]).astype(self.dtype)
y = np.array([0.5]).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseMinOp_Vector(TestElementwiseOp):
def init_input_output(self):
x = np.random.random((100, )).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[:, 0, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.attrs = {'axis': 0}
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'],
self.inputs['Y'].reshape(100, 1, 1))
}
class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[0, :, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.attrs = {'axis': 1}
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'],
self.inputs['Y'].reshape(1, 100, 1))
}
class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[0, 0, :] + sgn * \
np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'],
self.inputs['Y'].reshape(1, 1, 100))
}
class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (25, 4)).astype(self.dtype)
y = x[0, :, :, 0] + sgn * \
np.random.uniform(1, 2, (25, 4)).astype(self.dtype)
self.attrs = {'axis': 1}
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'],
self.inputs['Y'].reshape(1, 25, 4, 1))
}
class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(self.dtype)
y = x + sgn * \
np.random.uniform(1, 2, (2, 10, 1, 5)).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
}
support_types = get_xpu_op_support_types('elementwise_min')
for stype in support_types:
create_test_class(globals(), XPUTestElementwiseMinOp, stype)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,252 +20,212 @@ import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
from op_test_xpu import XPUOpTest
paddle.enable_static()
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class ElementwiseMulOp(XPUOpTest):
def init_kernel_type(self):
self.use_mkldnn = False
def setUp(self):
self.use_xpu = True
self.op_type = "elementwise_mul"
self.dtype = np.float32
self.axis = -1
self.init_dtype()
self.init_input_output()
self.init_kernel_type()
self.init_axis()
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
}
self.outputs = {'Out': self.out}
self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X', 'Y'],
'Out',
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
no_grad_set=set("X"),
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
no_grad_set=set('Y'),
check_dygraph=(self.use_mkldnn == False))
def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.out = np.multiply(self.x, self.y)
def init_dtype(self):
pass
def init_axis(self):
pass
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_scalar(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(10, 3, 4).astype(np.float32),
'Y': np.random.rand(1).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_Vector(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.random((100, )).astype("float32"),
'Y': np.random.random((100, )).astype("float32")
}
self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x * self.y.reshape(100, 1, 1)
def init_axis(self):
self.axis = 0
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(2, 100, 3).astype(np.float32),
'Y': np.random.rand(100).astype(np.float32)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 100, 1)
}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float32),
'Y': np.random.rand(100).astype(np.float32)
}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 100)
}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(2, 10, 12, 3).astype(np.float32),
'Y': np.random.rand(10, 12).astype(np.float32)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1)
}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(10, 2, 11).astype(np.float32),
'Y': np.random.rand(10, 1, 11).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(10, 4, 2, 3).astype(np.float32),
'Y': np.random.rand(10, 4, 1, 3).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float32),
'Y': np.random.rand(1, 1, 100).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(30, 3, 1, 5).astype(np.float32),
'Y': np.random.rand(30, 1, 4, 1).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(10, 10).astype(np.float32),
'Y': np.random.rand(2, 2, 10, 10).astype(np.float32)
}
self.attrs = {'axis': 2}
self.outputs = {
'Out': self.inputs['X'].reshape(1, 1, 10, 10) * self.inputs['Y']
}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# the input of elementwise_mul must be Variable.
x1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
y1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
self.assertRaises(TypeError, fluid.layers.elementwise_mul, x1, y1)
# the input dtype of elementwise_mul must be float32
x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8")
y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8")
self.assertRaises(TypeError, fluid.layers.elementwise_mul, x2, y2)
class XPUTestElementwiseMulOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'elementwise_mul'
self.use_dynamic_create_class = False
class ElementwiseMulOp(XPUOpTest):
def init_kernel_type(self):
self.use_mkldnn = False
def setUp(self):
self.op_type = 'elementwise_mul'
self.use_xpu = True
self.dtype = self.in_type
self.axis = -1
self.init_dtype()
self.init_input_output()
self.init_kernel_type()
self.init_axis()
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X', 'Y'],
'Out',
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
no_grad_set=set("X"),
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
no_grad_set=set('Y'),
check_dygraph=(self.use_mkldnn == False))
def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.out = np.multiply(self.x, self.y)
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
}
self.outputs = {'Out': self.out}
self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
def init_dtype(self):
pass
def init_axis(self):
pass
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseMulOp_scalar(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 3, 4).astype(self.dtype),
'Y': np.random.rand(1).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
class TestElementwiseMulOp_Vector(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.random((100, )).astype(self.dtype),
'Y': np.random.random((100, )).astype(self.dtype)
}
self.outputs = {
'Out': np.multiply(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(100, 2, 3).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(100, 1, 1)
}
self.attrs = {'axis': 0}
class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 100, 3).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 100, 1)
}
class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 100)
}
class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 10, 12, 3).astype(self.dtype),
'Y': np.random.rand(10, 12).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1)
}
class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 2, 11).astype(self.dtype),
'Y': np.random.rand(10, 1, 11).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 4, 2, 3).astype(self.dtype),
'Y': np.random.rand(10, 4, 1, 3).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(self.dtype),
'Y': np.random.rand(1, 1, 100).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(30, 3, 1, 5).astype(self.dtype),
'Y': np.random.rand(30, 1, 4, 1).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 10).astype(self.dtype),
'Y': np.random.rand(2, 2, 10, 10).astype(self.dtype)
}
self.attrs = {'axis': 2}
self.outputs = {
'Out': self.inputs['X'].reshape(1, 1, 10, 10) * self.inputs['Y']
}
class TestElementwiseMulOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# the input of elementwise_mul must be Variable.
x1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
y1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
self.assertRaises(TypeError, fluid.layers.elementwise_mul, x1,
y1)
# the input dtype of elementwise_mul must be float32
x2 = fluid.layers.data(
name='x2', shape=[3, 4, 5, 6], dtype="uint8")
y2 = fluid.layers.data(
name='y2', shape=[3, 4, 5, 6], dtype="uint8")
self.assertRaises(TypeError, fluid.layers.elementwise_mul, x2,
y2)
support_types = get_xpu_op_support_types('elementwise_mul')
for stype in support_types:
create_test_class(globals(), XPUTestElementwiseMulOp, stype)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,163 +20,140 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
paddle.enable_static()
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(1, 2, [20, 5]).astype("float32"),
'Y': np.random.uniform(1, 2, [20, 5]).astype("float32")
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out')
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_big_shape_1(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(1, 2, [10, 10]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [10, 10]).astype("float32")
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_big_shape_2(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(1, 2, [10, 10]).astype("float32"),
'Y': np.random.uniform(0.2, 2, [10, 10]).astype("float32")
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwisePowOp_scalar(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(np.float32),
'Y': np.random.uniform(0.1, 1, [1]).astype(np.float32)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_tensor(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [100]).astype("float32"),
'Y': np.random.uniform(1, 3, [100]).astype("float32")
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.attrs = {'axis': 0}
self.outputs = {
'Out':
np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [20, 5]).astype("float32")
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(1, 20, 5,
1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype("float32")
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOpInt(OpTest):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {'X': np.asarray([1, 3, 6]), 'Y': np.asarray([1, 1, 1])}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
self.check_output()
@skip_check_grad_ci(reason="XPU does not support grad op currently")
class XPUTestElementwisePowOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'elementwise_pow'
self.use_dynamic_create_class = False
class TestElementwisePowOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_pow"
self.dtype = self.in_type
self.__class__.no_need_check_grad = True
self.compute_input_output()
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(1, 2, [20, 5]).astype(self.dtype),
'Y': np.random.uniform(1, 2, [20, 5]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
class TestElementwisePowOp_big_shape_1(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(1, 2, [10, 10]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [10, 10]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
class TestElementwisePowOp_big_shape_2(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(1, 2, [10, 10]).astype(self.dtype),
'Y': np.random.uniform(0.2, 2, [10, 10]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwisePowOp_scalar(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
class TestElementwisePowOp_tensor(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
'Y': np.random.uniform(1, 3, [100]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1))
}
class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.attrs = {'axis': 0}
self.outputs = {
'Out':
np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X':
np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [20, 5]).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.power(self.inputs['X'],
self.inputs['Y'].reshape(1, 20, 5, 1))
}
class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X':
np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
class TestElementwisePowOpInt(OpTest):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.asarray([1, 3, 6]),
'Y': np.asarray([1, 1, 1])
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
self.check_output()
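# Register one generated test class per dtype the current XPU build supports for elementwise_pow.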
support_types = get_xpu_op_support_types('elementwise_pow')
for stype in support_types:
create_test_class(globals(), XPUTestElementwisePowOp, stype)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -19,191 +19,164 @@ import paddle
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
import unittest
paddle.enable_static()
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseOp(OpTest):
def setUp(self):
self.use_xpu = True
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32")
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place, atol=1e-3)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out')
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.005,
no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.005,
no_grad_set=set('Y'))
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseSubOp_scalar(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(10, 3, 4).astype(np.float32),
'Y': np.random.rand(1).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_Vector(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.random((100, )).astype("float32"),
'Y': np.random.random((100, )).astype("float32")
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(100, 3, 2).astype(np.float32),
'Y': np.random.rand(100).astype(np.float32)
}
self.attrs = {'axis': 0}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(100, 1, 1)
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 100, 3).astype(np.float32),
'Y': np.random.rand(100).astype(np.float32)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 100, 1)
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float32),
'Y': np.random.rand(100).astype(np.float32)
}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 100)
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 10, 12, 3).astype(np.float32),
'Y': np.random.rand(10, 12).astype(np.float32)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 10, 12, 1)
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 5, 3, 12).astype(np.float32),
'Y': np.random.rand(2, 5, 1, 12).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float32),
'Y': np.random.rand(1, 1, 100).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(10, 3, 1, 4).astype(np.float32),
'Y': np.random.rand(10, 1, 12, 1).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(10, 12).astype(np.float32),
'Y': np.random.rand(2, 3, 10, 12).astype(np.float32)
}
self.attrs = {'axis': 2}
self.outputs = {
'Out': self.inputs['X'].reshape(1, 1, 10, 12) - self.inputs['Y']
}
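# dtype-parameterized elementwise_sub tests: subclasses only override init_input_output;
# the concrete dtype comes from self.in_type, injected by create_test_class below.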
class XPUTestElementwiseSubOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'elementwise_sub'
self.use_dynamic_create_class = False
class TestElementwiseOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_sub"
self.use_xpu = True
self.dtype = self.in_type
self.init_input_output()
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place, atol=1e-3)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out')
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.005,
no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.005,
no_grad_set=set('Y'))
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseSubOp_scalar(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 3, 4).astype(self.dtype),
'Y': np.random.rand(1).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_Vector(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.random((100, )).astype(self.dtype),
'Y': np.random.random((100, )).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(100, 3, 2).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.attrs = {'axis': 0}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(100, 1, 1)
}
class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 100, 3).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 100, 1)
}
class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 100)
}
class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 10, 12, 3).astype(self.dtype),
'Y': np.random.rand(10, 12).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 10, 12, 1)
}
class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 5, 3, 12).astype(self.dtype),
'Y': np.random.rand(2, 5, 1, 12).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(self.dtype),
'Y': np.random.rand(1, 1, 100).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 3, 1, 4).astype(self.dtype),
'Y': np.random.rand(10, 1, 12, 1).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 12).astype(self.dtype),
'Y': np.random.rand(2, 3, 10, 12).astype(self.dtype)
}
self.attrs = {'axis': 2}
self.outputs = {
'Out': self.inputs['X'].reshape(1, 1, 10, 12) - self.inputs['Y']
}
support_types = get_xpu_op_support_types('elementwise_sub')
for stype in support_types:
create_test_class(globals(), XPUTestElementwiseSubOp, stype)
if __name__ == '__main__':
unittest.main()
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -18,9 +18,10 @@ import unittest
import numpy as np
import sys
sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid.core as core
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
paddle.enable_static()
......@@ -41,249 +42,130 @@ def numpy_topk(x, k=1, axis=-1, largest=True):
return value, indices
class TestTopkOp(OpTest):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 20)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad(set(['X']), 'Out')
class TestTopkOp1(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp2(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp3(TestTopkOp):
def init_args(self):
self.k = 5
self.axis = 1
self.largest = True
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp4(TestTopkOp):
def init_args(self):
self.k = 1
self.axis = 1
self.largest = True
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp5(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 2
self.largest = True
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp6(TestTopkOp):
def init_args(self):
self.k = 5
self.axis = 1
self.largest = True
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(8, 32, 64)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp7(TestTopkOp):
def init_args(self):
self.k = 10
self.axis = 2
self.largest = True
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(8, 5, 10, 16)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp8(TestTopkOp):
def init_args(self):
self.k = 1
self.axis = 1
self.largest = True
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(8, 32, 64)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp9(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp10(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp11(TestTopkOp):
def init_args(self):
self.k = 5
self.axis = 1
self.largest = True
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
class TestTopkOp12(TestTopkOp):
def init_args(self):
self.k = 1
self.axis = 1
self.largest = True
def setUp(self):
self.op_type = "top_k_v2"
self.dtype = np.float32
self.input_data = np.random.rand(10, 10, 5)
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
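# dtype-parameterized top_k_v2 tests: each subclass overrides init_args to set k, axis,
# largest, and the input shape; outputs are checked against the numpy_topk reference above.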
class XPUTestTopKV2Op(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'top_k_v2'
self.use_dynamic_create_class = False
class TestTopkOp(XPUOpTest):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 20).astype(self.dtype)
def setUp(self):
self.op_type = "top_k_v2"
# Set dtype before init_args: input_data is cast to self.dtype there.
self.dtype = self.in_type
self.init_args()
self.inputs = {'X': self.input_data}
self.attrs = {
'k': self.k,
'axis': self.axis,
'largest': self.largest
}
output, indices = numpy_topk(
self.input_data, axis=self.axis, k=self.k, largest=self.largest)
self.outputs = {'Out': output, 'Indices': indices}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad(set(['X']), 'Out')
class TestTopkOp1(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
self.input_data = np.random.rand(100, 155).astype(self.dtype)
class TestTopkOp2(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
class TestTopkOp3(TestTopkOp):
def init_args(self):
self.k = 5
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
class TestTopkOp4(TestTopkOp):
def init_args(self):
self.k = 1
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
class TestTopkOp5(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 2
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
class TestTopkOp6(TestTopkOp):
def init_args(self):
self.k = 5
self.axis = 1
self.largest = True
self.input_data = np.random.rand(8, 32, 64).astype(self.dtype)
class TestTopkOp7(TestTopkOp):
def init_args(self):
self.k = 10
self.axis = 2
self.largest = True
self.input_data = np.random.rand(8, 5, 10, 16).astype(self.dtype)
class TestTopkOp8(TestTopkOp):
def init_args(self):
self.k = 1
self.axis = 1
self.largest = True
self.input_data = np.random.rand(8, 32, 64).astype(self.dtype)
class TestTopkOp9(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
class TestTopkOp10(TestTopkOp):
def init_args(self):
self.k = 3
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
class TestTopkOp11(TestTopkOp):
def init_args(self):
self.k = 5
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
class TestTopkOp12(TestTopkOp):
def init_args(self):
self.k = 1
self.axis = 1
self.largest = True
self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)
support_types = get_xpu_op_support_types('top_k_v2')
for stype in support_types:
create_test_class(globals(), XPUTestTopKV2Op, stype)
if __name__ == "__main__":
unittest.main()