Unverified commit 23a69bc7, authored by ykkk2333, committed by GitHub

update elementwise unittest style, *test=kunlun (#40779)

Parent bdef57cd
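The style change is the same in every file below: per-class @unittest.skipIf decorators and hard-coded np.float32 are replaced by a wrapper class that is expanded into one concrete test class per dtype the XPU kernel actually registers. A minimal sketch of the pattern, distilled from the diffs (the elementwise_add wrapper here is illustrative only; op_test_xpu and xpu.get_test_cover_info are the repository's own test utilities, importable from the xpu test directory):

import numpy as np
import paddle
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestElementwiseAddOp(XPUOpTestWrapper):  # illustrative op, not part of this commit
    def __init__(self):
        self.op_name = 'elementwise_add'      # op whose XPU kernels are being tested
        self.use_dynamic_create_class = False

    class TestBase(XPUOpTest):
        def setUp(self):
            self.op_type = 'elementwise_add'
            self.dtype = self.in_type         # injected per-dtype by create_test_class
            x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
            y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {'Out': x + y}

        def test_check_output(self):
            self.check_output_with_place(paddle.XPUPlace(0))


# One concrete unittest class is generated per supported dtype.
for stype in get_xpu_op_support_types('elementwise_add'):
    create_test_class(globals(), XPUTestElementwiseAddOp, stype)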
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,231 +20,216 @@
import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class ElementwiseDivOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_div"
self.dtype = np.float32
self.init_dtype()
self.use_xpu = True
""" Warning
CPU gradient check error!
'X': np.random.random((32,84)).astype("float32"),
'Y': np.random.random((32,84)).astype("float32")
"""
self.inputs = {
'X': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X', 'Y'], 'Out', max_relative_error=0.05)
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.05,
no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.05,
no_grad_set=set('Y'))
def init_dtype(self):
pass
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_scalar(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(np.float32),
'Y': np.random.uniform(0.1, 1, [1]).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_Vector(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [100]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.attrs = {'axis': 0}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_3(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [10, 12]).astype("float32")
}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 10, 12, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_4(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype("float32")
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_broadcast_5(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype("float32")
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_commonuse_1(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [1, 1, 100]).astype("float32"),
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_commonuse_2(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [30, 3, 1, 5]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [30, 1, 4, 1]).astype("float32"),
}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivOp_xsize_lessthan_ysize(ElementwiseDivOp):
def setUp(self):
self.op_type = "elementwise_div"
self.inputs = {
'X': np.random.uniform(0.1, 1, [10, 12]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype("float32"),
}
self.attrs = {'axis': 2}
self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseDivBroadcast(unittest.TestCase):
def test_shape_with_batch_sizes(self):
with fluid.program_guard(fluid.Program()):
x_var = fluid.data(
name='x', dtype='float32', shape=[None, 3, None, None])
one = 2.
out = one / x_var
exe = fluid.Executor(fluid.XPUPlace(0))
x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype("float32")
out_result, = exe.run(feed={'x': x}, fetch_list=[out])
self.assertEqual((out_result == (2 / x)).all(), True)
class XPUTestElementwiseDivOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'elementwise_div'
self.use_dynamic_create_class = False
class ElementwiseDivOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_div"
self.dtype = self.in_type
self.init_dtype()
self.use_xpu = True
self.init_input_output()
""" Warning
CPU gradient check error!
'X': np.random.random((32,84)).astype("float32"),
'Y': np.random.random((32,84)).astype("float32")
"""
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X', 'Y'], 'Out', max_relative_error=0.05)
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.05,
no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.05,
no_grad_set=set('Y'))
def init_dtype(self):
pass
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseDivOp_scalar(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']}
class TestElementwiseDivOp_Vector(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.attrs = {'axis': 0}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
}
class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.outputs = {
'Out':
np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
}
class TestElementwiseDivOp_broadcast_3(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X':
np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.divide(self.inputs['X'],
self.inputs['Y'].reshape(1, 10, 12, 1))
}
class TestElementwiseDivOp_broadcast_4(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype(self.dtype)
}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseDivOp_broadcast_5(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X':
np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype(self.dtype)
}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseDivOp_commonuse_1(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [1, 1, 100]).astype(self.dtype),
}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseDivOp_commonuse_2(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X':
np.random.uniform(0.1, 1, [30, 3, 1, 5]).astype(self.dtype),
'Y':
np.random.uniform(0.1, 1, [30, 1, 4, 1]).astype(self.dtype),
}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseDivOp_xsize_lessthan_ysize(ElementwiseDivOp):
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype),
'Y':
np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype(self.dtype),
}
self.attrs = {'axis': 2}
self.outputs = {
'Out': np.divide(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseDivBroadcast(unittest.TestCase):
def test_shape_with_batch_sizes(self):
with fluid.program_guard(fluid.Program()):
x_var = fluid.data(
name='x', dtype='float32', shape=[None, 3, None, None])
one = 2.
out = one / x_var
exe = fluid.Executor(fluid.XPUPlace(0))
x = np.random.uniform(0.1, 0.6,
(1, 3, 32, 32)).astype('float32')
out_result, = exe.run(feed={'x': x}, fetch_list=[out])
self.assertEqual((out_result == (2 / x)).all(), True)
support_types = get_xpu_op_support_types('elementwise_div')
for stype in support_types:
create_test_class(globals(), XPUTestElementwiseDivOp, stype)
if __name__ == '__main__':
    unittest.main()
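A note on the axis attribute used throughout these tests: when Y has lower rank than X, axis names the dimension of X at which Y's dimensions are aligned, which is why every reference output reshapes Y with size-1 dimensions on either side. A numpy-only illustration (shapes borrowed from TestElementwiseDivOp_broadcast_1 above; no Paddle required):

import numpy as np

x = np.random.uniform(0.1, 1, (2, 100, 4)).astype('float32')
y = np.random.uniform(0.1, 1, (100,)).astype('float32')

# elementwise_div with axis=1 aligns y's only dimension with x's dim 1,
# so it behaves like dividing by y reshaped to (1, 100, 1).
out = x / y.reshape(1, 100, 1)
assert out.shape == (2, 100, 4)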
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,68 +20,66 @@
import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
import random
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestElementwiseModOp(XPUOpTest):
    def init_kernel_type(self):
        self.use_mkldnn = False

    def setUp(self):
        self.op_type = "elementwise_floordiv"
        self.dtype = np.float32
        self.axis = -1
        self.init_dtype()
        self.init_input_output()
        self.init_kernel_type()
        self.init_axis()
        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
        self.outputs = {'Out': self.out}

    def test_check_output(self):
        if paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            self.check_output_with_place(place)

    def init_input_output(self):
        self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
        self.y = np.random.uniform(0, 1000, [10, 10]).astype(self.dtype)
        self.out = np.floor_divide(self.x, self.y)

    def init_dtype(self):
        pass

    def init_axis(self):
        pass


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestElementwiseModOp_scalar(TestElementwiseModOp):
    def init_input_output(self):
        scale_x = random.randint(0, 100000000)
        scale_y = random.randint(1, 100000000)
        self.x = (np.random.rand(2, 3, 4) * scale_x).astype(self.dtype)
        self.y = (np.random.rand(1) * scale_y + 1).astype(self.dtype)
        self.out = np.floor_divide(self.x, self.y)


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestElementwiseModOpInverse(TestElementwiseModOp):
    def init_input_output(self):
        self.x = np.random.uniform(0, 10000, [10]).astype(self.dtype)
        self.y = np.random.uniform(0, 1000, [10, 10]).astype(self.dtype)
        self.out = np.floor_divide(self.x, self.y)


class XPUTestElementwiseModOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'elementwise_floordiv'
        self.use_dynamic_create_class = False

    class TestElementwiseModOp(XPUOpTest):
        def init_kernel_type(self):
            self.use_mkldnn = False

        def setUp(self):
            self.op_type = "elementwise_floordiv"
            self.dtype = self.in_type
            self.axis = -1
            self.init_input_output()
            self.init_kernel_type()
            self.init_axis()
            self.inputs = {
                'X': OpTest.np_dtype_to_fluid_dtype(self.x),
                'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
            }
            self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
            self.outputs = {'Out': self.out}

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_output_with_place(place)

        def init_input_output(self):
            self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
            self.y = np.random.uniform(1, 1000, [10, 10]).astype(self.dtype)
            self.out = np.floor_divide(self.x, self.y)

        def init_axis(self):
            pass

    class TestElementwiseModOp_scalar(TestElementwiseModOp):
        def init_input_output(self):
            scale_x = random.randint(0, 100000)
            scale_y = random.randint(1, 100000)
            self.x = (np.random.rand(2, 3, 4) * scale_x).astype(self.dtype)
            self.y = (np.random.rand(1) * scale_y + 1).astype(self.dtype)
            self.out = np.floor_divide(self.x, self.y)

    class TestElementwiseModOpInverse(TestElementwiseModOp):
        def init_input_output(self):
            self.x = np.random.uniform(0, 10000, [10]).astype(self.dtype)
            self.y = np.random.uniform(1, 1000, [10, 10]).astype(self.dtype)
            self.out = np.floor_divide(self.x, self.y)


support_types = get_xpu_op_support_types('elementwise_floordiv')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseModOp, stype)
if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,162 +18,154 @@
import numpy as np
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
import paddle
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestElementwiseOp(XPUOpTest):
    def setUp(self):
        self.use_xpu = True
        self.op_type = "elementwise_max"
        # If x and y have the same value, the max() is not differentiable.
        # So we generate test data by the following method
        # to avoid them being too close to each other.
        x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
        sgn = np.random.choice([-1, 1], [13, 17]).astype("float32")
        y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float32")
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}

    def test_check_output(self):
        if paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            self.check_output_with_place(place)

    def test_check_grad_normal(self):
        if paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            self.check_grad_with_place(place, ['X', 'Y'], 'Out')

    def test_check_grad_ingore_x(self):
        if paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            self.check_grad_with_place(
                place, ['Y'], 'Out',
                max_relative_error=0.006,
                no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        if paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            self.check_grad_with_place(
                place, ['X'], 'Out',
                max_relative_error=0.006,
                no_grad_set=set('Y'))


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestElementwiseMaxOp_scalar(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32")
        y = np.array([0.5]).astype("float32")
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestElementwiseMaxOp_Vector(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        x = np.random.random((100, )).astype("float32")
        sgn = np.random.choice([-1, 1], (100, )).astype("float32")
        y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float32")
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(np.float32)
        sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
        y = x[:, 0, 0] + sgn * \
            np.random.uniform(1, 2, (100, )).astype(np.float32)
        self.inputs = {'X': x, 'Y': y}
        self.attrs = {'axis': 0}
        self.outputs = {
            'Out':
            np.maximum(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
        }


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float32)
        sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
        y = x[0, :, 0] + sgn * \
            np.random.uniform(1, 2, (100, )).astype(np.float32)
        self.inputs = {'X': x, 'Y': y}
        self.attrs = {'axis': 1}
        self.outputs = {
            'Out':
            np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
        }


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(np.float32)
        sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
        y = x[0, 0, :] + sgn * \
            np.random.uniform(1, 2, (100, )).astype(np.float32)
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {
            'Out':
            np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
        }


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(np.float32)
        sgn = np.random.choice([-1, 1], (50, 2)).astype(np.float32)
        y = x[0, :, :, 0] + sgn * \
            np.random.uniform(1, 2, (50, 2)).astype(np.float32)
        self.inputs = {'X': x, 'Y': y}
        self.attrs = {'axis': 1}
        self.outputs = {
            'Out':
            np.maximum(self.inputs['X'], self.inputs['Y'].reshape(1, 50, 2, 1))
        }


@unittest.skipIf(not paddle.is_compiled_with_xpu(),
                 "core is not compiled with XPU")
class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_max"
        x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(np.float32)
        sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(np.float32)
        y = x + sgn * \
            np.random.uniform(1, 2, (2, 3, 1, 5)).astype(np.float32)
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}


class XPUTestElementwiseMaxOp(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'elementwise_max'
        self.use_dynamic_create_class = False

    class TestElementwiseOp(XPUOpTest):
        def setUp(self):
            self.use_xpu = True
            self.op_type = "elementwise_max"
            self.dtype = self.in_type
            self.init_input_output()

        # If x and y have the same value, the max() is not differentiable.
        # So we generate test data by the following method
        # to avoid them being too close to each other.
        def init_input_output(self):
            x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
            sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype)
            y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
            }

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_output_with_place(place)

        def test_check_grad_normal(self):
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_grad_with_place(place, ['X', 'Y'], 'Out')

        def test_check_grad_ingore_x(self):
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_grad_with_place(
                    place, ['Y'], 'Out',
                    max_relative_error=0.006,
                    no_grad_set=set("X"))

        def test_check_grad_ingore_y(self):
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_grad_with_place(
                    place, ['X'], 'Out',
                    max_relative_error=0.006,
                    no_grad_set=set('Y'))

    @skip_check_grad_ci(
        reason="[skip shape check] Use y_shape(1) to test broadcast.")
    class TestElementwiseMaxOp_scalar(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.random_integers(-5, 5, [2, 3, 20]).astype(self.dtype)
            y = np.array([0.5]).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseMaxOp_Vector(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.random((100, )).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
            y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
            }

    class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (100, 5, 2)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
            y = x[:, 0, 0] + sgn * \
                np.random.uniform(1, 2, (100, )).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.attrs = {'axis': 0}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'],
                                  self.inputs['Y'].reshape(100, 1, 1))
            }

    class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
            y = x[0, :, 0] + sgn * \
                np.random.uniform(1, 2, (100, )).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.attrs = {'axis': 1}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'],
                                  self.inputs['Y'].reshape(1, 100, 1))
            }

    class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (1, 3, 100)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
            y = x[0, 0, :] + sgn * \
                np.random.uniform(1, 2, (100, )).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'],
                                  self.inputs['Y'].reshape(1, 1, 100))
            }

    class TestElementwiseMaxOp_broadcast_3(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (2, 50, 2, 1)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (50, 2)).astype(self.dtype)
            y = x[0, :, :, 0] + sgn * \
                np.random.uniform(1, 2, (50, 2)).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.attrs = {'axis': 1}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'],
                                  self.inputs['Y'].reshape(1, 50, 2, 1))
            }

    class TestElementwiseMaxOp_broadcast_4(TestElementwiseOp):
        def init_input_output(self):
            x = np.random.uniform(0.5, 1, (2, 3, 4, 5)).astype(self.dtype)
            sgn = np.random.choice([-1, 1], (2, 3, 1, 5)).astype(self.dtype)
            y = x + sgn * \
                np.random.uniform(1, 2, (2, 3, 1, 5)).astype(self.dtype)
            self.inputs = {'X': x, 'Y': y}
            self.outputs = {
                'Out': np.maximum(self.inputs['X'], self.inputs['Y'])
            }


support_types = get_xpu_op_support_types('elementwise_max')
for stype in support_types:
    create_test_class(globals(), XPUTestElementwiseMaxOp, stype)
if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,161 +20,149 @@
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_min"
# If x and y have the same value, the min() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
x = np.random.uniform(0.1, 1, [13, 17]).astype("float32")
sgn = np.random.choice([-1, 1], [13, 17]).astype("float32")
y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype("float32")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out')
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.005,
no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.005,
no_grad_set=set('Y'))
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseMinOp_scalar(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.random_integers(-5, 5, [10, 3, 4]).astype("float32")
y = np.array([0.5]).astype("float32")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMinOp_Vector(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.random((100, )).astype("float32")
sgn = np.random.choice([-1, 1], (100, )).astype("float32")
y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float32")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(np.float32)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
y = x[:, 0, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 0}
self.outputs = {
'Out':
np.minimum(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(np.float32)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
y = x[0, :, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(np.float32)
sgn = np.random.choice([-1, 1], (100, )).astype(np.float32)
y = x[0, 0, :] + sgn * \
np.random.uniform(1, 2, (100, )).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out':
np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(np.float32)
sgn = np.random.choice([-1, 1], (25, 4)).astype(np.float32)
y = x[0, :, :, 0] + sgn * \
np.random.uniform(1, 2, (25, 4)).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.minimum(self.inputs['X'], self.inputs['Y'].reshape(1, 25, 4, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_min"
x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(np.float32)
sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(np.float32)
y = x + sgn * \
np.random.uniform(1, 2, (2, 10, 1, 5)).astype(np.float32)
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.minimum(self.inputs['X'], self.inputs['Y'])}
class XPUTestElementwiseMinOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'elementwise_min'
self.use_dynamic_create_class = False
class TestElementwiseOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_min"
# If x and y have the same value, the min() is not differentiable.
# So we generate test data by the following method
# to avoid them being too close to each other.
self.dtype = self.in_type
self.init_input_output()
def init_input_output(self):
x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
sgn = np.random.choice([-1, 1], [13, 17]).astype(self.dtype)
y = x + sgn * np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out')
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.005,
no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.005,
no_grad_set=set('Y'))
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseMinOp_scalar(TestElementwiseOp):
def init_input_output(self):
x = np.random.random_integers(-5, 5, [10, 3, 4]).astype(self.dtype)
y = np.array([0.5]).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseMinOp_Vector(TestElementwiseOp):
def init_input_output(self):
x = np.random.random((100, )).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseMinOp_broadcast_0(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (100, 3, 2)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[:, 0, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.attrs = {'axis': 0}
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'],
self.inputs['Y'].reshape(100, 1, 1))
}
class TestElementwiseMinOp_broadcast_1(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 100, 3)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[0, :, 0] + sgn * \
np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.attrs = {'axis': 1}
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'],
self.inputs['Y'].reshape(1, 100, 1))
}
class TestElementwiseMinOp_broadcast_2(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 3, 100)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (100, )).astype(self.dtype)
y = x[0, 0, :] + sgn * \
np.random.uniform(1, 2, (100, )).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'],
self.inputs['Y'].reshape(1, 1, 100))
}
class TestElementwiseMinOp_broadcast_3(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 25, 4, 1)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (25, 4)).astype(self.dtype)
y = x[0, :, :, 0] + sgn * \
np.random.uniform(1, 2, (25, 4)).astype(self.dtype)
self.attrs = {'axis': 1}
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'],
self.inputs['Y'].reshape(1, 25, 4, 1))
}
class TestElementwiseMinOp_broadcast_4(TestElementwiseOp):
def init_input_output(self):
x = np.random.uniform(0.5, 1, (2, 10, 2, 5)).astype(self.dtype)
sgn = np.random.choice([-1, 1], (2, 10, 1, 5)).astype(self.dtype)
y = x + sgn * \
np.random.uniform(1, 2, (2, 10, 1, 5)).astype(self.dtype)
self.inputs = {'X': x, 'Y': y}
self.outputs = {
'Out': np.minimum(self.inputs['X'], self.inputs['Y'])
}
support_types = get_xpu_op_support_types('elementwise_min')
for stype in support_types:
create_test_class(globals(), XPUTestElementwiseMinOp, stype)
if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,252 +20,212 @@
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class ElementwiseMulOp(XPUOpTest):
def init_kernel_type(self):
self.use_mkldnn = False
def setUp(self):
self.use_xpu = True
self.op_type = "elementwise_mul"
self.dtype = np.float32
self.axis = -1
self.init_dtype()
self.init_input_output()
self.init_kernel_type()
self.init_axis()
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
}
self.outputs = {'Out': self.out}
self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X', 'Y'],
'Out',
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
no_grad_set=set("X"),
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
no_grad_set=set('Y'),
check_dygraph=(self.use_mkldnn == False))
def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.out = np.multiply(self.x, self.y)
def init_dtype(self):
pass
def init_axis(self):
pass
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_scalar(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(10, 3, 4).astype(np.float32),
'Y': np.random.rand(1).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_Vector(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.random((100, )).astype("float32"),
'Y': np.random.random((100, )).astype("float32")
}
self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp):
def init_input_output(self):
self.x = np.random.rand(100, 2, 3).astype(self.dtype)
self.y = np.random.rand(100).astype(self.dtype)
self.out = self.x * self.y.reshape(100, 1, 1)
def init_axis(self):
self.axis = 0
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(2, 100, 3).astype(np.float32),
'Y': np.random.rand(100).astype(np.float32)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 100, 1)
}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float32),
'Y': np.random.rand(100).astype(np.float32)
}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 100)
}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(2, 10, 12, 3).astype(np.float32),
'Y': np.random.rand(10, 12).astype(np.float32)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1)
}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(10, 2, 11).astype(np.float32),
'Y': np.random.rand(10, 1, 11).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(10, 4, 2, 3).astype(np.float32),
'Y': np.random.rand(10, 4, 1, 3).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float32),
'Y': np.random.rand(1, 1, 100).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(30, 3, 1, 5).astype(np.float32),
'Y': np.random.rand(30, 1, 4, 1).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
def setUp(self):
self.op_type = "elementwise_mul"
self.inputs = {
'X': np.random.rand(10, 10).astype(np.float32),
'Y': np.random.rand(2, 2, 10, 10).astype(np.float32)
}
self.attrs = {'axis': 2}
self.outputs = {
'Out': self.inputs['X'].reshape(1, 1, 10, 10) * self.inputs['Y']
}
self.init_kernel_type()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseMulOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# the input of elementwise_mul must be Variable.
x1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
y1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
self.assertRaises(TypeError, fluid.layers.elementwise_mul, x1, y1)
# the input dtype of elementwise_mul must be float32
x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8")
y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8")
self.assertRaises(TypeError, fluid.layers.elementwise_mul, x2, y2)
class XPUTestElementwiseMulOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'elementwise_mul'
self.use_dynamic_create_class = False
class ElementwiseMulOp(XPUOpTest):
def init_kernel_type(self):
self.use_mkldnn = False
def setUp(self):
self.op_type = 'elementwise_mul'
self.use_xpu = True
self.dtype = self.in_type
self.axis = -1
self.init_dtype()
self.init_input_output()
self.init_kernel_type()
self.init_axis()
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X', 'Y'],
'Out',
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_ingore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
no_grad_set=set("X"),
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_ingore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
no_grad_set=set('Y'),
check_dygraph=(self.use_mkldnn == False))
def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.out = np.multiply(self.x, self.y)
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(self.x),
'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
}
self.outputs = {'Out': self.out}
self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn}
def init_dtype(self):
pass
def init_axis(self):
pass
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseMulOp_scalar(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 3, 4).astype(self.dtype),
'Y': np.random.rand(1).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
class TestElementwiseMulOp_Vector(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.random((100, )).astype(self.dtype),
'Y': np.random.random((100, )).astype(self.dtype)
}
self.outputs = {
'Out': np.multiply(self.inputs['X'], self.inputs['Y'])
}
class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(100, 2, 3).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(100, 1, 1)
}
self.attrs = {'axis': 0}
class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 100, 3).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 100, 1)
}
class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 100)
}
class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 10, 12, 3).astype(self.dtype),
'Y': np.random.rand(10, 12).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1)
}
class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 2, 11).astype(self.dtype),
'Y': np.random.rand(10, 1, 11).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 4, 2, 3).astype(self.dtype),
'Y': np.random.rand(10, 4, 1, 3).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(self.dtype),
'Y': np.random.rand(1, 1, 100).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(30, 3, 1, 5).astype(self.dtype),
'Y': np.random.rand(30, 1, 4, 1).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']}
class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 10).astype(self.dtype),
'Y': np.random.rand(2, 2, 10, 10).astype(self.dtype)
}
self.attrs = {'axis': 2}
self.outputs = {
'Out': self.inputs['X'].reshape(1, 1, 10, 10) * self.inputs['Y']
}
class TestElementwiseMulOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# the input of elementwise_mul must be Variable.
x1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
y1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0))
self.assertRaises(TypeError, fluid.layers.elementwise_mul, x1,
y1)
# the input dtype of elementwise_mul must be float32
x2 = fluid.layers.data(
name='x2', shape=[3, 4, 5, 6], dtype="uint8")
y2 = fluid.layers.data(
name='y2', shape=[3, 4, 5, 6], dtype="uint8")
self.assertRaises(TypeError, fluid.layers.elementwise_mul, x2,
y2)
support_types = get_xpu_op_support_types('elementwise_mul')
for stype in support_types:
create_test_class(globals(), XPUTestElementwiseMulOp, stype)
if __name__ == '__main__':
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,163 +20,140 @@
import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(1, 2, [20, 5]).astype("float32"),
'Y': np.random.uniform(1, 2, [20, 5]).astype("float32")
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out')
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_big_shape_1(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(1, 2, [10, 10]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [10, 10]).astype("float32")
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_big_shape_2(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(1, 2, [10, 10]).astype("float32"),
'Y': np.random.uniform(0.2, 2, [10, 10]).astype("float32")
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwisePowOp_scalar(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(np.float32),
'Y': np.random.uniform(0.1, 1, [1]).astype(np.float32)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_tensor(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [100]).astype("float32"),
'Y': np.random.uniform(1, 3, [100]).astype("float32")
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [100]).astype("float32")
}
self.attrs = {'axis': 0}
self.outputs = {
'Out':
np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [20, 5]).astype("float32")
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.power(self.inputs['X'], self.inputs['Y'].reshape(1, 20, 5,
1))
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype("float32")
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwisePowOpInt(OpTest):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {'X': np.asarray([1, 3, 6]), 'Y': np.asarray([1, 1, 1])}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
self.check_output()
@skip_check_grad_ci(reason="XPU does not support grad op currently")
class XPUTestElementwisePowOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'elementwise_pow'
self.use_dynamic_create_class = False
class TestElementwisePowOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_pow"
self.dtype = self.in_type
self.__class__.no_need_check_grad = True
self.compute_input_output()
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(1, 2, [20, 5]).astype(self.dtype),
'Y': np.random.uniform(1, 2, [20, 5]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place)
class TestElementwisePowOp_big_shape_1(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(1, 2, [10, 10]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [10, 10]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
class TestElementwisePowOp_big_shape_2(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(1, 2, [10, 10]).astype(self.dtype),
'Y': np.random.uniform(0.2, 2, [10, 10]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwisePowOp_scalar(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [1]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
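        # 'Y' has shape (1,), so NumPy broadcasts the single exponent across
        # every element of 'X'; the gradient shape check is skipped for this
        # case by the skip_check_grad_ci decorator above.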
class TestElementwisePowOp_tensor(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [100]).astype(self.dtype),
'Y': np.random.uniform(1, 3, [100]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out':
np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1))
}
class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [100]).astype(self.dtype)
}
self.attrs = {'axis': 0}
self.outputs = {
'Out':
np.power(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1))
}
class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X':
np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [20, 5]).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': np.power(self.inputs['X'],
self.inputs['Y'].reshape(1, 20, 5, 1))
}
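        # Worked example of the 'axis' attribute: X has shape (2, 20, 5, 1)
        # and Y has shape (20, 5). With axis=1, Y's dimensions are aligned to
        # X's dimensions starting at index 1, i.e. Y behaves as (1, 20, 5, 1),
        # which is exactly the reshape used for the reference output above.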
class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
def compute_input_output(self):
self.inputs = {
'X':
np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype(self.dtype)
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
class TestElementwisePowOpInt(OpTest):
def setUp(self):
self.op_type = "elementwise_pow"
self.inputs = {
'X': np.asarray([1, 3, 6]),
'Y': np.asarray([1, 1, 1])
}
self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
def test_check_output(self):
self.check_output()
support_types = get_xpu_op_support_types('elementwise_pow')
for stype in support_types:
create_test_class(globals(), XPUTestElementwisePowOp, stype)
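# Note on the pattern above: XPUOpTestWrapper groups the per-shape cases as
# inner classes, and create_test_class() is expected to register one concrete
# unittest class per dtype returned by get_xpu_op_support_types(), wiring the
# dtype in via self.in_type. The generated class names (e.g. a float32 variant
# of TestElementwisePowOp) are produced by the helper, not spelled out here.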
if __name__ == '__main__':
    unittest.main()


# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,191 +19,164 @@
import paddle
from op_test import OpTest, skip_check_grad_ci
from op_test_xpu import XPUOpTest
import unittest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseOp(OpTest):
def setUp(self):
self.use_xpu = True
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32"),
'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32")
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place, atol=1e-3)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out')
    def test_check_grad_ignore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.005,
no_grad_set=set("X"))
    def test_check_grad_ignore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.005,
no_grad_set=set('Y'))
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseSubOp_scalar(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(10, 3, 4).astype(np.float32),
'Y': np.random.rand(1).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_Vector(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.random((100, )).astype("float32"),
'Y': np.random.random((100, )).astype("float32")
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(100, 3, 2).astype(np.float32),
'Y': np.random.rand(100).astype(np.float32)
}
self.attrs = {'axis': 0}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(100, 1, 1)
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 100, 3).astype(np.float32),
'Y': np.random.rand(100).astype(np.float32)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 100, 1)
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float32),
'Y': np.random.rand(100).astype(np.float32)
}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 100)
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 10, 12, 3).astype(np.float32),
'Y': np.random.rand(10, 12).astype(np.float32)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 10, 12, 1)
}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 5, 3, 12).astype(np.float32),
'Y': np.random.rand(2, 5, 1, 12).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(np.float32),
'Y': np.random.rand(1, 1, 100).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(10, 3, 1, 4).astype(np.float32),
'Y': np.random.rand(10, 1, 12, 1).astype(np.float32)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
@unittest.skipIf(not paddle.is_compiled_with_xpu(),
"core is not compiled with XPU")
class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_sub"
self.inputs = {
'X': np.random.rand(10, 12).astype(np.float32),
'Y': np.random.rand(2, 3, 10, 12).astype(np.float32)
}
self.attrs = {'axis': 2}
self.outputs = {
'Out': self.inputs['X'].reshape(1, 1, 10, 12) - self.inputs['Y']
}
class XPUTestElementwiseSubOp(XPUOpTestWrapper):
def __init__(self):
self.op_name = 'elementwise_sub'
self.use_dynamic_create_class = False
class TestElementwiseOp(XPUOpTest):
def setUp(self):
self.op_type = "elementwise_sub"
self.use_xpu = True
self.dtype = self.in_type
self.init_input_output()
def init_input_output(self):
self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype),
'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
def test_check_output(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_output_with_place(place, atol=1e-3)
def test_check_grad_normal(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X', 'Y'], 'Out')
        def test_check_grad_ignore_x(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['Y'],
'Out',
max_relative_error=0.005,
no_grad_set=set("X"))
        def test_check_grad_ignore_y(self):
if paddle.is_compiled_with_xpu():
place = paddle.XPUPlace(0)
self.check_grad_with_place(
place, ['X'],
'Out',
max_relative_error=0.005,
no_grad_set=set('Y'))
@skip_check_grad_ci(
reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestElementwiseSubOp_scalar(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 3, 4).astype(self.dtype),
'Y': np.random.rand(1).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_Vector(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.random((100, )).astype(self.dtype),
'Y': np.random.random((100, )).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(100, 3, 2).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.attrs = {'axis': 0}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(100, 1, 1)
}
class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 100, 3).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 100, 1)
}
class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(self.dtype),
'Y': np.random.rand(100).astype(self.dtype)
}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 100)
}
class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 10, 12, 3).astype(self.dtype),
'Y': np.random.rand(10, 12).astype(self.dtype)
}
self.attrs = {'axis': 1}
self.outputs = {
'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 10, 12, 1)
}
class TestElementwiseSubOp_broadcast_4(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 5, 3, 12).astype(self.dtype),
'Y': np.random.rand(2, 5, 1, 12).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_commonuse_1(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(2, 3, 100).astype(self.dtype),
'Y': np.random.rand(1, 1, 100).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_commonuse_2(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 3, 1, 4).astype(self.dtype),
'Y': np.random.rand(10, 1, 12, 1).astype(self.dtype)
}
self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}
class TestElementwiseSubOp_xsize_lessthan_ysize(TestElementwiseOp):
def init_input_output(self):
self.inputs = {
'X': np.random.rand(10, 12).astype(self.dtype),
'Y': np.random.rand(2, 3, 10, 12).astype(self.dtype)
}
self.attrs = {'axis': 2}
self.outputs = {
'Out': self.inputs['X'].reshape(1, 1, 10, 12) - self.inputs['Y']
}
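        # Here X (10, 12) is the smaller operand: with axis=2 its dimensions
        # align with dimensions 2 and 3 of Y (2, 3, 10, 12), so the reference
        # output is computed by viewing X as (1, 1, 10, 12) and broadcasting
        # the subtraction over Y's leading dimensions.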
support_types = get_xpu_op_support_types('elementwise_sub')
for stype in support_types:
create_test_class(globals(), XPUTestElementwiseSubOp, stype)
if __name__ == '__main__':
    unittest.main()


# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,9 +18,10 @@
import unittest
import numpy as np
import sys
sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid.core as core
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()
@@ -41,249 +42,130 @@ def numpy_topk(x, k=1, axis=-1, largest=True):
    return value, indices
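# numpy_topk() above is the NumPy reference used to build expected outputs.
# Assuming it mirrors paddle.topk (descending order when largest=True), a
# small worked example along the last axis would be:
#
#   numpy_topk(np.array([[1., 3., 2.]]), k=2, axis=-1, largest=True)
#   # -> values [[3., 2.]], indices [[1, 2]]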

class TestTopkOp(OpTest):
    def init_args(self):
        self.k = 3
        self.axis = 1
        self.largest = True

    def setUp(self):
        self.op_type = "top_k_v2"
        self.dtype = np.float32
        self.input_data = np.random.rand(10, 20)
        self.init_args()
        self.inputs = {'X': self.input_data}
        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
        output, indices = numpy_topk(
            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
        self.outputs = {'Out': output, 'Indices': indices}

    def test_check_output(self):
        if paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            self.check_output_with_place(place)

    def test_check_grad(self):
        if paddle.is_compiled_with_xpu():
            place = paddle.XPUPlace(0)
            self.check_grad(set(['X']), 'Out')


class TestTopkOp1(TestTopkOp):
    def init_args(self):
        self.k = 3
        self.axis = 1
        self.largest = True

    def setUp(self):
        self.op_type = "top_k_v2"
        self.dtype = np.float32
        self.input_data = np.random.rand(10, 10, 5)
        self.init_args()
        self.inputs = {'X': self.input_data}
        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
        output, indices = numpy_topk(
            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
        self.outputs = {'Out': output, 'Indices': indices}


class TestTopkOp2(TestTopkOp):
    def init_args(self):
        self.k = 3
        self.axis = 1
        self.largest = True

    def setUp(self):
        self.op_type = "top_k_v2"
        self.dtype = np.float32
        self.input_data = np.random.rand(10, 10, 5)
        self.init_args()
        self.inputs = {'X': self.input_data}
        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
        output, indices = numpy_topk(
            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
        self.outputs = {'Out': output, 'Indices': indices}


class TestTopkOp3(TestTopkOp):
    def init_args(self):
        self.k = 5
        self.axis = 1
        self.largest = True

    def setUp(self):
        self.op_type = "top_k_v2"
        self.dtype = np.float32
        self.input_data = np.random.rand(10, 10, 5)
        self.init_args()
        self.inputs = {'X': self.input_data}
        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
        output, indices = numpy_topk(
            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
        self.outputs = {'Out': output, 'Indices': indices}


class TestTopkOp4(TestTopkOp):
    def init_args(self):
        self.k = 1
        self.axis = 1
        self.largest = True

    def setUp(self):
        self.op_type = "top_k_v2"
        self.dtype = np.float32
        self.input_data = np.random.rand(10, 10, 5)
        self.init_args()
        self.inputs = {'X': self.input_data}
        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
        output, indices = numpy_topk(
            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
        self.outputs = {'Out': output, 'Indices': indices}


class TestTopkOp5(TestTopkOp):
    def init_args(self):
        self.k = 3
        self.axis = 2
        self.largest = True

    def setUp(self):
        self.op_type = "top_k_v2"
        self.dtype = np.float32
        self.input_data = np.random.rand(10, 10, 5)
        self.init_args()
        self.inputs = {'X': self.input_data}
        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
        output, indices = numpy_topk(
            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
        self.outputs = {'Out': output, 'Indices': indices}


class TestTopkOp6(TestTopkOp):
    def init_args(self):
        self.k = 5
        self.axis = 1
        self.largest = True

    def setUp(self):
        self.op_type = "top_k_v2"
        self.dtype = np.float32
        self.input_data = np.random.rand(8, 32, 64)
        self.init_args()
        self.inputs = {'X': self.input_data}
        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
        output, indices = numpy_topk(
            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
        self.outputs = {'Out': output, 'Indices': indices}


class TestTopkOp7(TestTopkOp):
    def init_args(self):
        self.k = 10
        self.axis = 2
        self.largest = True

    def setUp(self):
        self.op_type = "top_k_v2"
        self.dtype = np.float32
        self.input_data = np.random.rand(8, 5, 10, 16)
        self.init_args()
        self.inputs = {'X': self.input_data}
        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
        output, indices = numpy_topk(
            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
        self.outputs = {'Out': output, 'Indices': indices}


class TestTopkOp8(TestTopkOp):
    def init_args(self):
        self.k = 1
        self.axis = 1
        self.largest = True

    def setUp(self):
        self.op_type = "top_k_v2"
        self.dtype = np.float32
        self.input_data = np.random.rand(8, 32, 64)
        self.init_args()
        self.inputs = {'X': self.input_data}
        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
        output, indices = numpy_topk(
            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
        self.outputs = {'Out': output, 'Indices': indices}


class TestTopkOp9(TestTopkOp):
    def init_args(self):
        self.k = 3
        self.axis = 1
        self.largest = True

    def setUp(self):
        self.op_type = "top_k_v2"
        self.dtype = np.float32
        self.input_data = np.random.rand(10, 10, 5)
        self.init_args()
        self.inputs = {'X': self.input_data}
        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
        output, indices = numpy_topk(
            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
        self.outputs = {'Out': output, 'Indices': indices}


class TestTopkOp10(TestTopkOp):
    def init_args(self):
        self.k = 3
        self.axis = 1
        self.largest = True

    def setUp(self):
        self.op_type = "top_k_v2"
        self.dtype = np.float32
        self.input_data = np.random.rand(10, 10, 5)
        self.init_args()
        self.inputs = {'X': self.input_data}
        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
        output, indices = numpy_topk(
            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
        self.outputs = {'Out': output, 'Indices': indices}


class TestTopkOp11(TestTopkOp):
    def init_args(self):
        self.k = 5
        self.axis = 1
        self.largest = True

    def setUp(self):
        self.op_type = "top_k_v2"
        self.dtype = np.float32
        self.input_data = np.random.rand(10, 10, 5)
        self.init_args()
        self.inputs = {'X': self.input_data}
        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
        output, indices = numpy_topk(
            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
        self.outputs = {'Out': output, 'Indices': indices}


class TestTopkOp12(TestTopkOp):
    def init_args(self):
        self.k = 1
        self.axis = 1
        self.largest = True

    def setUp(self):
        self.op_type = "top_k_v2"
        self.dtype = np.float32
        self.input_data = np.random.rand(10, 10, 5)
        self.init_args()
        self.inputs = {'X': self.input_data}
        self.attrs = {'k': self.k, 'axis': self.axis, 'largest': self.largest}
        output, indices = numpy_topk(
            self.input_data, axis=self.axis, k=self.k, largest=self.largest)
        self.outputs = {'Out': output, 'Indices': indices}


class XPUTestTopKV2Op(XPUOpTestWrapper):
    def __init__(self):
        self.op_name = 'top_k_v2'
        self.use_dynamic_create_class = False

    class TestTopkOp(XPUOpTest):
        def init_args(self):
            self.k = 3
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 20).astype(self.dtype)

        def setUp(self):
            self.op_type = "top_k_v2"
            self.dtype = self.in_type
            self.init_args()
            self.inputs = {'X': self.input_data}
            self.attrs = {
                'k': self.k,
                'axis': self.axis,
                'largest': self.largest
            }
            output, indices = numpy_topk(
                self.input_data, axis=self.axis, k=self.k,
                largest=self.largest)
            self.outputs = {'Out': output, 'Indices': indices}

        def test_check_output(self):
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_output_with_place(place)

        def test_check_grad(self):
            if paddle.is_compiled_with_xpu():
                place = paddle.XPUPlace(0)
                self.check_grad(set(['X']), 'Out')

    class TestTopkOp1(TestTopkOp):
        def init_args(self):
            self.k = 3
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(100, 155).astype(self.dtype)

    class TestTopkOp2(TestTopkOp):
        def init_args(self):
            self.k = 3
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp3(TestTopkOp):
        def init_args(self):
            self.k = 5
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp4(TestTopkOp):
        def init_args(self):
            self.k = 1
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp5(TestTopkOp):
        def init_args(self):
            self.k = 3
            self.axis = 2
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp6(TestTopkOp):
        def init_args(self):
            self.k = 5
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(8, 32, 64).astype(self.dtype)

    class TestTopkOp7(TestTopkOp):
        def init_args(self):
            self.k = 10
            self.axis = 2
            self.largest = True
            self.input_data = np.random.rand(8, 5, 10, 16).astype(self.dtype)

    class TestTopkOp8(TestTopkOp):
        def init_args(self):
            self.k = 1
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(8, 32, 64).astype(self.dtype)

    class TestTopkOp9(TestTopkOp):
        def init_args(self):
            self.k = 3
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp10(TestTopkOp):
        def init_args(self):
            self.k = 3
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp11(TestTopkOp):
        def init_args(self):
            self.k = 5
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)

    class TestTopkOp12(TestTopkOp):
        def init_args(self):
            self.k = 1
            self.axis = 1
            self.largest = True
            self.input_data = np.random.rand(10, 10, 5).astype(self.dtype)


support_types = get_xpu_op_support_types('top_k_v2')
for stype in support_types:
    create_test_class(globals(), XPUTestTopKV2Op, stype)
if __name__ == "__main__":
    unittest.main()