Commit 3da094fd — Author: Kexin Zhao

rearrange test

Parent commit: 4bf168b2
...@@ -17,14 +17,14 @@ limitations under the License. */ ...@@ -17,14 +17,14 @@ limitations under the License. */
/* CUDA kernel registrations for the elementwise_add operator.
 * Adds a float16 kernel alongside the existing float/double/int/int64_t
 * kernels. */
#include "paddle/fluid/platform/float16.h"

namespace ops = paddle::operators;
// NOTE: fixes the "padddle" typo present in the old side of the diff.
namespace plat = paddle::platform;

REGISTER_OP_CUDA_KERNEL(
    elementwise_add, ops::ElementwiseAddKernel<plat::CUDADeviceContext, float>,
    ops::ElementwiseAddKernel<plat::CUDADeviceContext, double>,
    ops::ElementwiseAddKernel<plat::CUDADeviceContext, int>,
    ops::ElementwiseAddKernel<plat::CUDADeviceContext, int64_t>,
    ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL( REGISTER_OP_CUDA_KERNEL(
elementwise_add_grad, elementwise_add_grad,
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, float>, ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, float>,
......
...@@ -21,15 +21,17 @@ class TestElementwiseAddOp(OpTest): ...@@ -21,15 +21,17 @@ class TestElementwiseAddOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "elementwise_add" self.op_type = "elementwise_add"
self.dtype = np.float32 self.dtype = np.float32
init_dtype() self.axis = -1
self.init_dtype()
self.init_input_output()
self.init_axis()
x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.inputs = { self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(x), 'X': OpTest.np_dtype_to_fluid_dtype(self.x),
'Y': OpTest.np_dtype_to_fluid_dtype(y) 'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
} }
self.outputs = {'Out': np.add(x, y)} self.attrs = {'axis': self.axis}
self.outputs = {'Out': self.out}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output()
...@@ -51,12 +53,20 @@ class TestElementwiseAddOp(OpTest): ...@@ -51,12 +53,20 @@ class TestElementwiseAddOp(OpTest):
self.check_grad( self.check_grad(
['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')) ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
def init_dtype(): def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype)
self.out = np.add(self.x, self.y)
def init_dtype(self):
pass
def init_axis(self):
pass pass
class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
    """Re-runs the elementwise_add tests with float16 data."""

    def init_dtype(self):
        self.dtype = np.float16
def test_check_output(self): def test_check_output(self):
...@@ -67,130 +77,179 @@ class TestFP16ElementwiseAddOp(TestElementwiseAddOp): ...@@ -67,130 +77,179 @@ class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
    """Broadcast a one-element y over a (2, 3, 4) x."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y
class TestFP16ElementwiseAddOp_scalar(TestFP16ElementwiseAddOp):
    """float16 variant: broadcast a one-element y over a (2, 3, 4) x."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y
class TestElementwiseAddOp_scalar2(TestElementwiseAddOp):
    """Broadcast a (1, 1) y over a (2, 3, 4) x."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y
class TestFP16ElementwiseAddOp_scalar2(TestFP16ElementwiseAddOp):
    """float16 variant: broadcast a (1, 1) y over a (2, 3, 4) x."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(1, 1).astype(self.dtype)
        self.out = self.x + self.y
class TestElementwiseAddOp_Vector(TestElementwiseAddOp):
    """Add two 1-D vectors of length 32."""

    def init_input_output(self):
        self.x = np.random.random((32, )).astype(self.dtype)
        self.y = np.random.random((32, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)
class TestFP16ElementwiseAddOp_Vector(TestFP16ElementwiseAddOp):
    """float16 variant: add two 1-D vectors of length 32."""

    def init_input_output(self):
        self.x = np.random.random((32, )).astype(self.dtype)
        self.y = np.random.random((32, )).astype(self.dtype)
        self.out = np.add(self.x, self.y)
class TestElementwiseAddOp_broadcast_0(TestElementwiseAddOp):
    """Broadcast y of shape (2,) along axis 0 of a (2, 3, 4) x."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(2).astype(self.dtype)
        self.out = self.x + self.y.reshape(2, 1, 1)

    def init_axis(self):
        self.axis = 0
class TestFP16ElementwiseAddOp_broadcast_0(TestFP16ElementwiseAddOp):
    """float16 variant: broadcast y of shape (2,) along axis 0."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(2).astype(self.dtype)
        self.out = self.x + self.y.reshape(2, 1, 1)

    def init_axis(self):
        self.axis = 0
class TestElementwiseAddOp_broadcast_1(TestElementwiseAddOp):
    """Broadcast y of shape (3,) along axis 1 of a (2, 3, 4) x."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(3).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 3, 1)

    def init_axis(self):
        self.axis = 1
class TestFP16ElementwiseAddOp_broadcast_1(TestFP16ElementwiseAddOp):
    """float16 variant: broadcast y of shape (3,) along axis 1."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(3).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 3, 1)

    def init_axis(self):
        self.axis = 1
class TestElementwiseAddOp_broadcast_2(TestElementwiseAddOp):
    """Broadcast y of shape (4,) along the last axis (default axis=-1)."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(4).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 4)
class TestFP16ElementwiseAddOp_broadcast_2(TestFP16ElementwiseAddOp):
    """float16 variant: broadcast y of shape (4,) along the last axis."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(4).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1, 4)
class TestElementwiseAddOp_broadcast_3(TestElementwiseAddOp):
    """Broadcast y of shape (3, 4) over the middle axes of a (2, 3, 4, 5) x."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype)
        self.y = np.random.rand(3, 4).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 3, 4, 1)

    def init_axis(self):
        self.axis = 1
class TestFP16ElementwiseAddOp_broadcast_3(TestFP16ElementwiseAddOp):
    """float16 variant: broadcast y of shape (3, 4) over the middle axes."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype)
        self.y = np.random.rand(3, 4).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 3, 4, 1)

    def init_axis(self):
        self.axis = 1
class TestElementwiseAddOp_broadcast_4(TestElementwiseAddOp):
    """Broadcast y of shape (2, 1) along axis 0 of a (2, 3, 4, 5) x."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype)
        self.y = np.random.rand(2, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(2, 1, 1, 1)

    def init_axis(self):
        self.axis = 0
class TestFP16ElementwiseAddOp_broadcast_4(TestFP16ElementwiseAddOp):
    """float16 variant: broadcast y of shape (2, 1) along axis 0."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype)
        self.y = np.random.rand(2, 1).astype(self.dtype)
        self.out = self.x + self.y.reshape(2, 1, 1, 1)

    def init_axis(self):
        self.axis = 0
class TestElementwiseAddOp_rowwise_add_0(TestElementwiseAddOp):
    """Row-wise add: y of shape (3, 4) broadcast from axis 1 of a (2, 3, 4) x."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(3, 4).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 3, 4)

    def init_axis(self):
        self.axis = 1
class TestFP16ElementwiseAddOp_rowwise_add_0(TestFP16ElementwiseAddOp):
    """float16 variant: row-wise add with y of shape (3, 4)."""

    def init_input_output(self):
        self.x = np.random.rand(2, 3, 4).astype(self.dtype)
        self.y = np.random.rand(3, 4).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 3, 4)

    def init_axis(self):
        self.axis = 1
class TestElementwiseAddOp_rowwise_add_1(TestElementwiseAddOp):
    """Row-wise add: y of shape (1,) broadcast from axis 1 of a (2, 1) x."""

    def init_input_output(self):
        self.x = np.random.rand(2, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1
class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp):
    """float16 variant: row-wise add with y of shape (1,)."""

    def init_input_output(self):
        self.x = np.random.rand(2, 1).astype(self.dtype)
        self.y = np.random.rand(1).astype(self.dtype)
        self.out = self.x + self.y.reshape(1, 1)

    def init_axis(self):
        self.axis = 1
if __name__ == '__main__': if __name__ == '__main__':
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册