Commit e967d19b authored by Kexin Zhao

add more tests

Parent commit: a13ec343
...@@ -282,7 +282,8 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> { ...@@ -282,7 +282,8 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
platform::CUDAPlace gpu = boost::get<platform::CUDAPlace>(ctx.GetPlace()); platform::CUDAPlace gpu = boost::get<platform::CUDAPlace>(ctx.GetPlace());
cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes);
// ------------------- cudnn conv backward data --------------------- // ------------------- cudnn conv backward data ---------------------
T alpha = 1.0f, beta = 0.0f; typename platform::CudnnDataType<T>::ScalingParamType alpha = 1.0f,
beta = 0.0f;
if (input_grad) { if (input_grad) {
T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace()); T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
// Because beta is zero, it is unnecessary to reset input_grad. // Because beta is zero, it is unnecessary to reset input_grad.
......
...@@ -470,29 +470,26 @@ class OpTest(unittest.TestCase): ...@@ -470,29 +470,26 @@ class OpTest(unittest.TestCase):
return tensor return tensor
@staticmethod @staticmethod
def create_view(input): def np_dtype_to_fluid_dtype(input):
"""Create a view of the input numpy array """Change the dtype of float16 numpy array
numpy float16 is binded to paddle::platform::float16 numpy float16 is binded to paddle::platform::float16
in tensor_py.h via the help of numpy uint16 because in tensor_py.h via the help of uint16 data type since
the internal memory representation of float16 is the internal memory representation of float16 is
uint16_t in paddle or np.uint16 in numpy, which are uint16_t in paddle and np.uint16 in numpy, which are
themselves binded together. themselves binded together by pybind.
Args: Args:
input: input numpy array input: input numpy array
Returns: Returns:
input_view: if the dtype of input is np.float16, input_view input: if the dtype of input is np.float16, its dtype will be
will reinterpret input as with dtype np.uint16. changed to np.uint16 so that the internal memory will be
Otherwise, input_view will be input itself. reinterpreted input as of dtype np.uint16.
""" """
if input.dtype == np.float16: if input.dtype == np.float16:
# view will only reinterpret memory without copying input.dtype = np.uint16
input_view = input.view(np.uint16) return input
else:
input_view = input
return input_view
def _get_gradient(self, input_to_check, place, output_names, no_grad_set): def _get_gradient(self, input_to_check, place, output_names, no_grad_set):
prog = Program() prog = Program()
......
...@@ -82,18 +82,9 @@ class TestConv2dOp(OpTest): ...@@ -82,18 +82,9 @@ class TestConv2dOp(OpTest):
output = conv2d_forward_naive(input, filter, self.groups, output = conv2d_forward_naive(input, filter, self.groups,
conv2d_param).astype(self.dtype) conv2d_param).astype(self.dtype)
# numpy float16 is binded to paddle::platform::float16
# in tensor_py.h via the help of numpy uint16 because
# the internal memory representation of float16 is
# uint16_t in paddle or np.uint16 in numpy, which are
# themselves binded together.
self.inputs = { self.inputs = {
#'Input': (input.view(np.uint16) 'Input': OpTest.np_dtype_to_fluid_dtype(input),
# if self.dtype == np.float16 else input), 'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
#'Filter': (filter.view(np.uint16)
# if self.dtype == np.float16 else filter)
'Input': OpTest.create_view(input),
'Filter': OpTest.create_view(filter)
} }
self.attrs = { self.attrs = {
'strides': self.stride, 'strides': self.stride,
...@@ -113,6 +104,8 @@ class TestConv2dOp(OpTest): ...@@ -113,6 +104,8 @@ class TestConv2dOp(OpTest):
self.check_output() self.check_output()
def test_check_grad(self): def test_check_grad(self):
if self.dtype == np.float16:
return
if self.use_cudnn: if self.use_cudnn:
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(
...@@ -125,6 +118,8 @@ class TestConv2dOp(OpTest): ...@@ -125,6 +118,8 @@ class TestConv2dOp(OpTest):
set(['Input', 'Filter']), 'Output', max_relative_error=0.02) set(['Input', 'Filter']), 'Output', max_relative_error=0.02)
def test_check_grad_no_filter(self): def test_check_grad_no_filter(self):
if self.dtype == np.float16:
return
if self.use_cudnn: if self.use_cudnn:
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(
...@@ -140,6 +135,8 @@ class TestConv2dOp(OpTest): ...@@ -140,6 +135,8 @@ class TestConv2dOp(OpTest):
no_grad_set=set(['Filter'])) no_grad_set=set(['Filter']))
def test_check_grad_no_input(self): def test_check_grad_no_input(self):
if self.dtype == np.float16:
return
if self.use_cudnn: if self.use_cudnn:
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_grad_with_place( self.check_grad_with_place(
...@@ -259,15 +256,6 @@ class TestFP16CUDNN(TestCUDNN): ...@@ -259,15 +256,6 @@ class TestFP16CUDNN(TestCUDNN):
if core.is_float16_supported(place): if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2) self.check_output_with_place(place, atol=2e-2)
def test_check_grad(self):
pass
def test_check_grad_no_filter(self):
pass
def test_check_grad_no_input(self):
pass
class TestCUDNNWithPad(TestWithPad): class TestCUDNNWithPad(TestWithPad):
def init_op_type(self): def init_op_type(self):
...@@ -275,30 +263,85 @@ class TestCUDNNWithPad(TestWithPad): ...@@ -275,30 +263,85 @@ class TestCUDNNWithPad(TestWithPad):
self.op_type = "conv2d" self.op_type = "conv2d"
class TestFP16CUDNNWithPad(TestCUDNNWithPad):
    """float16 variant of TestCUDNNWithPad (forward output check only)."""

    def init_data_type(self):
        # Run the padded conv2d case in half precision.
        self.dtype = np.float16

    def test_check_output(self):
        # FP16 kernels require a CUDA device with float16 support.
        if not core.is_compiled_with_cuda():
            return
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            # Loosened tolerance for reduced fp16 precision.
            self.check_output_with_place(place, atol=2e-2)
class TestCUDNNWithStride(TestWithStride):
    """Strided conv2d case executed through the cuDNN backend."""

    def init_op_type(self):
        # Same test data as TestWithStride, dispatched to cuDNN.
        self.op_type = "conv2d"
        self.use_cudnn = True
class TestFP16CUDNNWithStride(TestCUDNNWithStride):
    """float16 variant of TestCUDNNWithStride (forward output check only)."""

    def init_data_type(self):
        # Run the strided conv2d case in half precision.
        self.dtype = np.float16

    def test_check_output(self):
        # FP16 kernels require a CUDA device with float16 support.
        if not core.is_compiled_with_cuda():
            return
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            # Loosened tolerance for reduced fp16 precision.
            self.check_output_with_place(place, atol=2e-2)
class TestCUDNNWithGroup(TestWithGroup):
    """Grouped conv2d case executed through the cuDNN backend."""

    def init_op_type(self):
        # Same test data as TestWithGroup, dispatched to cuDNN.
        self.op_type = "conv2d"
        self.use_cudnn = True
class TestFP16CUDNNWithGroup(TestCUDNNWithGroup):
    """float16 variant of TestCUDNNWithGroup (forward output check only)."""

    def init_data_type(self):
        # Run the grouped conv2d case in half precision.
        self.dtype = np.float16

    def test_check_output(self):
        # FP16 kernels require a CUDA device with float16 support.
        if not core.is_compiled_with_cuda():
            return
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            # Loosened tolerance for reduced fp16 precision.
            self.check_output_with_place(place, atol=2e-2)
class TestCUDNNWith1x1(TestWith1x1):
    """1x1-filter conv2d case executed through the cuDNN backend."""

    def init_op_type(self):
        # Same test data as TestWith1x1, dispatched to cuDNN.
        self.op_type = "conv2d"
        self.use_cudnn = True
class TestFP16CUDNNWith1x1(TestCUDNNWith1x1):
    """float16 variant of TestCUDNNWith1x1 (forward output check only)."""

    def init_data_type(self):
        # Run the 1x1-filter conv2d case in half precision.
        self.dtype = np.float16

    def test_check_output(self):
        # FP16 kernels require a CUDA device with float16 support.
        if not core.is_compiled_with_cuda():
            return
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            # Loosened tolerance for reduced fp16 precision.
            self.check_output_with_place(place, atol=2e-2)
class TestCUDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
    """1x1-input/1x1-filter conv2d case executed through the cuDNN backend."""

    def init_op_type(self):
        # Same test data as TestWithInput1x1Filter1x1, dispatched to cuDNN.
        self.op_type = "conv2d"
        self.use_cudnn = True
class TestFP16CUDNNWithInput1x1Filter1x1(TestCUDNNWithInput1x1Filter1x1):
    """float16 variant of TestCUDNNWithInput1x1Filter1x1 (forward only)."""

    def init_data_type(self):
        # Run the 1x1-input/1x1-filter conv2d case in half precision.
        self.dtype = np.float16

    def test_check_output(self):
        # FP16 kernels require a CUDA device with float16 support.
        if not core.is_compiled_with_cuda():
            return
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            # Loosened tolerance for reduced fp16 precision.
            self.check_output_with_place(place, atol=2e-2)
class TestDepthwiseConv(TestConv2dOp): class TestDepthwiseConv(TestConv2dOp):
def init_test_case(self): def init_test_case(self):
self.pad = [1, 1] self.pad = [1, 1]
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册