From e967d19b0a380d9bb30be2974f05793671fb13b8 Mon Sep 17 00:00:00 2001
From: Kexin Zhao
Date: Thu, 15 Mar 2018 18:01:39 -0700
Subject: [PATCH] add more tests

---
 paddle/fluid/operators/conv_cudnn_op.cu.cc    |  3 +-
 .../paddle/fluid/tests/unittests/op_test.py   | 23 +++--
 .../fluid/tests/unittests/test_conv2d_op.py   | 83 ++++++++++++++-----
 3 files changed, 75 insertions(+), 34 deletions(-)

diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc
index bff62f050c7..0ddbfdb4aa9 100644
--- a/paddle/fluid/operators/conv_cudnn_op.cu.cc
+++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc
@@ -282,7 +282,8 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
     platform::CUDAPlace gpu = boost::get<platform::CUDAPlace>(ctx.GetPlace());
     cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes);
     // ------------------- cudnn conv backward data ---------------------
-    T alpha = 1.0f, beta = 0.0f;
+    typename platform::CudnnDataType<T>::ScalingParamType alpha = 1.0f,
+        beta = 0.0f;
     if (input_grad) {
       T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
       // Because beta is zero, it is unnecessary to reset input_grad.
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 6d4684b024d..6a42f763a6c 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -470,29 +470,26 @@ class OpTest(unittest.TestCase):
         return tensor
 
     @staticmethod
-    def create_view(input):
-        """Create a view of the input numpy array
+    def np_dtype_to_fluid_dtype(input):
+        """Change the dtype of float16 numpy array
 
         numpy float16 is binded to paddle::platform::float16
-        in tensor_py.h via the help of numpy uint16 because
+        in tensor_py.h with the help of the uint16 data type, since
         the internal memory representation of float16 is
-        uint16_t in paddle or np.uint16 in numpy, which are
-        themselves binded together.
+        uint16_t in paddle and np.uint16 in numpy, which are
+        themselves bound together by pybind.
 
         Args:
            input: input numpy array
 
         Returns:
-            input_view: if the dtype of input is np.float16, input_view
-            will reinterpret input as with dtype np.uint16.
-            Otherwise, input_view will be input itself.
+            input: if the dtype of input is np.float16, its dtype will be
+            changed to np.uint16 so that the underlying memory is
+            reinterpreted in place as np.uint16.
         """
         if input.dtype == np.float16:
-            # view will only reinterpret memory without copying
-            input_view = input.view(np.uint16)
-        else:
-            input_view = input
-        return input_view
+            input.dtype = np.uint16
+        return input
 
     def _get_gradient(self, input_to_check, place, output_names, no_grad_set):
         prog = Program()
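For reference, a minimal standalone sketch of the reinterpretation that the
renamed np_dtype_to_fluid_dtype helper relies on (the sample values below are
illustrative only): np.float16 and np.uint16 are both 16 bits wide, so
relabeling the dtype changes how the buffer is read without touching the bytes.

    import numpy as np

    x = np.array([1.0, 2.5, -3.0], dtype=np.float16)
    raw = x.tobytes()          # the underlying float16 bit patterns

    x.dtype = np.uint16        # in-place relabel: no copy, no conversion
    assert x.tobytes() == raw  # same memory, different interpretation

    # The removed create_view achieved the same reinterpretation via a view:
    v = np.frombuffer(raw, dtype=np.float16).view(np.uint16)
    assert v.tobytes() == raw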
diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py
index badf7a8cb4c..7913b98240f 100644
--- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py
@@ -82,18 +82,9 @@ class TestConv2dOp(OpTest):
         output = conv2d_forward_naive(input, filter, self.groups,
                                       conv2d_param).astype(self.dtype)
 
-        # numpy float16 is binded to paddle::platform::float16
-        # in tensor_py.h via the help of numpy uint16 because
-        # the internal memory representation of float16 is
-        # uint16_t in paddle or np.uint16 in numpy, which are
-        # themselves binded together.
         self.inputs = {
-            #'Input': (input.view(np.uint16)
-            #          if self.dtype == np.float16 else input),
-            #'Filter': (filter.view(np.uint16)
-            #           if self.dtype == np.float16 else filter)
-            'Input': OpTest.create_view(input),
-            'Filter': OpTest.create_view(filter)
+            'Input': OpTest.np_dtype_to_fluid_dtype(input),
+            'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
         }
         self.attrs = {
             'strides': self.stride,
@@ -113,6 +104,8 @@ class TestConv2dOp(OpTest):
         self.check_output()
 
     def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
@@ -125,6 +118,8 @@ class TestConv2dOp(OpTest):
             set(['Input', 'Filter']), 'Output', max_relative_error=0.02)
 
     def test_check_grad_no_filter(self):
+        if self.dtype == np.float16:
+            return
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
@@ -140,6 +135,8 @@ class TestConv2dOp(OpTest):
             no_grad_set=set(['Filter']))
 
     def test_check_grad_no_input(self):
+        if self.dtype == np.float16:
+            return
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
@@ -259,15 +256,6 @@ class TestFP16CUDNN(TestCUDNN):
         if core.is_float16_supported(place):
             self.check_output_with_place(place, atol=2e-2)
 
-    def test_check_grad(self):
-        pass
-
-    def test_check_grad_no_filter(self):
-        pass
-
-    def test_check_grad_no_input(self):
-        pass
-
 
 class TestCUDNNWithPad(TestWithPad):
     def init_op_type(self):
@@ -275,30 +263,85 @@ class TestCUDNNWithPad(TestWithPad):
         self.use_cudnn = True
         self.op_type = "conv2d"
 
 
+class TestFP16CUDNNWithPad(TestCUDNNWithPad):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWithStride(TestWithStride):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"
 
 
+class TestFP16CUDNNWithStride(TestCUDNNWithStride):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWithGroup(TestWithGroup):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"
 
 
+class TestFP16CUDNNWithGroup(TestCUDNNWithGroup):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWith1x1(TestWith1x1):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"
 
 
+class TestFP16CUDNNWith1x1(TestCUDNNWith1x1):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"
 
 
+class TestFP16CUDNNWithInput1x1Filter1x1(TestCUDNNWithInput1x1Filter1x1):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestDepthwiseConv(TestConv2dOp):
     def init_test_case(self):
         self.pad = [1, 1]
-- 
GitLab
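A note on the test additions: the five TestFP16CUDNN* classes above repeat the
same init_data_type/test_check_output pair. A class factory could generate them
from their CUDNN parents; a sketch under the module's existing imports
(create_test_cudnn_fp16_class is a hypothetical name, not part of this patch):

    import numpy as np
    import paddle.fluid.core as core

    def create_test_cudnn_fp16_class(parent, name):
        # Derive an FP16 variant of a CUDNN conv2d test class; gradient
        # checks are skipped for float16 by the guards added in TestConv2dOp.
        class TestFP16Case(parent):
            def init_data_type(self):
                self.dtype = np.float16

            def test_check_output(self):
                # FP16 kernels require a CUDA device that supports float16.
                if core.is_compiled_with_cuda():
                    place = core.CUDAPlace(0)
                    if core.is_float16_supported(place):
                        self.check_output_with_place(place, atol=2e-2)

        TestFP16Case.__name__ = name
        globals()[name] = TestFP16Case

    # Equivalent to the handwritten classes, e.g.:
    # create_test_cudnn_fp16_class(TestCUDNNWithPad, 'TestFP16CUDNNWithPad')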