Commit e967d19b authored by Kexin Zhao

add more tests

Parent a13ec343
@@ -282,7 +282,8 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
     platform::CUDAPlace gpu = boost::get<platform::CUDAPlace>(ctx.GetPlace());
     cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes);
     // ------------------- cudnn conv backward data ---------------------
-    T alpha = 1.0f, beta = 0.0f;
+    typename platform::CudnnDataType<T>::ScalingParamType alpha = 1.0f,
+        beta = 0.0f;
     if (input_grad) {
       T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
       // Because beta is zero, it is unnecessary to reset input_grad.
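Why the scaling factors change type: cuDNN takes alpha and beta by pointer, and for half-precision tensor data it expects float scaling factors (double only for double data), so declaring them as T would hand cuDNN a half where it wants a float. CudnnDataType<T>::ScalingParamType resolves to the factor type matching T. A rough Python analogy of that trait follows; the SCALING_PARAM_TYPE dict and scaling_params helper are illustrative names, not part of the commit:

import numpy as np

# Illustrative analogy of CudnnDataType<T>::ScalingParamType: map the
# tensor dtype to the dtype cuDNN expects for the alpha/beta factors.
# cuDNN wants float factors for half data and double factors for double.
SCALING_PARAM_TYPE = {
    np.float16: np.float32,  # half data still scales with float factors
    np.float32: np.float32,
    np.float64: np.float64,
}

def scaling_params(dtype):
    """Return (alpha, beta) = (1, 0) in the factor dtype for `dtype` data."""
    factor = SCALING_PARAM_TYPE[dtype]
    return factor(1.0), factor(0.0)

alpha, beta = scaling_params(np.float16)
assert alpha.dtype == np.float32  # not float16, even for fp16 tensors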
@@ -470,29 +470,26 @@ class OpTest(unittest.TestCase):
         return tensor

     @staticmethod
-    def create_view(input):
-        """Create a view of the input numpy array
+    def np_dtype_to_fluid_dtype(input):
+        """Change the dtype of float16 numpy array

         numpy float16 is bound to paddle::platform::float16
-        in tensor_py.h via the help of numpy uint16 because
+        in tensor_py.h with the help of the uint16 data type, since
         the internal memory representation of float16 is
-        uint16_t in paddle or np.uint16 in numpy, which are
-        themselves binded together.
+        uint16_t in paddle and np.uint16 in numpy, which are
+        themselves bound together by pybind.

         Args:
             input: input numpy array

         Returns:
-            input_view: if the dtype of input is np.float16, input_view
-                will reinterpret input as with dtype np.uint16.
-                Otherwise, input_view will be input itself.
+            input: if the dtype of input is np.float16, its dtype will be
+                changed to np.uint16 so that the internal memory is
+                reinterpreted as being of dtype np.uint16.
         """
         if input.dtype == np.float16:
-            # view will only reinterpret memory without copying
-            input_view = input.view(np.uint16)
-        else:
-            input_view = input
-        return input_view
+            input.dtype = np.uint16
+        return input

     def _get_gradient(self, input_to_check, place, output_names, no_grad_set):
         prog = Program()
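What the renamed helper actually does, compared with the old create_view: assigning to arr.dtype relabels the array's existing buffer in place without copying, much like arr.view(np.uint16), except that it mutates the caller's array instead of returning a fresh view. A minimal sketch using plain numpy:

import numpy as np

# Assigning to .dtype reinterprets the same 2-byte elements in place,
# just like a.view(np.uint16), but it mutates `a` itself.
a = np.array([1.0, 2.0], dtype=np.float16)
raw = a.tobytes()           # snapshot of the underlying memory
a.dtype = np.uint16         # relabel, no copy
assert a.tobytes() == raw   # bytes are untouched; only the dtype changed
assert a.dtype == np.uint16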
@@ -82,18 +82,9 @@ class TestConv2dOp(OpTest):
         output = conv2d_forward_naive(input, filter, self.groups,
                                       conv2d_param).astype(self.dtype)

-        # numpy float16 is binded to paddle::platform::float16
-        # in tensor_py.h via the help of numpy uint16 because
-        # the internal memory representation of float16 is
-        # uint16_t in paddle or np.uint16 in numpy, which are
-        # themselves binded together.
         self.inputs = {
-            #'Input': (input.view(np.uint16)
-            #          if self.dtype == np.float16 else input),
-            #'Filter': (filter.view(np.uint16)
-            #           if self.dtype == np.float16 else filter)
-            'Input': OpTest.create_view(input),
-            'Filter': OpTest.create_view(filter)
+            'Input': OpTest.np_dtype_to_fluid_dtype(input),
+            'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
         }
         self.attrs = {
             'strides': self.stride,
@@ -113,6 +104,8 @@ class TestConv2dOp(OpTest):
         self.check_output()

     def test_check_grad(self):
+        if self.dtype == np.float16:
+            return
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
@@ -125,6 +118,8 @@ class TestConv2dOp(OpTest):
             set(['Input', 'Filter']), 'Output', max_relative_error=0.02)

     def test_check_grad_no_filter(self):
+        if self.dtype == np.float16:
+            return
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
@@ -140,6 +135,8 @@ class TestConv2dOp(OpTest):
             no_grad_set=set(['Filter']))

     def test_check_grad_no_input(self):
+        if self.dtype == np.float16:
+            return
         if self.use_cudnn:
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
@@ -259,15 +256,6 @@ class TestFP16CUDNN(TestCUDNN):
             if core.is_float16_supported(place):
                 self.check_output_with_place(place, atol=2e-2)

-    def test_check_grad(self):
-        pass
-
-    def test_check_grad_no_filter(self):
-        pass
-
-    def test_check_grad_no_input(self):
-        pass
-

 class TestCUDNNWithPad(TestWithPad):
     def init_op_type(self):
@@ -275,30 +263,85 @@ class TestCUDNNWithPad(TestWithPad):
         self.op_type = "conv2d"


+class TestFP16CUDNNWithPad(TestCUDNNWithPad):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWithStride(TestWithStride):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"


+class TestFP16CUDNNWithStride(TestCUDNNWithStride):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWithGroup(TestWithGroup):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"


+class TestFP16CUDNNWithGroup(TestCUDNNWithGroup):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWith1x1(TestWith1x1):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"


+class TestFP16CUDNNWith1x1(TestCUDNNWith1x1):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestCUDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
     def init_op_type(self):
         self.use_cudnn = True
         self.op_type = "conv2d"


+class TestFP16CUDNNWithInput1x1Filter1x1(TestCUDNNWithInput1x1Filter1x1):
+    def init_data_type(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        if core.is_compiled_with_cuda():
+            place = core.CUDAPlace(0)
+            if core.is_float16_supported(place):
+                self.check_output_with_place(place, atol=2e-2)
+
+
 class TestDepthwiseConv(TestConv2dOp):
     def init_test_case(self):
         self.pad = [1, 1]
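The five TestFP16CUDNN* classes added above repeat one pattern: inherit the FP32 cuDNN test, flip the dtype to np.float16, and run only the forward check, guarded by is_float16_supported. If the repetition kept growing, a mixin could factor it out; a hedged sketch follows (FP16CUDNNMixin is a hypothetical name; core and the TestCUDNN* base classes come from the test module above):

import numpy as np
# `core` and the TestCUDNN* bases are assumed to be the ones imported
# and defined in the surrounding test_conv2d_op.py module.

class FP16CUDNNMixin(object):
    """Hypothetical helper: the behavior every TestFP16CUDNN* class repeats."""

    def init_data_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=2e-2)


# e.g. TestFP16CUDNNWithPad could then be written as:
class TestFP16CUDNNWithPad(FP16CUDNNMixin, TestCUDNNWithPad):
    pass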