From 03789a7df4beb929aa67ea9892c214d68fd6e7d8 Mon Sep 17 00:00:00 2001
From: zchen0211
Date: Fri, 27 Oct 2017 14:55:15 -0700
Subject: [PATCH] batch norm fully torture-tested and passed

---
 paddle/operators/batch_norm_op.cu             | 11 ++++--
 .../v2/framework/tests/test_batch_norm_op.py  | 35 +++++++++++--------
 2 files changed, 29 insertions(+), 17 deletions(-)

diff --git a/paddle/operators/batch_norm_op.cu b/paddle/operators/batch_norm_op.cu
index 6cbbb334385..726d1ea1b8d 100644
--- a/paddle/operators/batch_norm_op.cu
+++ b/paddle/operators/batch_norm_op.cu
@@ -208,8 +208,15 @@ class BatchNormGradKernel
     mode_ = CUDNN_BATCHNORM_SPATIAL;
 #endif

-    std::vector<int> dims = {N, C, H, W, D};
-    std::vector<int> strides = {H * W * C * D, 1, W * D * C, D * C, C};
+    std::vector<int> dims;
+    std::vector<int> strides;
+    if (tensor_format == TensorFormat::NCHW) {
+      dims = {N, C, H, W, D};
+      strides = {C * H * W * D, H * W * D, W * D, D, 1};
+    } else {
+      dims = {N, C, H, W, D};
+      strides = {H * W * C * D, 1, W * D * C, D * C, C};
+    }
     CUDNN_ENFORCE(platform::dynload::cudnnSetTensorNdDescriptor(
         data_desc_, CudnnDataType<T>::type,
         x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data()));
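The hunk above fixes the gradient kernel's tensor descriptor: the removed lines passed the NHWC stride vector to cuDNN unconditionally, mis-describing NCHW inputs, while the new code selects the strides by tensor_format. Both branches report the same dims, because cuDNN always receives the dimensions in (N, C, H, W, D) order; only the strides encode the memory layout. Below is a minimal NumPy sketch of how the two stride vectors are derived. It is not part of the patch; the helper name cudnn_strides is hypothetical, and strides are counted in elements rather than bytes:

    import numpy as np

    def cudnn_strides(n, c, h, w, d, layout="NCHW"):
        # Strides in (N, C, H, W, D) dim order, measured in elements.
        if layout == "NCHW":   # memory order: N, C, H, W, D
            return [c * h * w * d, h * w * d, w * d, d, 1]
        if layout == "NHWC":   # memory order: N, H, W, D, C
            return [h * w * d * c, 1, w * d * c, d * c, c]
        raise ValueError("unsupported layout " + layout)

    # Cross-check against NumPy's own (byte) strides.
    n, c, h, w, d = 2, 3, 4, 5, 6
    itemsize = np.dtype(np.float32).itemsize

    nchw = np.empty((n, c, h, w, d), dtype=np.float32)
    assert cudnn_strides(n, c, h, w, d, "NCHW") == [s // itemsize for s in nchw.strides]

    # NHWC data is contiguous as (n, h, w, d, c); viewing its dims in
    # (n, c, h, w, d) order yields exactly the else-branch strides.
    nhwc = np.empty((n, h, w, d, c), dtype=np.float32).transpose(0, 4, 1, 2, 3)
    assert cudnn_strides(n, c, h, w, d, "NHWC") == [s // itemsize for s in nhwc.strides]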
diff --git a/python/paddle/v2/framework/tests/test_batch_norm_op.py b/python/paddle/v2/framework/tests/test_batch_norm_op.py
index f0e7f1e5236..fedb48eee81 100644
--- a/python/paddle/v2/framework/tests/test_batch_norm_op.py
+++ b/python/paddle/v2/framework/tests/test_batch_norm_op.py
@@ -96,22 +96,25 @@ def create_or_get_tensor(scope, var_name, var, place):
     return tensor


-def set_output_grad(scope, outputs, place):
-    def __set_tensor__(name):
+def set_output_grad(scope, outputs, place, feed_dict=None):
+    def __set_tensor__(name, data=None):
         out_tensor = scope.find_var(name).get_tensor()
         grad_tensor = scope.var(grad_var_name(name)).get_tensor()
         out_dtype = out_tensor.dtype()
-        if out_dtype == core.DataType.FP64:
-            data = np.ones(out_tensor.shape(), dtype=np.float64)
-        elif out_dtype == core.DataType.FP32:
-            data = np.ones(out_tensor.shape(), dtype=np.float32)
-        else:
-            raise ValueError("Not supported data type " + str(out_dtype))
-
+        if data is None:
+            if out_dtype == core.DataType.FP64:
+                data = np.ones(out_tensor.shape(), dtype=np.float64)
+            elif out_dtype == core.DataType.FP32:
+                data = np.ones(out_tensor.shape(), dtype=np.float32)
+            else:
+                raise ValueError("Unsupported data type " + str(out_dtype))
         grad_tensor.set(data, place)

     for output in outputs:
-        __set_tensor__(output)
+        data = None
+        if feed_dict is not None and output in feed_dict:
+            data = feed_dict[output]
+        __set_tensor__(output, data)


 class TestBatchNormOp(OpTest):
@@ -119,7 +122,7 @@ class TestBatchNormOp(OpTest):
         self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)

     def test_python(self):
-        data_format = "NCHW"
+        data_format = "NHWC"
         epsilon = 0.00001
         momentum = 0.9

@@ -214,7 +217,10 @@ class TestBatchNormOp(OpTest):
         saved_variance = 1. / np.sqrt(var_ref + epsilon)

         # for gradient test
-        y_grad = np.ones(x_shape).astype(np.float32)
+        # y_grad = np.ones(x_shape).astype(np.float32)
+        y_grad = np.zeros(x_shape).astype(np.float32)
+        y_grad[0, 0, 0, 0] = 1.
+        # y_grad = np.random.random_sample(x_shape).astype(np.float32)
         x_grad_ref, scale_grad_ref, bias_grad_ref = _reference_grad(
             x_val, y_grad, scale_val, saved_mean, var_ref, epsilon, data_format)

@@ -283,7 +289,8 @@ class TestBatchNormOp(OpTest):
         set_output_grad(
             scope,
             ["y_out", "mean", "variance", "saved_mean", "saved_variance"],
-            place)
+            place,
+            feed_dict={"y_out": y_grad})
         batch_norm_op_grad.run(scope, ctx)

         x_grad_tensor = create_or_get_tensor(scope,
@@ -297,8 +304,6 @@ class TestBatchNormOp(OpTest):
                                              None, place)

         # check gradient output
-        print 'var x_grad tensor: ', str(place), np.array(x_grad_tensor)
-        print 'var x_grad by python: ', str(place), x_grad_ref
         self.__assert_close(x_grad_tensor, x_grad_ref, "x_grad")
         self.__assert_close(scale_grad_tensor, scale_grad_ref, "scale_grad")
         self.__assert_close(bias_grad_tensor, bias_grad_ref, "bias_grad")
--
GitLab
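On the y_grad change in the test above: with dL/dy all ones, the checked x_grad is a sum over Jacobian entries, so sign or indexing errors in individual entries can cancel out; a one-hot dL/dy isolates a single row of the Jacobian dy/dx and is therefore a stricter probe (the commented-out random y_grad is an intermediate option). A self-contained NumPy sketch of the idea, where normalize is a hypothetical stand-in for the batch-norm forward:

    import numpy as np

    def normalize(x, eps=1e-5):
        # Toy stand-in for the batch-norm forward (no scale/bias).
        return (x - x.mean()) / np.sqrt(x.var() + eps)

    x = np.random.random_sample((2, 3)).astype(np.float64)
    idx, delta = (0, 0), 1e-6

    # Backprop of a one-hot dL/dy at `idx` returns d y[idx] / d x, i.e. one
    # row of the Jacobian. Estimate that row by finite differences.
    row = np.zeros_like(x)
    for pos in np.ndindex(x.shape):
        bumped = x.copy()
        bumped[pos] += delta
        row[pos] = (normalize(bumped)[idx] - normalize(x)[idx]) / delta

    # With dL/dy = np.ones_like(x), a checker would only ever compare
    # column sums of the Jacobian, where per-entry errors can cancel.
    print(row)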