diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py
index 2f2873c183662430d3e984d685a85d5de9d0f691..91a9d826a0c3c038a487af5e7d7c961f616b89ec 100644
--- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py
@@ -187,99 +187,116 @@ def set_output_grad(scope, outputs, place, feed_dict=None):
 
 
 class TestBatchNormOpInference(OpTest):
+    def setUp(self):
+        self.dtype = np.float32
+
     def __assert_close(self, tensor, np_array, msg, atol=1e-4):
         self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
 
-    def test_inference(self):
-        def test_with_place(place, data_layout, dtype, shape):
-            epsilon = 0.00001
-            if len(shape) == 2:
-                x_shape = shape
-                c = x_shape[1]
+    def check_with_place(self, place, data_layout, dtype, shape):
+        epsilon = 0.00001
+        if len(shape) == 2:
+            x_shape = shape
+            c = x_shape[1]
+        else:
+            n, h, w, c = shape[0], shape[1], shape[2], shape[3]
+            if data_layout == "NHWC":
+                x_shape = [n, h, w, c]
+            elif data_layout == "NCHW":
+                x_shape = [n, c, h, w]
             else:
-                n, h, w, c = shape[0], shape[1], shape[2], shape[3]
-                if data_layout == "NHWC":
-                    x_shape = [n, h, w, c]
-                elif data_layout == "NCHW":
-                    x_shape = [n, c, h, w]
-                else:
-                    raise ValueError("Unknown data layout.")
-            scale_shape = [c]
-
-            x_val = np.random.random_sample(x_shape).astype(dtype)
-            scale_val = np.random.random_sample(scale_shape).astype(dtype)
-            bias_val = np.random.random_sample(scale_shape).astype(dtype)
-
-            mean = np.zeros(scale_shape).astype(dtype)
-            variance = np.ones(scale_shape).astype(dtype)
-
-            y_out = _reference_testing(x_val, scale_val, bias_val, mean,
-                                       variance, epsilon,
-                                       data_layout).astype(dtype)
+                raise ValueError("Unknown data layout.")
+        scale_shape = [c]
 
-            scope = core.Scope()
+        x_val = np.random.random_sample(x_shape).astype(dtype)
+        scale_val = np.random.random_sample(scale_shape).astype(dtype)
+        bias_val = np.random.random_sample(scale_shape).astype(dtype)
 
-            # create input
-            x_tensor = create_or_get_tensor(
-                scope, "x_val", OpTest.np_dtype_to_fluid_dtype(x_val), place)
-            scale_tensor = create_or_get_tensor(
-                scope, "scale_val",
-                OpTest.np_dtype_to_fluid_dtype(scale_val), place)
-            bias_tensor = create_or_get_tensor(
-                scope, "bias_val",
-                OpTest.np_dtype_to_fluid_dtype(bias_val), place)
-            mean_tensor = create_or_get_tensor(
-                scope, "mean", OpTest.np_dtype_to_fluid_dtype(mean), place)
-            variance_tensor = create_or_get_tensor(
-                scope, "variance",
-                OpTest.np_dtype_to_fluid_dtype(variance), place)
+        mean = np.zeros(scale_shape).astype(dtype)
+        variance = np.ones(scale_shape).astype(dtype)
 
-            # create output
-            y_tensor = create_or_get_tensor(scope, "y_out", None, place)
-            saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None,
-                                                     place)
-            saved_variance_tensor = create_or_get_tensor(
-                scope, "saved_variance", None, place)
-            mean_out_tensor = mean_tensor
-            variance_out_tensor = variance_tensor
+        y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance,
+                                   epsilon, data_layout).astype(dtype)
+
+        scope = core.Scope()
+
+        # create input
+        x_tensor = create_or_get_tensor(scope, "x_val",
+                                        OpTest.np_dtype_to_fluid_dtype(x_val),
+                                        place)
+        scale_tensor = create_or_get_tensor(
+            scope, "scale_val",
+            OpTest.np_dtype_to_fluid_dtype(scale_val), place)
+        bias_tensor = create_or_get_tensor(
+            scope, "bias_val", OpTest.np_dtype_to_fluid_dtype(bias_val), place)
+        mean_tensor = create_or_get_tensor(scope, "mean",
+                                           OpTest.np_dtype_to_fluid_dtype(mean),
+                                           place)
+        variance_tensor = create_or_get_tensor(
+            scope, "variance", OpTest.np_dtype_to_fluid_dtype(variance), place)
+
+        # create output
+        y_tensor = create_or_get_tensor(scope, "y_out", None, place)
+        saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None,
+                                                 place)
+        saved_variance_tensor = create_or_get_tensor(scope, "saved_variance",
+                                                     None, place)
+        mean_out_tensor = mean_tensor
+        variance_out_tensor = variance_tensor
+
+        batch_norm_op = Operator(
+            "batch_norm",
+            # inputs
+            X="x_val",
+            Scale="scale_val",
+            Bias="bias_val",
+            Mean="mean",
+            Variance="variance",
+            # outputs
+            Y="y_out",
+            MeanOut="mean",
+            VarianceOut="variance",
+            SavedMean="saved_mean",
+            SavedVariance="saved_variance",
+            # attrs
+            is_test=True,
+            data_layout=data_layout,
+            epsilon=epsilon)
+
+        batch_norm_op.run(scope, place)
+
+        # check inference result
+        self.__assert_close(y_tensor, y_out,
+                            "inference output are different at " + str(place) +
+                            ", " + data_layout + ", " + str(np.dtype(dtype)))
+
+    def test_check_output(self):
+        places = [core.CPUPlace()]
+        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
+            places.append(core.CUDAPlace(0))
 
-            batch_norm_op = Operator(
-                "batch_norm",
-                # inputs
-                X="x_val",
-                Scale="scale_val",
-                Bias="bias_val",
-                Mean="mean",
-                Variance="variance",
-                # outputs
-                Y="y_out",
-                MeanOut="mean",
-                VarianceOut="variance",
-                SavedMean="saved_mean",
-                SavedVariance="saved_variance",
-                # attrs
-                is_test=True,
-                data_layout=data_layout,
-                epsilon=epsilon)
+        for place in places:
+            for data_format in ["NCHW", "NHWC"]:
+                self.check_with_place(place, data_format, self.dtype,
+                                      [2, 3, 4, 5])
+                self.check_with_place(place, data_format, self.dtype, [2, 3])
 
-            batch_norm_op.run(scope, place)
 
-            # check inference result
-            self.__assert_close(
-                y_tensor, y_out, "inference output are different at " +
-                str(place) + ", " + data_layout + ", " + str(np.dtype(dtype)))
+class TestFP16BatchNormOpInference(TestBatchNormOpInference):
+    def setUp(self):
+        self.dtype = np.float16
 
-        places = [core.CPUPlace()]
+    def test_check_output(self):
+        places = []
         if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
             place = core.CUDAPlace(0)
-            if self.dtype != np.float16 or core.is_float16_supported(place):
+            if core.is_float16_supported(place):
                 places.append(place)
 
         for place in places:
             for data_format in ["NCHW", "NHWC"]:
-                for dtype in [np.float32, np.float16]:
-                    test_with_place(place, data_format, dtype, [2, 3, 4, 5])
-                    test_with_place(place, data_format, dtype, [2, 3])
+                self.check_with_place(place, data_format, self.dtype,
+                                      [2, 3, 4, 5])
+                self.check_with_place(place, data_format, self.dtype, [2, 3])
 
 
 class TestBatchNormOpTraining(OpTest):
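Note: the `_reference_testing` helper that `check_with_place` compares against is defined earlier in the file and is not part of this hunk. For context, a minimal NumPy sketch of inference-mode batch norm consistent with how this test calls it (the function name and exact formulation here are illustrative assumptions, not the file's actual helper) could look like:

    import numpy as np

    def batch_norm_inference(x, scale, bias, mean, variance, epsilon,
                             data_layout):
        # Inference-mode batch norm: normalize with the running statistics
        # rather than the batch statistics. The channel axis is 1 for 4-D
        # NCHW input and the last axis for NHWC or rank-2 input.
        if data_layout == "NCHW" and x.ndim == 4:
            param_shape = [1, -1, 1, 1]
        else:
            param_shape = [1] * (x.ndim - 1) + [-1]
        scale = scale.reshape(param_shape)
        bias = bias.reshape(param_shape)
        mean = mean.reshape(param_shape)
        variance = variance.reshape(param_shape)
        return scale * (x - mean) / np.sqrt(variance + epsilon) + bias

Since the test feeds mean = zeros and variance = ones, the expected output reduces to scale * x / sqrt(1 + epsilon) + bias, so each (place, layout, shape) combination checks only the inference normalization path, with `__assert_close` allowing a small absolute tolerance.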