From 5181aefc6bf6d1af1a769879f8cddc9ae9bc2a20 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 17 Aug 2017 14:18:51 +0800 Subject: [PATCH] tune max relative error for sigmoid op unit test. --- paddle/operators/sigmoid_op.h | 2 +- python/paddle/v2/framework/tests/gradient_checker.py | 12 ++++++------ python/paddle/v2/framework/tests/test_sigmoid_op.py | 8 ++++---- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/paddle/operators/sigmoid_op.h b/paddle/operators/sigmoid_op.h index 11ab923eb34..b01a9b3f232 100644 --- a/paddle/operators/sigmoid_op.h +++ b/paddle/operators/sigmoid_op.h @@ -37,7 +37,7 @@ class SigmoidKernel : public framework::OpKernel { auto Y = EigenVector::Flatten(*output); auto place = context.GetEigenDevice(); - Y.device(place) = 1.0 / (1.0 + (-1.0 * X).exp()); + Y.device(place) = 1. / (1. + (-X).exp()); } }; diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py index 2c92dfa43e7..12f302fe251 100644 --- a/python/paddle/v2/framework/tests/gradient_checker.py +++ b/python/paddle/v2/framework/tests/gradient_checker.py @@ -188,10 +188,10 @@ class GradientChecker(unittest.TestCase): outputs = backward_op.outputs() out_names = [item for k in outputs for item in outputs[k]] - cpu_grads = self.get_grad(forward_op, backward_op, input_value, - out_names, core.CPUPlace()) - gpu_grads = self.get_grad(forward_op, backward_op, input_value, - out_names, core.GPUPlace(0)) + cpu_grads = self.__get_gradient(forward_op, backward_op, input_value, + out_names, core.CPUPlace()) + gpu_grads = self.__get_gradient(forward_op, backward_op, input_value, + out_names, core.GPUPlace(0)) for c_grad, g_grad, name in itertools.izip(cpu_grads, gpu_grads, out_names): @@ -277,8 +277,8 @@ class GradientChecker(unittest.TestCase): check_names = [grad_var_name(name) for name in inputs_to_check] for place in places: # get analytical gradients according to different device - analytic_grads = 
self.get_grad(forward_op, backward_op, input_vars, - check_names, place) + analytic_grads = self.__get_gradient(forward_op, backward_op, + input_vars, check_names, place) self.__assert_is_close(numeric_grads, analytic_grads, check_names, max_relative_error, "Gradient Check On %s" % str(place)) diff --git a/python/paddle/v2/framework/tests/test_sigmoid_op.py b/python/paddle/v2/framework/tests/test_sigmoid_op.py index c3bd79f5dc0..273c2e5ab1a 100644 --- a/python/paddle/v2/framework/tests/test_sigmoid_op.py +++ b/python/paddle/v2/framework/tests/test_sigmoid_op.py @@ -14,14 +14,14 @@ class TestSigmoidOp(unittest.TestCase): class TestSigmoidGradOp(GradientChecker): - def test_compare_grad(self): + def test_grad(self): op = create_op("sigmoid") - inputs = {"X": np.random.random((11, 17)).astype("float32")} + inputs = {"X": np.random.uniform(0.1, 1, [11, 17]).astype("float32")} # compare gpu and cpu results for backward op. - # skip this test if only compiling CPU version. + # this test will be skipped if only compiling CPU version. self.compare_grad(op, inputs) # check gradients - self.check_grad(op, inputs, set("X"), "Y") + self.check_grad(op, inputs, set("X"), "Y", max_relative_error=0.007) if __name__ == '__main__': -- GitLab