提交 5181aefc 编写于 作者: D dangqingqing

tune max relative error for sigmoid op unit test.

上级 18dcc1c0
...@@ -37,7 +37,7 @@ class SigmoidKernel : public framework::OpKernel { ...@@ -37,7 +37,7 @@ class SigmoidKernel : public framework::OpKernel {
auto Y = EigenVector<T>::Flatten(*output); auto Y = EigenVector<T>::Flatten(*output);
auto place = context.GetEigenDevice<Place>(); auto place = context.GetEigenDevice<Place>();
Y.device(place) = 1.0 / (1.0 + (-1.0 * X).exp()); Y.device(place) = 1. / (1. + (-X).exp());
} }
}; };
......
...@@ -188,9 +188,9 @@ class GradientChecker(unittest.TestCase): ...@@ -188,9 +188,9 @@ class GradientChecker(unittest.TestCase):
outputs = backward_op.outputs() outputs = backward_op.outputs()
out_names = [item for k in outputs for item in outputs[k]] out_names = [item for k in outputs for item in outputs[k]]
cpu_grads = self.get_grad(forward_op, backward_op, input_value, cpu_grads = self.__get_gradient(forward_op, backward_op, input_value,
out_names, core.CPUPlace()) out_names, core.CPUPlace())
gpu_grads = self.get_grad(forward_op, backward_op, input_value, gpu_grads = self.__get_gradient(forward_op, backward_op, input_value,
out_names, core.GPUPlace(0)) out_names, core.GPUPlace(0))
for c_grad, g_grad, name in itertools.izip(cpu_grads, gpu_grads, for c_grad, g_grad, name in itertools.izip(cpu_grads, gpu_grads,
...@@ -277,8 +277,8 @@ class GradientChecker(unittest.TestCase): ...@@ -277,8 +277,8 @@ class GradientChecker(unittest.TestCase):
check_names = [grad_var_name(name) for name in inputs_to_check] check_names = [grad_var_name(name) for name in inputs_to_check]
for place in places: for place in places:
# get analytical gradients according to different device # get analytical gradients according to different device
analytic_grads = self.get_grad(forward_op, backward_op, input_vars, analytic_grads = self.__get_gradient(forward_op, backward_op,
check_names, place) input_vars, check_names, place)
self.__assert_is_close(numeric_grads, analytic_grads, check_names, self.__assert_is_close(numeric_grads, analytic_grads, check_names,
max_relative_error, max_relative_error,
"Gradient Check On %s" % str(place)) "Gradient Check On %s" % str(place))
...@@ -14,14 +14,14 @@ class TestSigmoidOp(unittest.TestCase): ...@@ -14,14 +14,14 @@ class TestSigmoidOp(unittest.TestCase):
class TestSigmoidGradOp(GradientChecker): class TestSigmoidGradOp(GradientChecker):
def test_compare_grad(self): def test_grad(self):
op = create_op("sigmoid") op = create_op("sigmoid")
inputs = {"X": np.random.random((11, 17)).astype("float32")} inputs = {"X": np.random.uniform(0.1, 1, [11, 17]).astype("float32")}
# compare gpu and cpu results for backward op. # compare gpu and cpu results for backward op.
# skip this test if only compiling CPU version. # this test will be skipped if only compiling CPU version.
self.compare_grad(op, inputs) self.compare_grad(op, inputs)
# check gradients # check gradients
self.check_grad(op, inputs, set("X"), "Y") self.check_grad(op, inputs, set("X"), "Y", max_relative_error=0.007)
if __name__ == '__main__': if __name__ == '__main__':
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册