From 3d9d32a1c1462780ea1a5682a27ce7da090a4b74 Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Mon, 4 Sep 2017 16:20:27 -0700
Subject: [PATCH] Invoke check_grad many times for no_grad_set

---
 .../v2/framework/tests/gradient_checker.py    | 23 +++-------------
 .../paddle/v2/framework/tests/test_mul_op.py  | 27 +++++++++++++++----
 .../v2/framework/tests/test_rowwise_add_op.py | 16 ++++++++---
 3 files changed, 37 insertions(+), 29 deletions(-)

diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py
index 82ab7ad39..b8d7e4ea4 100644
--- a/python/paddle/v2/framework/tests/gradient_checker.py
+++ b/python/paddle/v2/framework/tests/gradient_checker.py
@@ -286,7 +286,7 @@ class GradientChecker(unittest.TestCase):
         for no_grad in no_grad_set:
             if no_grad not in in_names:
                 raise ValueError("no_grad should be in in_names")
-            if name in inputs_to_check:
+            if no_grad in inputs_to_check:
                 raise ValueError("no_grad should not be in inputs_to_check")
 
         backward_op = core.Operator.backward(forward_op, no_grad_set)
@@ -304,25 +304,8 @@ class GradientChecker(unittest.TestCase):
         check_names = [grad_var_name(name) for name in inputs_to_check]
         for place in places:
-            # analytic_grads = self.__get_gradient(forward_op, backward_op,
-            #                                      input_vars, check_names, place)
-            # In fact, the above two lines can be used to replace following
-            # codes. But most of the gradient operators need to handle the case
-            # where one of more of the gradient of the input is not needed.
-            # We change the unit test framework to explicitly test whether
-            # the operator correctly handles this through follow codes.
-            # In addtion, if all the inputs have no gradients, the NOP operator
-            # will be returned by core.Operator.backward(). The following codes
-            # do not test this case.
-            analytic_grads = []
-            for name in inputs_to_check:
-                no_grads = [name for name in no_grad_set]
-                no_grads.extend(filter(lambda x: x != name, inputs_to_check))
-                backward_op = core.Operator.backward(forward_op, set(no_grads))
-                # get analytical gradients according to different device
-                analytic_grads.extend(
-                    self.__get_gradient(forward_op, backward_op, input_vars,
-                                        [grad_var_name(name)], place))
+            analytic_grads = self.__get_gradient(forward_op, backward_op,
+                                                 input_vars, check_names, place)
 
             self.__assert_is_close(numeric_grads, analytic_grads, check_names,
                                    max_relative_error,
                                    "Gradient Check On %s" % str(place))
diff --git a/python/paddle/v2/framework/tests/test_mul_op.py b/python/paddle/v2/framework/tests/test_mul_op.py
index 81371b1d1..92d2b80e8 100644
--- a/python/paddle/v2/framework/tests/test_mul_op.py
+++ b/python/paddle/v2/framework/tests/test_mul_op.py
@@ -17,16 +17,33 @@ class TestMulOp(unittest.TestCase):
 
 class TestMulGradOp(GradientChecker):
-    def test_mul(self):
-        op = create_op("mul")
-        inputs = {
+    def setUp(self):
+        self.op = create_op("mul")
+        self.inputs = {
             'X': np.random.random((32, 84)).astype("float32"),
             'Y': np.random.random((84, 100)).astype("float32")
         }
-        self.compare_grad(op, inputs)
+
+    def test_normal(self):
         # mul op will enlarge the relative error
         self.check_grad(
-            op, inputs, set(["X", "Y"]), "Out", max_relative_error=0.5)
+            self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5)
+
+    def test_ignore_x(self):
+        self.check_grad(
+            self.op,
+            self.inputs, ["Y"],
+            "Out",
+            max_relative_error=0.5,
+            no_grad_set={"X"})
+
+    def test_ignore_y(self):
+        self.check_grad(
+            self.op,
+            self.inputs, ["X"],
+            "Out",
+            max_relative_error=0.5,
+            no_grad_set={"Y"})
 
 
 # TODO(dzh,qijun) : mulgrad test case need transpose feature of blas library
 
diff --git a/python/paddle/v2/framework/tests/test_rowwise_add_op.py b/python/paddle/v2/framework/tests/test_rowwise_add_op.py
index 45d569da2..403734e71 100644
--- a/python/paddle/v2/framework/tests/test_rowwise_add_op.py
+++ b/python/paddle/v2/framework/tests/test_rowwise_add_op.py
@@ -17,13 +17,21 @@ class TestRowwiseAddOp(unittest.TestCase):
 
 class RowwiseAddGradOpTest(GradientChecker):
-    def test_rowwise_add(self):
-        op = create_op("rowwise_add")
-        inputs = {
+    def setUp(self):
+        self.op = create_op("rowwise_add")
+        self.inputs = {
             "X": np.random.uniform(0.1, 1, [5, 10]).astype("float32"),
             "b": np.random.uniform(0.1, 1, [10]).astype("float32")
         }
-        self.check_grad(op, inputs, set(["X", "b"]), "Out")
+
+    def test_normal(self):
+        self.check_grad(self.op, self.inputs, ["X", "b"], "Out")
+
+    def test_ignore_b(self):
+        self.check_grad(self.op, self.inputs, ["X"], "Out", no_grad_set={"b"})
+
+    def test_ignore_x(self):
+        self.check_grad(self.op, self.inputs, ["b"], "Out", no_grad_set={"X"})
 
 
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab
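
Reviewer note (not part of the patch): the test_ignore_* cases added above take over the job of the per-input loop deleted from gradient_checker.py, so no_grad_set handling is now exercised explicitly by each operator's tests. A minimal Python sketch of that equivalence; the input names and base_no_grad_set below are illustrative, not taken from the patch:

# Sketch only: mirrors the loop deleted from gradient_checker.py.
# For inputs_to_check = ["X", "Y"], each iteration checks one input's
# gradient while every other input is placed into no_grad_set.
inputs_to_check = ["X", "Y"]
base_no_grad_set = set()  # hypothetical caller-supplied no_grad_set

for name in inputs_to_check:
    no_grads = set(base_no_grad_set)
    no_grads.update(x for x in inputs_to_check if x != name)
    # Old framework behaviour (per iteration):
    #   backward_op = core.Operator.backward(forward_op, no_grads)
    #   ... compare only grad_var_name(name) ...
    # After this patch, a unit test writes the same iteration directly:
    #   self.check_grad(self.op, self.inputs, [name], "Out",
    #                   no_grad_set=no_grads)
    print(name, sorted(no_grads))  # prints: X ['Y'], then Y ['X']

As the deleted comment pointed out, the case where every input is in no_grad_set (core.Operator.backward() then returns a NOP operator) remains untested in both the old and the new form.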