From 42e61af861a27d5186e2518ff444b08ab5b572db Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Fri, 25 Jan 2019 10:07:17 +0800
Subject: [PATCH] polish

test=develop
---
 paddle/fluid/imperative/layer.cc                     |  2 +-
 paddle/fluid/imperative/layer.h                      |  5 +++++
 .../paddle/fluid/tests/unittests/test_imperative.py  | 11 ++++-------
 3 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc
index 23a1f0f34..83fc6ee2e 100644
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -211,8 +211,8 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
 
   std::vector<framework::VariableValueMap> grad_outputs;
   if (backward_id_ > 0) {
-    grad_outputs.resize(1);
     VLOG(3) << "py_layer_grad";
+    grad_outputs.resize(1);
     grad_outputs[0][framework::GradVarName(PyLayer::kFwdOut)] =
         PyLayer::ApplyGrad(
             backward_id_,
diff --git a/paddle/fluid/imperative/layer.h b/paddle/fluid/imperative/layer.h
index 1f4c31b19..dc97433a5 100644
--- a/paddle/fluid/imperative/layer.h
+++ b/paddle/fluid/imperative/layer.h
@@ -199,8 +199,10 @@ class OpBase {
   // For pure python PyLayer, use `forward_id_`, otherwise, use op_desc_.
   framework::OpDesc* op_desc_;
   int forward_id_;
+
   // When has backward, one of `grad_op_descs_` or `backward_id_` is set,
   // not both.
+  // Note: each fwd op corresponds to a vector of bwd ops.
   std::vector<framework::OpDesc*> grad_op_descs_;
   int backward_id_;
 
@@ -211,8 +213,11 @@ class OpBase {
   OpBasePtrMap pre_ops_;
   std::map<std::string, std::vector<int>> pre_ops_out_idx_;
 
+  // Inputs to a vector of bwd ops.
   std::vector<framework::VariableValueMap> grad_input_vars_;
+  // Outputs to a vector of bwd ops.
   std::vector<framework::VariableValueMap> grad_output_vars_;
+
   framework::BlockDesc* block_;
 };
 
diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py
index 40f9b325f..adf35c851 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative.py
@@ -68,19 +68,16 @@ class MLP(fluid.imperative.Layer):
 
 class TestImperative(unittest.TestCase):
     def test_sum_op(self):
+        x = np.ones([2, 2], np.float32)
         with fluid.imperative.guard():
             inputs = []
             for _ in range(10):
-                inputs.append(
-                    fluid.imperative.base.to_variable(
-                        np.ones([2, 2], np.float32)))
-            sys.stderr.write('%s\n' % inputs[0].dtype)
+                inputs.append(fluid.imperative.base.to_variable(x))
             ret = fluid.layers.sums(inputs)
-            sys.stderr.write('%s\n' % ret.dtype)
             loss = fluid.layers.reduce_sum(ret)
-            sys.stderr.write('%s\n' % loss.dtype)
             loss._backward()
-            sys.stderr.write('%s %s\n' % (ret._numpy(), inputs[0]._gradient()))
+            self.assertTrue(np.allclose(ret._numpy(), x * 10))
+            self.assertTrue(np.allclose(inputs[0]._gradient(), x))
 
     def test_layer(self):
         with fluid.imperative.guard():
--
GitLab
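
Note (illustrative sketch, not part of the patch): the snippet below restates the pattern the
updated test_sum_op exercises as a standalone script. It assumes a PaddlePaddle build of the same
vintage as this patch (early 2019), where fluid.imperative, fluid.imperative.base.to_variable, and
the underscore-prefixed methods seen in the diff (loss._backward(), _numpy(), _gradient()) are
available.

    import numpy as np
    import paddle.fluid as fluid

    x = np.ones([2, 2], np.float32)
    with fluid.imperative.guard():
        # Ten imperative variables, all built from the same numpy array.
        inputs = [fluid.imperative.base.to_variable(x) for _ in range(10)]
        ret = fluid.layers.sums(inputs)      # elementwise sum of the ten inputs
        loss = fluid.layers.reduce_sum(ret)  # reduce to a scalar so backward is well-defined
        loss._backward()                     # dynamic-graph autograd, as in the test
        # Summing ten all-ones tensors gives a tensor of 10s.
        assert np.allclose(ret._numpy(), x * 10)
        # d(loss)/d(input_i) is all ones, matching the test's gradient check.
        assert np.allclose(inputs[0]._gradient(), x)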