From 0d5819eb4f6772f23c50fedf3951c8a3c38ecf18 Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Fri, 11 Jan 2019 21:21:59 +0800
Subject: [PATCH] polish imperative codes

test=develop
---
 paddle/fluid/imperative/layer.cc              |  5 ++--
 paddle/fluid/imperative/layer.h               |  3 +++
 paddle/fluid/imperative/tracer.h              |  1 +
 python/paddle/fluid/imperative/layers.py      | 24 ++++++++++++++++---
 .../fluid/tests/unittests/test_imperative.py  | 18 ++------------
 5 files changed, 30 insertions(+), 21 deletions(-)

diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc
index 7594670cd..aaafb4e87 100644
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -131,8 +131,9 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
   std::map<std::string, std::vector<framework::Variable*>> grad_outputs;
   if (backward_id_ > 0) {
     VLOG(3) << "py_layer_grad";
-    grad_outputs["Out@GRAD"] =
-        PyLayer::ApplyGrad(backward_id_, grad_input_vars_["X@GRAD"]);
+    grad_outputs[framework::GradVarName(PyLayer::kFwdOut)] = PyLayer::ApplyGrad(
+        backward_id_,
+        grad_input_vars_[framework::GradVarName(PyLayer::kFwdInp)]);
   } else {
     VLOG(3) << "op grad " << grad_op_desc_->Type();
     for (auto it : grad_output_vars_) {
diff --git a/paddle/fluid/imperative/layer.h b/paddle/fluid/imperative/layer.h
index daf56a521..14d89ca40 100644
--- a/paddle/fluid/imperative/layer.h
+++ b/paddle/fluid/imperative/layer.h
@@ -200,6 +200,9 @@ class PyLayer {
  public:
   virtual ~PyLayer() {}
 
+  static constexpr char* kFwdInp = "X";
+  static constexpr char* kFwdOut = "Out";
+
   static void RegisterFunc(int func_id, const py::object& py_func);
 
   static int NumFuncs();
diff --git a/paddle/fluid/imperative/tracer.h b/paddle/fluid/imperative/tracer.h
index f225d8abe..58d736406 100644
--- a/paddle/fluid/imperative/tracer.h
+++ b/paddle/fluid/imperative/tracer.h
@@ -48,6 +48,7 @@ class Tracer {
   std::vector<VarBase*> PyTrace(OpBase* op, const std::vector<VarBase*>& inputs,
                                 bool stop_gradient = false);
 
+ private:
   framework::BlockDesc* root_block_;
 };
 
diff --git a/python/paddle/fluid/imperative/layers.py b/python/paddle/fluid/imperative/layers.py
index 6d3987c9d..f0fec03db 100644
--- a/python/paddle/fluid/imperative/layers.py
+++ b/python/paddle/fluid/imperative/layers.py
@@ -54,6 +54,25 @@ class PyLayer(core.PyLayer):
     def __init__(self):
         super(PyLayer, self).__init__()
 
+    @classmethod
+    def _do_forward(cls, inputs):
+        return cls._to_tuple(cls.forward(inputs))
+
+    @classmethod
+    def _do_backward(cls, inputs):
+        return cls._to_tuple(cls.backward(inputs))
+
+    @staticmethod
+    def _to_tuple(inputs):
+        if not isinstance(inputs, list) and not isinstance(inputs, tuple):
+            inputs = [inputs]
+        ret = []
+        for inp in inputs:
+            tensor = core.LoDTensor()
+            tensor.set(inp, core.CPUPlace())
+            ret.append(tensor)
+        return tuple(ret)
+
     @staticmethod
     def forward(*inputs):
         raise NotImplementedError
@@ -70,16 +89,15 @@ class PyLayer(core.PyLayer):
 
         if not hasattr(cls, 'forward_id'):
             cls.forward_id = core.PyLayer.num_funcs() + 1
-            PyLayer.register_func(cls.forward_id, cls.forward)
+            PyLayer.register_func(cls.forward_id, cls._do_forward)
             cls.backward_id = core.PyLayer.num_funcs() + 1
-            PyLayer.register_func(cls.backward_id, cls.backward)
+            PyLayer.register_func(cls.backward_id, cls._do_backward)
 
         iop = core.OpBase()
         iop.forward_id = cls.forward_id
         iop.backward_id = cls.backward_id
         block.ops.append(iop)
         ivars = tracer.py_trace(iop, ivar_inputs, False)
-        # ivars = core.PyLayer.apply(cls.forward, inputs)
         ret = []
         for ivar in ivars:
             tensor = ivar.value().get_tensor()
diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py
index 86baff3c5..dfe4daca9 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative.py
@@ -41,26 +41,12 @@ class MyPyLayer(fluid.imperative.PyLayer):
 
     @staticmethod
     def forward(inputs):
-        sys.stderr.write('before forward\n')
-        ret = np.tanh(inputs[0])
-        sys.stderr.write('after forward: %s\n' % ret)
-        tensor = core.LoDTensor()
-        tensor.set(ret, core.CPUPlace())
-        return tuple([tensor])
+        return np.tanh(inputs[0])
 
     @staticmethod
     def backward(inputs):
-        sys.stderr.write('calling into backward: %s\n' % str(inputs))
         inp, out, dout = inputs
-        inp = np.array(inp)
-        out = np.array(out)
-        dout = np.array(dout)
-        sys.stderr.write('calling into backward: %s, %s, %s\n' %
-                         (inp, out, dout))
-        ret = np.array(dout) * (1 - np.square(np.array(out)))
-        tensor = core.LoDTensor()
-        tensor.set(ret, core.CPUPlace())
-        return tuple([tensor])
+        return np.array(dout) * (1 - np.square(np.array(out)))
 
 
 class MLP(fluid.imperative.Layer):
-- 
GitLab
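
A minimal usage sketch of the polished PyLayer API, mirroring the updated
MyPyLayer in test_imperative.py above: forward/backward now return raw numpy
arrays, and the new _do_forward/_do_backward classmethods wrap their results
into LoDTensor tuples via _to_tuple before handing them to the C++ tracer.
The guard()/to_variable invocation at the end follows the conventions of the
surrounding test suite and is an assumption for illustration, not part of
this patch:

    import numpy as np
    import paddle.fluid as fluid

    class MyPyLayer(fluid.imperative.PyLayer):
        def __init__(self):
            super(MyPyLayer, self).__init__()

        @staticmethod
        def forward(inputs):
            # Return a raw ndarray; PyLayer._to_tuple converts it into a
            # tuple of LoDTensors for the tracer.
            return np.tanh(inputs[0])

        @staticmethod
        def backward(inputs):
            # inputs carries (forward input, forward output, output grad),
            # keyed on the C++ side by PyLayer::kFwdInp / PyLayer::kFwdOut.
            inp, out, dout = inputs
            return np.array(dout) * (1 - np.square(np.array(out)))

    # Hypothetical invocation under imperative mode (assumed helpers).
    with fluid.imperative.guard():
        np_inp = np.ones([2, 2], dtype=np.float32)
        var_inp = fluid.imperative.base.to_variable(np_inp)
        outs = MyPyLayer()(var_inp)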