From 9597fd05e9b1f669ad6102d67069f1c04e8840f8 Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Wed, 9 Jan 2019 15:42:05 +0800
Subject: [PATCH] polish test=develop

---
 paddle/fluid/imperative/layer.cc              | 90 ++++++++++++-------
 paddle/fluid/imperative/layer.h               | 87 +++++-------------
 paddle/fluid/imperative/tracer.h              |  5 +-
 paddle/fluid/pybind/pybind.cc                 | 10 ++-
 python/paddle/fluid/imperative/layers.py      | 11 ++-
 .../fluid/tests/unittests/test_imperative.py  | 44 ++++++++-
 6 files changed, 136 insertions(+), 111 deletions(-)

diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc
index 131e3e1bd..d0aaa00c4 100644
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -128,26 +128,23 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
     return {};
   }
 
-  std::vector<std::unique_ptr<framework::Variable>> tmp_vars;
   std::map<std::string, std::vector<framework::Variable*>> grad_outputs;
-  for (auto it : grad_output_vars_) {
-    auto& outputs = grad_outputs[it.first];
-    for (size_t i = 0; i < it.second.size(); ++i) {
-      // Allocate a new variable
-      Variable* tmp_var = new framework::Variable();
-      tmp_var->GetMutable<framework::LoDTensor>();
-
-      tmp_vars.emplace_back(tmp_var);
-      outputs.push_back(tmp_var);
-    }
-  }
-
   if (backward_id_ > 0) {
     VLOG(3) << "py_layer_grad";
-    PyLayer::ApplyGrad(backward_id_, grad_input_vars_["X@GRAD"],
-                       &(grad_outputs["Out@GRAD"]));
+    grad_outputs["Out@GRAD"] =
+        PyLayer::ApplyGrad(backward_id_, grad_input_vars_["X@GRAD"]);
   } else {
     VLOG(3) << "op grad " << grad_op_desc_->Type();
+    for (auto it : grad_output_vars_) {
+      auto& outputs = grad_outputs[it.first];
+      for (size_t i = 0; i < it.second.size(); ++i) {
+        // Allocate a new variable
+        Variable* tmp_var = new framework::Variable();
+        tmp_var->GetMutable<framework::LoDTensor>();
+        outputs.push_back(tmp_var);
+      }
+    }
+
     framework::RuntimeContext ctx(grad_input_vars_, grad_outputs);
 
     // No need to do compile time infer shape here.
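
Note: the hunk above makes OpBase::ApplyGrad either take the PyLayer path (a registered Python backward produces the output grads directly) or allocate fresh grad output variables and run the grad op. A framework-free Python sketch of that dispatch, for readers skimming the diff; numpy only, and the names apply_grad, python_backwards and op_grad_fn are illustrative, not Paddle APIs.

import numpy as np

# Toy registry standing in for py_funcs_: backward_id -> python backward function.
python_backwards = {7: lambda grads: [g * 2.0 for g in grads]}

def apply_grad(backward_id, grad_inputs, grad_output_slots, op_grad_fn):
    grad_outputs = {}
    if backward_id > 0:
        # PyLayer path: the registered python backward produces the grads directly.
        grad_outputs["Out@GRAD"] = python_backwards[backward_id](grad_inputs)
    else:
        # Op path: allocate a fresh buffer for every grad output slot first,
        # then let the grad op fill them in.
        for name, slots in grad_output_slots.items():
            grad_outputs[name] = [np.zeros_like(s) for s in slots]
        op_grad_fn(grad_inputs, grad_outputs)
    return grad_outputs

# PyLayer branch only; the op branch would need a real grad op callback.
print(apply_grad(7, [np.ones((2, 2))], {}, None))
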
@@ -170,10 +167,13 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
   for (auto it : grad_output_vars_) {
     auto& outputs = grad_outputs[it.first];
     auto& origin_outputs = it.second;
+    PADDLE_ENFORCE_EQ(outputs.size(), origin_outputs.size());
 
     for (size_t i = 0; i < outputs.size(); ++i) {
+      framework::Variable* grad = outputs[i];
       framework::Variable* orig_grad = origin_outputs[i];
-      AddTo(outputs[i], orig_grad);
+      AddTo(grad, orig_grad);
+      delete grad;
     }
   }
   return input_vars_;
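
Note: the hunk above checks that the freshly computed grads line up with the original grad slots, accumulates each one into the persistent buffer, and only then frees the temporary. A minimal numpy analogue of that accumulate-then-release step; add_to and the variable names are illustrative only.

import numpy as np

def add_to(src, dst):
    # Counterpart of AddTo: accumulate src into the persistent grad buffer dst.
    dst += src

origin_grads = [np.zeros((2, 2)), np.zeros((3,))]   # grads owned by the VarBase
new_grads = [np.ones((2, 2)), np.full((3,), 0.5)]   # freshly computed this step

assert len(new_grads) == len(origin_grads)          # PADDLE_ENFORCE_EQ analogue
for new, origin in zip(new_grads, origin_grads):
    add_to(new, origin)
del new_grads                                       # temporaries released (`delete grad` in C++)

print(origin_grads[0])
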
@@ -197,30 +197,60 @@ void PyLayer::RegisterFunc(int func_id, const py::object& py_func) {
   py_funcs_[func_id] = py_func;
 }
 
+int PyLayer::NumFuncs() { return py_funcs_.size(); }
+
 std::vector<VarBase*> PyLayer::Apply(int func_id,
                                      const std::vector<VarBase*>& inputs) {
-  std::vector<framework::LoDTensor> tensor_inputs;
-  std::vector<VarBase*> ret;
-
+  std::vector<framework::Variable*> invars;
   for (const VarBase* in : inputs) {
-    tensor_inputs.push_back(in->var_->Get<framework::LoDTensor>());
+    invars.push_back(in->var_);
   }
   PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end());
-  CallPythonFunc(py_funcs_[func_id], tensor_inputs, &ret);
+  std::vector<Variable*> outvars = CallPythonFunc(py_funcs_[func_id], invars);
+  std::vector<VarBase*> ret;
+  for (Variable* v : outvars) {
+    ret.push_back(new VarBase(v, new Variable()));
+  }
   return ret;
 }
 
-void PyLayer::ApplyGrad(int func_id,
-                        const std::vector<framework::Variable*>& inputs,
-                        std::vector<framework::Variable*>* outputs) {
-  std::vector<framework::LoDTensor> tensor_inputs;
-  std::vector<VarBase*> ret;
+std::vector<Variable*> PyLayer::ApplyGrad(
+    int func_id, const std::vector<framework::Variable*>& inputs) {
+  PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end());
+  return CallPythonFunc(py_funcs_[func_id], inputs);
+}
 
-  for (const Variable* in : inputs) {
-    tensor_inputs.push_back(in->Get<framework::LoDTensor>());
+std::vector<Variable*> PyLayer::CallPythonFunc(
+    const py::object& callable, const std::vector<framework::Variable*>& ins) {
+  py::gil_scoped_acquire guard;
+  py::tuple in_args(ins.size());
+  for (size_t i = 0; i < ins.size(); ++i) {
+    const framework::LoDTensor& t = ins[i]->Get<framework::LoDTensor>();
+    in_args[i] = t.IsInitialized() ? py::cast(t) : py::cast(nullptr);
   }
-  PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end());
-  CallPythonFunc(py_funcs_[func_id], tensor_inputs, outputs);
+  VLOG(3) << "pyfunc in " << py::len(in_args);
+
+  // TODO(panyx0718): Who owns the returned LoDTensor.
+  auto ret = callable(in_args);
+  auto ret_tuple = py::cast<py::tuple>(ret);
+  size_t ret_num = py::len(ret_tuple);
+  std::vector<framework::Variable*> outs;
+  VLOG(3) << "pyfunc out " << ret_num;
+  for (size_t i = 0; i < ret_num; ++i) {
+    try {
+      auto* py_out_tensor = py::cast<framework::LoDTensor*>(ret_tuple[i]);
+      PADDLE_ENFORCE_NOT_NULL(py_out_tensor,
+                              "Output tensor %d should not be nullptr", i);
+      auto* var = new framework::Variable();
+      auto* tensor = var->GetMutable<framework::LoDTensor>();
+      tensor->ShareDataWith(*py_out_tensor);
+      tensor->set_lod(py_out_tensor->lod());
+      outs.push_back(var);
+    } catch (py::cast_error&) {
+      PADDLE_THROW("The %d-th output must be LoDTensor", i);
+    }
+  }
+  return outs;
 }
 
 }  // namespace imperative
diff --git a/paddle/fluid/imperative/layer.h b/paddle/fluid/imperative/layer.h
index 84e04cb74..4be0614c7 100644
--- a/paddle/fluid/imperative/layer.h
+++ b/paddle/fluid/imperative/layer.h
@@ -87,12 +87,15 @@ class OpBase;
 
 class VarBase {
  public:
-  VarBase()
+  VarBase() : VarBase(new framework::Variable(), new framework::Variable()) {}
+
+  // Owns `var` and `grad`
+  VarBase(framework::Variable* var, framework::Variable* grad)
       : pre_op_(nullptr),
         pre_op_out_idx_(-1),
         var_desc_(nullptr),
-        var_(new framework::Variable()),
-        grads_(new framework::Variable()),
+        var_(var),
+        grads_(grad),
         stop_gradient_(false) {}
 
   explicit VarBase(bool stop_gradient)
@@ -131,8 +134,8 @@ class OpBase {
  public:
   OpBase()
       : op_desc_(nullptr),
-        grad_op_desc_(nullptr),
         forward_id_(-1),
+        grad_op_desc_(nullptr),
         backward_id_(-1) {}
 
   virtual ~OpBase() {
@@ -141,10 +144,13 @@
 
   std::map<std::string, std::vector<VarBase*>> ApplyGrad();
 
+  // One of `op_desc_` or `forward_id_` is set, not both.
+  // For pure python PyLayer, use `forward_id_`, otherwise, use op_desc_.
   framework::OpDesc* op_desc_;
-  framework::OpDesc* grad_op_desc_;
-  int forward_id_;
 
+  // When has backward, one of `grad_op_desc_` or `backward_id_` is set,
+  // not both.
+  framework::OpDesc* grad_op_desc_;
   int backward_id_;
 
   std::map<std::string, std::vector<VarBase*>> input_vars_;
@@ -167,76 +173,23 @@ class Layer {
   }
 };
 
-static void CallPythonFunc(const py::object& callable,
-                           const std::vector<framework::LoDTensor>& ins,
-                           std::vector<VarBase*>* outs) {
-  py::gil_scoped_acquire guard;
-  py::tuple in_args(ins.size());
-  for (size_t i = 0; i < ins.size(); ++i) {
-    in_args[i] = ins[i].IsInitialized() ? py::cast(ins[i]) : py::cast(nullptr);
-  }
-
-  // TODO(panyx0718): Who owns the returned LoDTensor.
-  auto ret = callable(in_args);
-  auto ret_tuple = py::cast<py::tuple>(ret);
-  size_t ret_num = py::len(ret_tuple);
-  for (size_t i = 0; i < ret_num; ++i) {
-    try {
-      auto* py_out_tensor = py::cast<framework::LoDTensor*>(ret_tuple[i]);
-      PADDLE_ENFORCE_NOT_NULL(py_out_tensor,
-                              "Output tensor %d should not be nullptr", i);
-      VarBase* var = new VarBase();
-      auto* tensor = var->var_->GetMutable<framework::LoDTensor>();
-      tensor->ShareDataWith(*py_out_tensor);
-      tensor->set_lod(py_out_tensor->lod());
-      outs->push_back(var);
-    } catch (py::cast_error&) {
-      PADDLE_THROW("The %d-th output must be LoDTensor", i);
-    }
-  }
-}
-
-static void CallPythonFunc(const py::object& callable,
-                           const std::vector<framework::LoDTensor>& ins,
-                           std::vector<framework::Variable*>* outs) {
-  py::gil_scoped_acquire guard;
-  py::tuple in_args(ins.size());
-  for (size_t i = 0; i < ins.size(); ++i) {
-    in_args[i] = ins[i].IsInitialized() ? py::cast(ins[i]) : py::cast(nullptr);
-  }
-  VLOG(3) << "pyfunc in " << py::len(in_args);
-
-  // TODO(panyx0718): Who owns the returned LoDTensor.
-  auto ret = callable(in_args);
-  auto ret_tuple = py::cast<py::tuple>(ret);
-  size_t ret_num = py::len(ret_tuple);
-  VLOG(3) << "pyfunc out " << ret_num;
-  for (size_t i = 0; i < ret_num; ++i) {
-    try {
-      auto* py_out_tensor = py::cast<framework::LoDTensor*>(ret_tuple[i]);
-      PADDLE_ENFORCE_NOT_NULL(py_out_tensor,
-                              "Output tensor %d should not be nullptr", i);
-      auto* tensor = (*outs)[i]->GetMutable<framework::LoDTensor>();
-      tensor->ShareDataWith(*py_out_tensor);
-      tensor->set_lod(py_out_tensor->lod());
-    } catch (py::cast_error&) {
-      PADDLE_THROW("The %d-th output must be LoDTensor", i);
-    }
-  }
-}
-
 class PyLayer {
  public:
   virtual ~PyLayer() {}
 
   static void RegisterFunc(int func_id, const py::object& py_func);
 
+  static int NumFuncs();
+
   static std::vector<VarBase*> Apply(int func_id,
                                      const std::vector<VarBase*>& inputs);
 
-  static void ApplyGrad(int func_id,
-                        const std::vector<framework::Variable*>& inputs,
-                        std::vector<framework::Variable*>* outputs);
+  static std::vector<framework::Variable*> ApplyGrad(
+      int func_id, const std::vector<framework::Variable*>& inputs);
+
+ private:
+  static std::vector<framework::Variable*> CallPythonFunc(
+      const py::object& callable, const std::vector<framework::Variable*>& ins);
 };
 
 }  // namespace imperative
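
Note: the layer.cc/layer.h changes above collapse the two duplicated CallPythonFunc helpers into one private static on PyLayer, keyed by an integer func id. A rough Python analogue of that call convention, illustrative names only: the callable is looked up by id, inputs are packed into one tuple, and every returned entry is checked before it is accepted.

import numpy as np

_py_funcs = {}  # func_id -> callable, the analogue of py_funcs_

def register_func(func_id, fn):
    _py_funcs[func_id] = fn

def num_funcs():
    return len(_py_funcs)

def call_python_func(func_id, inputs):
    fn = _py_funcs[func_id]                      # PADDLE_ENFORCE: id must be registered
    outs = list(fn(tuple(inputs)))               # inputs are packed into a single tuple
    for i, out in enumerate(outs):
        if not isinstance(out, np.ndarray):      # every output must be tensor-like
            raise TypeError("The %d-th output must be an ndarray" % i)
    return outs

register_func(1, lambda xs: [x + 1 for x in xs])
print(num_funcs(), call_python_func(1, [np.zeros((2, 2))]))
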
diff --git a/paddle/fluid/imperative/tracer.h b/paddle/fluid/imperative/tracer.h
index f6aebea9b..f68a67e5d 100644
--- a/paddle/fluid/imperative/tracer.h
+++ b/paddle/fluid/imperative/tracer.h
@@ -132,8 +132,9 @@ class Tracer {
     if (!stop_gradient) {
       framework::OpDesc* grad_op_desc;
       // TODO(panyx): Is this leaked?
-      auto grad_to_var = new std::unordered_map<std::string, std::string>();
-      CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
+      std::unique_ptr<std::unordered_map<std::string, std::string>> grad_to_var(
+          new std::unordered_map<std::string, std::string>());
+      CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var.get());
       op->grad_op_desc_ = grad_op_desc;
 
       for (auto it : grad_op_desc->Inputs()) {
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 93dd16c8c..e52401ced 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -191,7 +191,7 @@ PYBIND11_MODULE(core, m) {
             return self.Forward(inputs);
           });
 
-  py::class_(m, "PyLayer")
+  py::class_(m, "PyLayer")
       .def(py::init<>())
       .def_static(
          "apply",
          [](int func_id, const std::vector<imperative::VarBase*> &inputs) {
            return imperative::PyLayer::Apply(func_id, inputs);
          },
          py::return_value_policy::take_ownership)
-      .def_static("register_func", [](int func_id, const py::object &callable) {
-        imperative::PyLayer::RegisterFunc(func_id, callable);
-      });
+      .def_static("register_func",
+                  [](int func_id, const py::object &callable) {
+                    imperative::PyLayer::RegisterFunc(func_id, callable);
+                  })
+      .def_static("num_funcs", &imperative::PyLayer::NumFuncs);
 
   BindTracer(&m);
diff --git a/python/paddle/fluid/imperative/layers.py b/python/paddle/fluid/imperative/layers.py
index 2b224b8db..8027d9ba3 100644
--- a/python/paddle/fluid/imperative/layers.py
+++ b/python/paddle/fluid/imperative/layers.py
@@ -68,12 +68,15 @@ class PyLayer(core.PyLayer):
         block = framework.default_main_program().current_block()
         inputs = [x._ivar for x in inputs]
 
-        PyLayer.register_func(1, cls.forward)
-        PyLayer.register_func(2, cls.backward)
+        if not hasattr(cls, 'forward_id'):
+            cls.forward_id = core.PyLayer.num_funcs() + 1
+            PyLayer.register_func(cls.forward_id, cls.forward)
+            cls.backward_id = core.PyLayer.num_funcs() + 1
+            PyLayer.register_func(cls.backward_id, cls.backward)
 
         iop = core.OpBase()
-        iop.forward_id = 1
-        iop.backward_id = 2
+        iop.forward_id = cls.forward_id
+        iop.backward_id = cls.backward_id
         block.ops.append(iop)
         ivars = tracer.py_trace(iop, inputs, False)
         # ivars = core.PyLayer.apply(cls.forward, inputs)
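
Note: the layers.py hunk above replaces the hard-coded ids 1 and 2 with ids allocated lazily, once per PyLayer subclass, from the size of the global registry exposed as num_funcs(). A stripped-down sketch of that bookkeeping; _registry, _num_funcs and ensure_ids are made-up names, not the actual module.

_registry = {}  # func_id -> callable, plays the role of core.PyLayer's table

def _num_funcs():
    return len(_registry)

def _register(func_id, fn):
    _registry[func_id] = fn

def ensure_ids(cls):
    # Mirrors the `if not hasattr(cls, 'forward_id')` guard: ids are handed out
    # once per class and reused on every later call.
    if not hasattr(cls, 'forward_id'):
        cls.forward_id = _num_funcs() + 1
        _register(cls.forward_id, cls.forward)
        cls.backward_id = _num_funcs() + 1
        _register(cls.backward_id, cls.backward)
    return cls.forward_id, cls.backward_id

class Double:
    @staticmethod
    def forward(xs):
        return [2 * x for x in xs]

    @staticmethod
    def backward(dys):
        return [2 * dy for dy in dys]

print(ensure_ids(Double))   # e.g. (1, 2)
print(ensure_ids(Double))   # same ids on the second call
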
diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py
index 9f93ba933..e3e1ce7ca 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative.py
@@ -81,14 +81,52 @@ class MLP(fluid.imperative.Layer):
 
 
 class TestImperative(unittest.TestCase):
-    """
     def test_layer(self):
         with fluid.imperative.guard():
             cl = core.Layer()
             cl.forward([])
             l = fluid.imperative.Layer()
             self.assertRaises(NotImplementedError, l.forward, [])
-    """
+
+    def test_pylayer_func_id(self):
+
+        with fluid.imperative.guard():
+
+            class PyLayer1(fluid.imperative.PyLayer):
+                def __init__(self):
+                    super(PyLayer1, self).__init__()
+
+                @staticmethod
+                def forward(inputs):
+                    return inputs
+
+                @staticmethod
+                def backward(inputs):
+                    return inputs
+
+            class PyLayer2(fluid.imperative.PyLayer):
+                def __init__(self):
+                    super(PyLayer2, self).__init__()
+
+                @staticmethod
+                def forward(inputs):
+                    return inputs
+
+                @staticmethod
+                def backward(inputs):
+                    return inputs
+
+            py_layer_1 = PyLayer1()
+            py_layer_2 = PyLayer2()
+            py_layer_1([fluid.imperative.base.to_variable(np.ones([2, 2]))])
+            py_layer_2([fluid.imperative.base.to_variable(np.ones([2, 2]))])
+            id = py_layer_1.forward_id
+            self.assertGreater(id, 0)
+            self.assertEqual(py_layer_1.backward_id, id + 1)
+            self.assertEqual(py_layer_2.forward_id, id + 2)
+            self.assertEqual(py_layer_2.backward_id, id + 3)
+            py_layer_1([fluid.imperative.base.to_variable(np.ones([2, 2]))])
+            self.assertEqual(py_layer_1.forward_id, id)
 
     def test_pylayer(self):
         np_inp = np.ones([2, 2], np.float32)
@@ -118,7 +156,6 @@ class TestImperative(unittest.TestCase):
         self.assertTrue(np.allclose(dy_out, static_out))
         self.assertTrue(np.allclose(dy_grad, static_grad))
 
-    """
     def test_layer_in_out(self):
         np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
         with fluid.imperative.guard():
@@ -172,7 +209,6 @@ class TestImperative(unittest.TestCase):
         self.assertTrue(np.allclose(dy_out, static_out))
         self.assertTrue(np.allclose(dy_grad, static_grad))
 
-    """
 
 
 if __name__ == '__main__':
-- 
GitLab
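
Note: a hedged end-to-end usage sketch of the changed API, patterned directly on test_pylayer_func_id above. It assumes paddle.fluid is built from this branch; the class name Identity is made up, and the forward/backward bodies just return their inputs exactly as the test classes do.

import numpy as np
import paddle.fluid as fluid

class Identity(fluid.imperative.PyLayer):
    def __init__(self):
        super(Identity, self).__init__()

    @staticmethod
    def forward(inputs):
        return inputs

    @staticmethod
    def backward(inputs):
        return inputs

with fluid.imperative.guard():
    layer = Identity()
    outs = layer([fluid.imperative.base.to_variable(np.ones([2, 2], np.float32))])
    # After the first call, forward_id/backward_id are cached on the class and
    # reused by later calls, which is exactly what the new test asserts.
    print(layer.forward_id, layer.backward_id)
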