From aeb74af54c2428cdb4d6c541b5996a1a4ddd98a4 Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Mon, 26 Nov 2018 15:25:45 +0800
Subject: [PATCH] allow operator to run imperatively

---
 paddle/fluid/framework/feed_fetch_method.cc      | 10 +---
 paddle/fluid/framework/ir/graph.cc               |  5 +-
 paddle/fluid/imperative/tracer.h                 | 43 +++++++++++++-
 paddle/fluid/pybind/imperative.cc                | 16 ++++-
 paddle/fluid/pybind/pybind.cc                    |  1 +
 python/paddle/fluid/imperative/layers.py         | 58 ++++++++++++-------
 python/paddle/fluid/layers/nn.py                 |  1 +
 .../fluid/tests/unittests/test_imperative.py     | 25 +++-----
 8 files changed, 107 insertions(+), 52 deletions(-)

diff --git a/paddle/fluid/framework/feed_fetch_method.cc b/paddle/fluid/framework/feed_fetch_method.cc
index b13d0d380..6338be75a 100644
--- a/paddle/fluid/framework/feed_fetch_method.cc
+++ b/paddle/fluid/framework/feed_fetch_method.cc
@@ -58,15 +58,7 @@ LoDTensor& GetFetchVariable(const Scope& scope, const std::string& var_name,
 
 LoDTensor& GetVariableTensor(const Scope& scope, const std::string& var_name) {
   Variable* var = scope.FindVar(var_name);
   PADDLE_ENFORCE(var, "%s no in scope", var_name);
-  // TODO(panyx0718): hack, remove it once we run oprerator.
-  LoDTensor* tensor = var->GetMutable<LoDTensor>();
-  int numel = 10;
-  float* data =
-      tensor->mutable_data<float>(framework::make_ddim({numel}),
-                                  platform::CPUPlace(), sizeof(float) * numel);
-  for (size_t i = 0; i < numel; ++i) data[i] = 1;
-
-  PADDLE_ENFORCE(var->IsType<LoDTensor>(), "Variable is not LoDTensor");
+  PADDLE_ENFORCE(var->IsType<LoDTensor>(), "Only support lod tensor now.");
   return *var->GetMutable<LoDTensor>();
 }
diff --git a/paddle/fluid/framework/ir/graph.cc b/paddle/fluid/framework/ir/graph.cc
index fc91564bb..8679118fe 100644
--- a/paddle/fluid/framework/ir/graph.cc
+++ b/paddle/fluid/framework/ir/graph.cc
@@ -38,9 +38,8 @@ void CheckProgram(const ProgramDesc &program) {
     switch (role_id) {
       case _INT(OpRole::kForward):
         if (visit.find(_INT(OpRole::kBackward)) != visit.end()) {
-          LOG(ERROR)
-              << "Cannot add backward operator before forward operator %s."
-              << op->Type();
+          LOG(ERROR) << "Cannot add backward operator before forward operator "
+                     << op->Type();
         }
         break;
       case _INT(OpRole::kBackward):
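
With the dummy-fill hack removed, GetVariableTensor only hands back a LoDTensor that a traced operator has actually written; on the Python side this is what makes reading results out of a layer's scope meaningful (the updated test at the end of this patch does x.numpy(scope=l._scope)). A minimal sketch of that read-back using only long-standing scope/tensor bindings; the helper name is made up for illustration and is not part of this patch:

    import numpy as np

    def fetch_as_numpy(scope, var_name):
        # scope is a paddle.fluid.core.Scope. This mirrors what GetVariableTensor
        # now enforces: the variable must already hold a LoDTensor written by a
        # traced op; no dummy data is fabricated.
        var = scope.find_var(var_name)
        return np.array(var.get_tensor())
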
diff --git a/paddle/fluid/imperative/tracer.h b/paddle/fluid/imperative/tracer.h
index 91e34a478..8a7a2d700 100644
--- a/paddle/fluid/imperative/tracer.h
+++ b/paddle/fluid/imperative/tracer.h
@@ -14,8 +14,12 @@
 
 #pragma once
 
+#include <string>
 #include <vector>
+
 #include "paddle/fluid/framework/op_desc.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/imperative/engine.h"
 
 namespace paddle {
@@ -26,10 +30,47 @@ class Tracer {
  Tracer() {}
 
   void Trace(framework::OpDesc* op_desc) {
-    LOG(ERROR) << "tracing " << op_desc->Type();
+    LOG(ERROR) << "tracer tracing " << op_desc->Type();
+    op_desc->InferShape(*block_);
+    op_desc->InferVarType(block_);
+    std::unique_ptr<framework::OperatorBase> op =
+        framework::OpRegistry::CreateOp(*op_desc);
+    for (const std::string& vname : op_desc->InputArgumentNames()) {
+      framework::Variable* var = scope_->Var(vname);
+      if (!var->IsInitialized()) {
+        framework::VarDesc* var_desc = block_->FindVar(vname);
+        if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
+          var->GetMutable<framework::LoDTensor>();
+        } else {
+          LOG(ERROR) << "tracer doesn't support yet";
+        }
+      }
+    }
+    for (const std::string& vname : op_desc->OutputArgumentNames()) {
+      framework::Variable* var = scope_->Var(vname);
+      if (!var->IsInitialized()) {
+        framework::VarDesc* var_desc = block_->FindVar(vname);
+        if (var_desc->GetType() == framework::proto::VarType::LOD_TENSOR) {
+          var->GetMutable<framework::LoDTensor>();
+        } else {
+          LOG(ERROR) << "tracer doesn't support yet";
+        }
+      }
+    }
+    op->Run(*scope_, platform::CPUPlace());
   }
 
+  void SetScope(framework::Scope* scope) { scope_ = scope; }
+
+  void SetBlock(framework::BlockDesc* block) { block_ = block; }
+
+  framework::Scope* Scope() const { return scope_; }
+
+  framework::BlockDesc* Block() const { return block_; }
+
  private:
+  framework::BlockDesc* block_;
+  framework::Scope* scope_;
   std::vector<Runnable*> runnables_;
 };
 
diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index cd97cd635..0e0f5a69a 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/pybind/imperative.h"
+#include "paddle/fluid/framework/block_desc.h"
+#include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/imperative/tracer.h"
 
 namespace paddle {
@@ -22,7 +24,19 @@ namespace pybind {
 void BindTracer(pybind11::module *m) {
   pybind11::class_<imperative::Tracer>(*m, "Tracer", "")
       .def(pybind11::init<>())
-      .def("trace", &imperative::Tracer::Trace);
+      .def("trace", &imperative::Tracer::Trace)
+      .def_property("scope",
+                    [](const imperative::Tracer &self) { return self.Scope(); },
+                    [](imperative::Tracer &self, framework::Scope *scope) {
+                      self.SetScope(scope);
+                    },
+                    R"DOC()DOC")
+      .def_property("block",
+                    [](const imperative::Tracer &self) { return self.Block(); },
+                    [](imperative::Tracer &self, framework::BlockDesc *block) {
+                      self.SetBlock(block);
+                    },
+                    R"DOC()DOC");
 }
 
 }  // namespace pybind
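
Taken together, the Tracer changes and the new scope/block properties give Python everything needed to run a single operator eagerly: describe the op as usual, point a tracer at a scope and a block, then call trace. The sketch below is purely illustrative (the standalone program and variable names are made up; the intended entry point is PyLayer further down), but it sticks to bindings shown in this patch plus long-standing core/framework APIs:

    import numpy as np
    import paddle.fluid.core as core
    from paddle.fluid import framework

    prog = framework.Program()
    block = prog.current_block()

    # Describe one relu op exactly as the static-graph API would.
    x = block.create_var(name='x', shape=[3], dtype='float32')
    out = block.create_var(name='out', shape=[3], dtype='float32')
    op = block.append_op(type='relu', inputs={'X': x}, outputs={'Out': out})

    # Feed the input through the scope the tracer will run against.
    scope = core.Scope()
    scope.var('x').get_tensor().set_float(
        np.array([1., -2., 3.], dtype=np.float32), core.CPUPlace())

    # Wire up the tracer and execute the op immediately on CPU.
    tracer = core.Tracer()
    tracer.scope = scope
    tracer.block = block.desc
    tracer.trace(op.desc)

    print(np.array(scope.find_var('out').get_tensor()))  # expect [1. 0. 3.]
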
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 3cf1ec34a..03fe1402c 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -159,6 +159,7 @@ PYBIND11_MODULE(core, m) {
             self.mutable_data<float>(place);
           })
       .def("set", PyCPUTensorSetFromArray<float>)
+      .def("set_float", PyCPUTensorSetFromArray<float>)
       .def("set", PyCPUTensorSetFromArray<int>)
       .def("set", PyCPUTensorSetFromArray<double>)
       .def("set", PyCPUTensorSetFromArray<int64_t>)
diff --git a/python/paddle/fluid/imperative/layers.py b/python/paddle/fluid/imperative/layers.py
index 37b36f2cd..32928347a 100644
--- a/python/paddle/fluid/imperative/layers.py
+++ b/python/paddle/fluid/imperative/layers.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import contextlib
 import sys
 import numpy as np
 
@@ -21,33 +22,46 @@ from paddle.fluid import framework
 
 __all__ = ['PyLayer']
 
 
+@contextlib.contextmanager
+def trace_scope(scope, block):
+    tmp_scope = framework._imperative_tracer().scope
+    tmp_block = framework._imperative_tracer().block
+    framework._imperative_tracer().scope = scope
+    framework._imperative_tracer().block = block
+    yield
+    framework._imperative_tracer().scope = tmp_scope
+    framework._imperative_tracer().block = tmp_block
+
+
 class PyLayer(core.Layer):
     def __init__(self):
         self._scope = core.Scope()
+        self._block = framework.default_main_program().current_block()
 
     def __call__(self, inputs):
-        if not isinstance(inputs, list) and not isinstance(inputs, tuple):
-            inputs = [inputs]
-
-        var_inputs = []
-        for x in inputs:
-            if isinstance(x, np.ndarray):
-                tensor = core.LoDTensor()
-                tensor.set(x, core.CPUPlace())
-                x = framework.Variable(
-                    framework.default_main_program().current_block(),
-                    type=core.VarDesc.VarType.LOD_TENSOR,
-                    name=None,
-                    shape=x.shape,
-                    dtype=x.dtype)
-            elif not isinstance(x, framework.Variable):
-                raise ValueError("not var or ndarray %s" % type(x))
-            self._scope.var(x.name)
-            var_inputs.append(x)
-        outputs = self.forward(var_inputs)
-        for out in outputs:
-            self._scope.var(out.name)
-        return outputs
+        with trace_scope(self._scope, self._block.desc):
+            if not isinstance(inputs, list) and not isinstance(inputs, tuple):
+                inputs = [inputs]
+
+            var_inputs = []
+            for x in inputs:
+                if isinstance(x, np.ndarray):
+                    py_var = framework.Variable(
+                        self._block,
+                        type=core.VarDesc.VarType.LOD_TENSOR,
+                        name=None,
+                        shape=x.shape,
+                        dtype=x.dtype)
+                    var = self._scope.var(py_var.name)
+                    tensor = var.get_tensor()
+                    tensor.set_float(x, core.CPUPlace())
+                    var_inputs.append(py_var)
+                elif isinstance(x, framework.Variable):
+                    var_inputs.append(x)
+                else:
+                    raise ValueError("not var or ndarray %s" % type(x))
+            outputs = self.forward(var_inputs)
+            return outputs
 
     def forward(self, inputs):
         print("at python.")
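
With trace_scope in place, PyLayer.__call__ copies ndarray inputs into the layer's own scope as LoDTensors (via the new set_float binding) and runs forward() under that scope/block, so every op built inside forward() is executed by the tracer as soon as it is created. A user-facing sketch, mirroring the MyLayer used by the updated unit test below; the layer name here is made up:

    import numpy as np
    import paddle.fluid as fluid

    class SquaredRelu(fluid.imperative.PyLayer):
        # Illustrative only; the fluid.layers.* ops used in forward() run eagerly.
        def __init__(self):
            super(SquaredRelu, self).__init__()

        def forward(self, inputs):
            x = fluid.layers.relu(inputs[0])
            return [fluid.layers.elementwise_mul(x, x)]

    with fluid.imperative.guard():
        layer = SquaredRelu()
        out = layer(np.array([1.0, -2.0, 3.0], dtype=np.float32))[0]
        # Results can be read straight out of the layer's scope, no Executor needed.
        print(out.numpy(scope=layer._scope))
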
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 35232bd48..69ac96896 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -17,6 +17,7 @@ All layers just related to the neural network.
 
 from __future__ import print_function
 
+import sys
 import numpy as np
 import os
 from ..layer_helper import LayerHelper
diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py
index a10b5b34a..af6a2167c 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative.py
@@ -31,25 +31,18 @@ class MyLayer(fluid.imperative.PyLayer):
 
 class TestImperative(unittest.TestCase):
     def test_layer(self):
-        cl = core.Layer()
-        cl.forward([])
-        l = fluid.imperative.PyLayer()
-        l.forward([])
-
-    def test_imperative_trace(self):
         with fluid.imperative.guard():
-            self.assertTrue(fluid.imperative.enabled())
-            x = fluid.layers.data(name='abc', shape=[3, 4], dtype='float32')
-            for _ in xrange(2):
-                x = fluid.layers.relu(x)
-                x = fluid.layers.elementwise_mul(x, x)
-            self.assertIsNotNone(x)
+            cl = core.Layer()
+            cl.forward([])
+            l = fluid.imperative.PyLayer()
+            l.forward([])
 
     def test_layer_in_out(self):
-        l = MyLayer()
-        x = l(np.ones([1], np.float32))[0]
-        self.assertIsNotNone(x)
-        sys.stderr.write("%s output: %s\n" % (x, x.numpy(scope=l._scope)))
+        with fluid.imperative.guard():
+            l = MyLayer()
+            x = l(np.array([1.0, 2.0, -1.0], dtype=np.float32))[0]
+            self.assertIsNotNone(x)
+            sys.stderr.write("%s output: %s\n" % (x, x.numpy(scope=l._scope)))
 
 
 if __name__ == '__main__':
--
GitLab