From 9abf40c9e2d853f137d1c642892b756e4bdf19b3 Mon Sep 17 00:00:00 2001
From: minqiyang
Date: Fri, 22 Feb 2019 18:53:41 +0800
Subject: [PATCH] Add imperative python tracer

---
 paddle/fluid/imperative/layer.h            |  2 ++
 paddle/fluid/pybind/pybind.cc              | 10 ++++++
 python/paddle/fluid/framework.py           | 37 ++++++++-------------
 python/paddle/fluid/imperative/__init__.py |  4 +++
 python/paddle/fluid/imperative/base.py     |  3 +-
 5 files changed, 31 insertions(+), 25 deletions(-)

diff --git a/paddle/fluid/imperative/layer.h b/paddle/fluid/imperative/layer.h
index bbf614831ca..8a295341b96 100644
--- a/paddle/fluid/imperative/layer.h
+++ b/paddle/fluid/imperative/layer.h
@@ -205,6 +205,7 @@ class OpBase {
       : op_desc_(nullptr),
         forward_id_(-1),
         backward_id_(-1),
+        trace_id_(-1),
         place_(platform::CPUPlace()) {}
 
   virtual ~OpBase() {
@@ -225,6 +226,7 @@ class OpBase {
   // Note: each fwd op corresponds to a vector of bwd ops.
   std::vector<framework::OpDesc *> grad_op_descs_;
   int backward_id_;
+  int trace_id_;
 
   platform::Place place_;
 
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index fd74dd3d0f9..1140c6a803b 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -193,6 +193,16 @@ PYBIND11_MODULE(core, m) {
             }
           },
           py::return_value_policy::reference)
+      .def_property("_trace_id",
+                    [](const imperative::OpBase &self) {
+                      pybind11::gil_scoped_release release;
+                      return self.trace_id_;
+                    },
+                    [](imperative::OpBase &self, int trace_id) {
+                      pybind11::gil_scoped_release release;
+                      self.trace_id_ = trace_id;
+                    },
+                    py::return_value_policy::reference)
       .def_property(
           "forward_id",
           [](const imperative::OpBase &self) { return self.forward_id_; },
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 73a94821f22..12de275facf 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1201,13 +1201,13 @@ class Block(object):
         raise ValueError("Var {0} is not found recursively".format(name))
 
     def _clear_block(self):
-        # TODO(minqiyang): move this to backward_hooks
-        self.desc._clear_block()
+        assert _in_imperative_mode()
 
-        for name in self.vars.keys():
-            assert self.vars[name].persistable
+        # TODO(minqiyang): move this to Variable and Operator's __del__
+        self.desc._clear_block()
 
-        del self.ops[:]
+        assert len(self.vars) == 0
+        assert len(self.ops) == 0
 
     def all_parameters(self):
         return list(self.iter_parameters())
@@ -1345,26 +1345,13 @@ class Block(object):
         #
         # TODO(minqiyang): add op stop_gradient support in static mode too.
         # currently, we only support stop_gradient in imperative mode.
-            self._trace_op(op, kwargs.get("stop_gradient", False))
-        self.ops.append(op)
+            _imperative_tracer().trace_op(op,
+                                          kwargs.get("stop_gradient", False))
+        else:
+            self.ops.append(op)
 
         return op
 
-    def _trace_op(self, op, stop_gradient=False):
-        backward_refs = _imperative_tracer().trace(
-            op.iop, op.inputs, op.outputs, self.desc,
-            _imperative_current_expected_place_, stop_gradient)
-
-        # TODO(minqiyang): support backward_hooks to eager remove backward_refs
-        op.backward_refs = defaultdict(list)
-        for k, v in six.iteritems(op.inputs):
-            if k in backward_refs:
-                op.backward_refs[k] = op.inputs[k]
-
-        for k, v in six.iteritems(op.outputs):
-            if k in backward_refs:
-                op.backward_refs[k] = op.outputs[k]
-
     def _insert_op(self, index, *args, **kwargs):
         """
         Insert a Operator according to the giving arguments.
@@ -1417,9 +1404,11 @@ class Block(object):
             inputs=kwargs.get("inputs", None),
             outputs=kwargs.get("outputs", None),
             attrs=kwargs.get("attrs", None))
-        self.ops.insert(0, op)
         if _in_imperative_mode():
-            self._trace_op(op, kwargs.get("stop_gradient", False))
+            _imperative_tracer().trace_op(op,
+                                          kwargs.get("stop_gradient", False))
+        else:
+            self.ops.insert(0, op)
         return op
 
     def _sync_with_cpp(self):
diff --git a/python/paddle/fluid/imperative/__init__.py b/python/paddle/fluid/imperative/__init__.py
index 54dc794ea63..034a11e0a60 100644
--- a/python/paddle/fluid/imperative/__init__.py
+++ b/python/paddle/fluid/imperative/__init__.py
@@ -23,7 +23,11 @@ from .layers import *
 from . import nn
 from .nn import *
 
+from . import tracer
+from .tracer import *
+
 __all__ = []
 __all__ += layers.__all__
 __all__ += base.__all__
 __all__ += nn.__all__
+__all__ += tracer.__all__
diff --git a/python/paddle/fluid/imperative/base.py b/python/paddle/fluid/imperative/base.py
index d4525233cc6..174f138bfa2 100644
--- a/python/paddle/fluid/imperative/base.py
+++ b/python/paddle/fluid/imperative/base.py
@@ -16,6 +16,7 @@ import numpy as np
 
 from paddle.fluid import core
 from paddle.fluid import framework
+from .tracer import Tracer
 
 __all__ = ['enabled', 'guard', 'to_variable']
 
@@ -28,7 +29,7 @@ def enabled():
 def guard(place=None):
     train = framework.Program()
     startup = framework.Program()
-    tracer = core.Tracer(train.current_block().desc)
+    tracer = Tracer(train.current_block().desc)
 
     if place is None:
         if core.is_compiled_with_cuda():
--
GitLab
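
Usage sketch (a hedged illustration, not part of the patch): with this change,
code running under fluid.imperative.guard() sees _in_imperative_mode() == True,
so Block.append_op hands each op to the Python-side Tracer via trace_op()
instead of appending it to block.ops. The sketch below assumes the names this
patch re-exports (guard and to_variable from imperative/base.py) and the
underscore-prefixed _numpy() accessor of that era's imperative API; exact
names may differ in other revisions.

    import numpy as np
    import paddle.fluid as fluid

    with fluid.imperative.guard():
        # guard() installs the new Python Tracer (which wraps the
        # core.Tracer), so the relu op below is recorded by trace_op()
        # rather than stored in block.ops.
        x = fluid.imperative.to_variable(np.ones([2, 2], dtype='float32'))
        y = fluid.layers.relu(x)
        print(y._numpy())  # assumption: eager fetch via _numpy() in this era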