diff --git a/paddle/fluid/imperative/layer.h b/paddle/fluid/imperative/layer.h
index 9adc81f04ddaa14889c786a54704f0f19b0a3771..b3862f5ed9d28062f221913f79ef142da99ae68a 100644
--- a/paddle/fluid/imperative/layer.h
+++ b/paddle/fluid/imperative/layer.h
@@ -196,6 +196,7 @@ class OpBase {
       : op_desc_(nullptr),
         forward_id_(-1),
         backward_id_(-1),
+        trace_id_(-1),
         place_(platform::CPUPlace()) {}

   virtual ~OpBase() {
@@ -216,6 +217,7 @@ class OpBase {
   // Note: each fwd op corresponds to a vector of bwd ops.
   std::vector<framework::OpDesc *> grad_op_descs_;
   int backward_id_;
+  int trace_id_;

   platform::Place place_;
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index d8e57a1ac6ccfc768a7f0604fcd1f32744a126df..1c7b13fd8af67aa672977c056a4c041a628554f9 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -193,6 +193,16 @@ PYBIND11_MODULE(core, m) {
            }
          },
          py::return_value_policy::reference)
+      .def_property("_trace_id",
+                    [](const imperative::OpBase &self) {
+                      pybind11::gil_scoped_release release;
+                      return self.trace_id_;
+                    },
+                    [](imperative::OpBase &self, int trace_id) {
+                      pybind11::gil_scoped_release release;
+                      self.trace_id_ = trace_id;
+                    },
+                    py::return_value_policy::reference)
       .def_property(
           "forward_id",
           [](const imperative::OpBase &self) { return self.forward_id_; },
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index ae9bcbbecdf69fdc2074e2eba966556c92b20790..fdb7c0068e782be9cab00e52451e8f1eba532b73 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1193,13 +1193,13 @@ class Block(object):
         raise ValueError("Var {0} is not found recursively".format(name))

     def _clear_block(self):
-        # TODO(minqiyang): move this to backward_hooks
-        self.desc._clear_block()
+        assert _in_imperative_mode()

-        for name in self.vars.keys():
-            assert self.vars[name].persistable
+        # TODO(minqiyang): move this to Variable and Operator's __del__
+        self.desc._clear_block()

-        del self.ops[:]
+        assert len(self.vars) == 0
+        assert len(self.ops) == 0

     def all_parameters(self):
         return list(self.iter_parameters())
@@ -1337,26 +1337,13 @@ class Block(object):
             #
             # TODO(minqiyang): add op stop_gradient support in static mode too.
             # currently, we only support stop_gradient in imperative mode.
-            self._trace_op(op, kwargs.get("stop_gradient", False))
-        self.ops.append(op)
+            _imperative_tracer().trace_op(op,
+                                          kwargs.get("stop_gradient", False))
+        else:
+            self.ops.append(op)

         return op

-    def _trace_op(self, op, stop_gradient=False):
-        backward_refs = _imperative_tracer().trace(
-            op.iop, op.inputs, op.outputs, self.desc,
-            _imperative_current_expected_place_, stop_gradient)
-
-        # TODO(minqiyang): support backward_hooks to eager remove backward_refs
-        op.backward_refs = defaultdict(list)
-        for k, v in six.iteritems(op.inputs):
-            if k in backward_refs:
-                op.backward_refs[k] = op.inputs[k]
-
-        for k, v in six.iteritems(op.outputs):
-            if k in backward_refs:
-                op.backward_refs[k] = op.outputs[k]
-
     def _insert_op(self, index, *args, **kwargs):
         """
         Insert a Operator according to the giving arguments.
@@ -1409,9 +1396,11 @@ class Block(object):
             inputs=kwargs.get("inputs", None),
             outputs=kwargs.get("outputs", None),
             attrs=kwargs.get("attrs", None))
-        self.ops.insert(0, op)
         if _in_imperative_mode():
-            self._trace_op(op, kwargs.get("stop_gradient", False))
+            _imperative_tracer().trace_op(op,
+                                          kwargs.get("stop_gradient", False))
+        else:
+            self.ops.insert(0, op)
         return op

     def _sync_with_cpp(self):
diff --git a/python/paddle/fluid/imperative/__init__.py b/python/paddle/fluid/imperative/__init__.py
index 54dc794ea6392fac6f266477fe045b37001a8666..034a11e0a6049c17800c8fd5aab5bc2291320169 100644
--- a/python/paddle/fluid/imperative/__init__.py
+++ b/python/paddle/fluid/imperative/__init__.py
@@ -23,7 +23,11 @@ from .layers import *
 from . import nn
 from .nn import *

+from . import tracer
+from .tracer import *
+
 __all__ = []
 __all__ += layers.__all__
 __all__ += base.__all__
 __all__ += nn.__all__
+__all__ += tracer.__all__
diff --git a/python/paddle/fluid/imperative/base.py b/python/paddle/fluid/imperative/base.py
index d4525233cc681720404770ef1d0c5d3006607a2e..174f138bfa2d3cfaa433c3235c2b0f9a5650e756 100644
--- a/python/paddle/fluid/imperative/base.py
+++ b/python/paddle/fluid/imperative/base.py
@@ -16,6 +16,7 @@ import numpy as np

 from paddle.fluid import core
 from paddle.fluid import framework
+from .tracer import Tracer

 __all__ = ['enabled', 'guard', 'to_variable']

@@ -28,7 +29,7 @@ def enabled():
 def guard(place=None):
     train = framework.Program()
     startup = framework.Program()
-    tracer = core.Tracer(train.current_block().desc)
+    tracer = Tracer(train.current_block().desc)

     if place is None:
         if core.is_compiled_with_cuda():
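
Note: the new `python/paddle/fluid/imperative/tracer.py` module that this patch imports (`from .tracer import Tracer`, `_imperative_tracer().trace_op(...)`) is not shown in the hunks above. Below is a minimal sketch of what such a module could look like, reconstructed from the call sites and from the removed `Block._trace_op` body; the `_ops` bookkeeping, `op.block.desc`, and `framework._imperative_current_expected_place_` are assumptions, not confirmed by this diff.

```python
# Hypothetical sketch of python/paddle/fluid/imperative/tracer.py; only the
# Tracer name, its block-desc constructor argument, and the
# trace_op(op, stop_gradient) signature are confirmed by the diff above --
# everything else is an assumption.
from collections import defaultdict

import six

from paddle.fluid import core
from paddle.fluid import framework

__all__ = ['Tracer']


class Tracer(core.Tracer):
    """Python-side tracer: records traced ops itself instead of Block.ops."""

    def __init__(self, block_desc):
        super(Tracer, self).__init__(block_desc)
        self._ops = {}  # assumed: trace_id -> Operator, replacing Block.ops
        self._trace_id = 0

    def trace_op(self, op, stop_gradient=False):
        # Tag the C++ OpBase with its position in the trace, via the
        # `_trace_id` property exposed in pybind.cc above.
        op.iop._trace_id = self._trace_id
        self._ops[self._trace_id] = op
        self._trace_id += 1

        # Delegate to the C++ tracer; this mirrors the removed
        # Block._trace_op body (op.block.desc and the expected-place
        # global are assumptions).
        backward_refs = self.trace(op.iop, op.inputs, op.outputs,
                                   op.block.desc,
                                   framework._imperative_current_expected_place_,
                                   stop_gradient)

        # Keep references only to the inputs/outputs the backward pass will
        # read, so everything else can be released eagerly (same loop as the
        # removed Block._trace_op).
        op.backward_refs = defaultdict(list)
        for k, v in six.iteritems(op.inputs):
            if k in backward_refs:
                op.backward_refs[k] = op.inputs[k]
        for k, v in six.iteritems(op.outputs):
            if k in backward_refs:
                op.backward_refs[k] = op.outputs[k]
```

Moving op bookkeeping from `Block` to the tracer is what makes the new `_clear_block` asserts hold: in imperative mode `append_op`/`_insert_op` no longer touch `Block.ops`, so the block's Python-side lists stay empty and only the C++ desc needs clearing.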