From e395f2c6a337edc7c413645eab0bbb76c6c408db Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Wed, 16 Jan 2019 17:25:15 +0800
Subject: [PATCH] polish codes

test=develop
---
 paddle/fluid/imperative/layer.cc            |  8 ++--
 paddle/fluid/imperative/layer.h             | 46 ++++++++++++-------
 paddle/fluid/imperative/tracer.cc           | 22 ++++-----
 paddle/fluid/pybind/pybind.cc               |  6 +--
 python/paddle/fluid/framework.py            |  4 +-
 python/paddle/fluid/imperative/layers.py    |  4 ++
 python/paddle/fluid/imperative/nn.py        |  1 +
 .../tests/unittests/test_imperative_gan.py  |  9 +---
 8 files changed, 53 insertions(+), 47 deletions(-)

diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc
index 426644ca9..b7df4b888 100644
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -57,15 +57,15 @@ class Autograd {
   Autograd() {}

   void RunBackward(VarBase* var) {
-    if (var->stop_gradient_) {
+    if (var->IsStopGradient()) {
       return;
     }
     VLOG(3) << "start autograd";

     std::deque<OpBase*> ready;
-    ready.push_back(var->pre_op_);
+    ready.push_back(var->PreOp());

-    std::map<OpBase*, int> dep_counts = ComputeDepCounts(var->pre_op_);
+    std::map<OpBase*, int> dep_counts = ComputeDepCounts(var->PreOp());

     while (!ready.empty()) {
       OpBase* ready_op = ready.front();
@@ -77,7 +77,7 @@ class Autograd {
       const std::vector<VarBase*>& ingrads = it.second;
       for (size_t i = 0; i < ingrads.size(); ++i) {
         if (!ingrads[i]) continue;
-        if (ready_op->input_vars_[it.first][i]->stop_gradient_) {
+        if (ready_op->input_vars_[it.first][i]->IsStopGradient()) {
           continue;
         }
         OpBase* pre_op = ready_op->pre_ops_[it.first][i];
diff --git a/paddle/fluid/imperative/layer.h b/paddle/fluid/imperative/layer.h
index 2289da090..0b1077c64 100644
--- a/paddle/fluid/imperative/layer.h
+++ b/paddle/fluid/imperative/layer.h
@@ -100,20 +100,20 @@ class VarBase {

   // Owns `var` and `grad`
   VarBase(framework::Variable* var, VarBase* grad)
-      : pre_op_(nullptr),
-        pre_op_out_idx_(-1),
-        var_desc_(nullptr),
+      : var_desc_(nullptr),
         var_(var),
         grads_(grad),
-        stop_gradient_(false) {}
+        stop_gradient_(false),
+        pre_op_(nullptr),
+        pre_op_out_idx_(-1) {}

   explicit VarBase(bool stop_gradient)
-      : pre_op_(nullptr),
-        pre_op_out_idx_(-1),
-        var_desc_(nullptr),
+      : var_desc_(nullptr),
         var_(new framework::Variable()),
         grads_(stop_gradient ? nullptr : new VarBase(true)),
-        stop_gradient_(stop_gradient) {}
+        stop_gradient_(stop_gradient),
+        pre_op_(nullptr),
+        pre_op_out_idx_(-1) {}

   virtual ~VarBase() {
     if (var_) {
@@ -125,15 +125,27 @@ class VarBase {
     }
   }

-  void Clear() {
+  OpBase* PreOp() const { return pre_op_; }
+  int PreOpOutIdx() const { return pre_op_out_idx_; }
+
+  void SetStopGradient(bool stop_gradient) { stop_gradient_ = stop_gradient; }
+  bool IsStopGradient() const { return stop_gradient_; }
+
+  void RunBackward();
+
+  void TrackPreOp(OpBase* pre_op, const std::string& pre_op_out_name,
+                  int pre_op_out_idx, bool stop_gradient) {
+    pre_op_ = pre_op;
+    pre_op_out_name_ = pre_op_out_name;
+    pre_op_out_idx_ = pre_op_out_idx;
+    stop_gradient_ = stop_gradient;
+  }
+
+  void ClearGradient() {
     delete grads_;
     grads_ = new VarBase(true);
-    pre_op_ = nullptr;
-    pre_op_out_name_ = "";
   }

-  void RunBackward();
-
   framework::LoDTensor& GradValue();

   inline std::string GradName() const {
@@ -143,16 +155,16 @@ class VarBase {
     return string::Sprintf("%s@IGrad", var_desc_->Name());
   }

-  OpBase* pre_op_;
-  std::string pre_op_out_name_;
-  int pre_op_out_idx_;
-
   framework::VarDesc* var_desc_;

   framework::Variable* var_;
   VarBase* grads_;

+ private:
   bool stop_gradient_;
+  OpBase* pre_op_;
+  std::string pre_op_out_name_;
+  int pre_op_out_idx_;
 };

 /* The wrapper for OpDesc which holds a OpDesc and a OpDesc of its
diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc
index 2878f5be8..843fee41f 100644
--- a/paddle/fluid/imperative/tracer.cc
+++ b/paddle/fluid/imperative/tracer.cc
@@ -63,9 +63,9 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,

       invars.push_back(inp->var_);
       vars[inp->var_desc_->Name()] = inp;
-      if (inp->pre_op_) {
-        op->pre_ops_[it.first].push_back(inp->pre_op_);
-        op->pre_ops_out_idx_[it.first].push_back(inp->pre_op_out_idx_);
+      if (inp->PreOp()) {
+        op->pre_ops_[it.first].push_back(inp->PreOp());
+        op->pre_ops_out_idx_[it.first].push_back(inp->PreOpOutIdx());
       } else {
         op->pre_ops_[it.first].push_back(nullptr);
       }
@@ -89,10 +89,7 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs,
       } else {
         LOG(ERROR) << "tracer doesn't support yet";
       }
-      out->stop_gradient_ = stop_gradient;
-      out->pre_op_ = op;
-      out->pre_op_out_name_ = it.first;
-      out->pre_op_out_idx_ = i;
+      out->TrackPreOp(op, it.first, i, stop_gradient);

       VLOG(3) << "output vname " << out->var_desc_->Name() << " "
               << out->var_->IsInitialized();
@@ -167,9 +164,9 @@ std::vector<VarBase*> Tracer::PyTrace(OpBase* op,
   op->input_vars_[PyLayer::kFwdInp] = inputs;
   op->output_vars_[PyLayer::kFwdOut] = PyLayer::Apply(op->forward_id_, inputs);
   for (VarBase* inp : inputs) {
-    if (inp->pre_op_) {
-      op->pre_ops_[PyLayer::kFwdInp].push_back(inp->pre_op_);
-      op->pre_ops_out_idx_[PyLayer::kFwdInp].push_back(inp->pre_op_out_idx_);
+    if (inp->PreOp()) {
+      op->pre_ops_[PyLayer::kFwdInp].push_back(inp->PreOp());
+      op->pre_ops_out_idx_[PyLayer::kFwdInp].push_back(inp->PreOpOutIdx());
     } else {
       op->pre_ops_[PyLayer::kFwdInp].push_back(nullptr);
     }
@@ -178,10 +175,7 @@ std::vector<VarBase*> Tracer::PyTrace(OpBase* op,
   auto& outputs = op->output_vars_[PyLayer::kFwdOut];
   for (size_t i = 0; i < outputs.size(); ++i) {
     VarBase* out = outputs[i];
-    out->stop_gradient_ = stop_gradient;
-    out->pre_op_ = op;
-    out->pre_op_out_name_ = PyLayer::kFwdOut;
-    out->pre_op_out_idx_ = i;
+    out->TrackPreOp(op, PyLayer::kFwdOut, i, stop_gradient);
   }
   if (!stop_gradient) {
     auto& grad_input_vars =
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index efe70a075..96fa428ee 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -133,7 +133,7 @@ PYBIND11_MODULE(core, m) {
            [](imperative::VarBase &self) { self.RunBackward(); })
       .def("_grad_name", &imperative::VarBase::GradName)
       .def("_grad_value", &imperative::VarBase::GradValue)
-      .def("_clear", &imperative::VarBase::Clear)
+      .def("_clear_gradient", &imperative::VarBase::ClearGradient)
       .def("_grad_ivar",
            [](const imperative::VarBase &self) { return self.grads_; },
            py::return_value_policy::reference)
@@ -148,9 +148,9 @@ PYBIND11_MODULE(core, m) {
            py::return_value_policy::reference)
       .def_property(
           "stop_gradient",
-          [](const imperative::VarBase &self) { return self.stop_gradient_; },
+          [](const imperative::VarBase &self) { return self.IsStopGradient(); },
          [](imperative::VarBase &self, bool stop_gradient) {
-            self.stop_gradient_ = stop_gradient;
+            self.SetStopGradient(stop_gradient);
           });

   py::class_<imperative::OpBase>(m, "OpBase", R"DOC()DOC")
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index e737b9bc6..eedfd4a60 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -388,8 +388,8 @@ class Variable(object):
     def _gradient(self):
         return np.array(self._ivar._grad_value())

-    def _clear(self):
-        self._ivar._clear()
+    def _clear_gradient(self):
+        self._ivar._clear_gradient()

     def __str__(self):
         return self.to_string(True)
diff --git a/python/paddle/fluid/imperative/layers.py b/python/paddle/fluid/imperative/layers.py
index ed67dda63..6cd0c2975 100644
--- a/python/paddle/fluid/imperative/layers.py
+++ b/python/paddle/fluid/imperative/layers.py
@@ -33,6 +33,10 @@ class Layer(core.Layer):
     def parameters(self):
         return []

+    def clear_gradients(self):
+        for p in self.parameters():
+            p._clear_gradient()
+
     def _build_once(self, inputs):
         pass

diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py
index 95b594876..79986070c 100644
--- a/python/paddle/fluid/imperative/nn.py
+++ b/python/paddle/fluid/imperative/nn.py
@@ -48,6 +48,7 @@ class Conv2D(layers.Layer):
         assert param_attr is not False, "param_attr should not be False here."
         super(Conv2D, self).__init__(name=name, dtype=dtype)

+        # TODO(minqiyang): Move this to the top.
         from ..layer_helper import LayerHelper
         self._helper = LayerHelper(
             type(self).__name__,
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gan.py b/python/paddle/fluid/tests/unittests/test_imperative_gan.py
index e0507e0b9..4fe286f85 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_gan.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_gan.py
@@ -133,9 +133,6 @@ class TestImperativeMnist(unittest.TestCase):
             for param in generate_p.global_block().all_parameters():
                 static_params[param.name] = np.array(
                     scope.find_var(param.name).get_tensor())
-                sys.stderr.write(
-                    'static_param_loss: %s: %s\n' %
-                    (param.name, np.sum(static_params[param.name])))

         dy_params = dict()
         with fluid.imperative.guard():
@@ -160,10 +157,8 @@ class TestImperativeMnist(unittest.TestCase):
             d_loss = d_loss_real + d_loss_fake
             d_loss._backward()
             sgd.minimize(d_loss)
-            for p in discriminator.parameters():
-                p._clear()
-            for p in generator.parameters():
-                p._clear()
+            discriminator.clear_gradients()
+            generator.clear_gradients()

             d_fake = discriminator(
                 generator(to_variable(np.ones([2, 2], np.float32))))
--
GitLab
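
Usage note (not part of the patch): a minimal sketch of how the renamed gradient-clearing API introduced above is expected to be used from an imperative training loop. `Discriminator` stands in for any fluid.imperative.Layer subclass (for example the one defined in test_imperative_gan.py); the optimizer and loss construction are illustrative assumptions, not code from this change.

    import numpy as np

    import paddle.fluid as fluid
    from paddle.fluid.imperative.base import to_variable

    with fluid.imperative.guard():
        # Hypothetical Layer subclass; any fluid.imperative.Layer works here.
        discriminator = Discriminator()
        sgd = fluid.optimizer.SGDOptimizer(learning_rate=1e-3)

        d_out = discriminator(to_variable(np.ones([2, 2], np.float32)))
        d_loss = fluid.layers.reduce_mean(d_out)

        d_loss._backward()
        sgd.minimize(d_loss)

        # New in this patch: one Layer.clear_gradients() call replaces the old
        # per-parameter p._clear() loop; it calls the renamed
        # Variable._clear_gradient() on every parameter of the layer.
        discriminator.clear_gradients()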