From 5338417b4754d1ed9dd46d5c1e951aeea46d4888 Mon Sep 17 00:00:00 2001
From: minqiyang
Date: Tue, 14 Aug 2018 17:12:24 +0800
Subject: [PATCH] Polish code style

---
 paddle/fluid/framework/op_desc.cc | 6 +++---
 paddle/fluid/framework/op_desc.h | 2 +-
 python/paddle/fluid/backward.py | 3 +--
 python/paddle/fluid/parallel_executor.py | 6 ++----
 .../fluid/transpiler/memory_optimization_transpiler.py | 6 +++---
 5 files changed, 10 insertions(+), 13 deletions(-)

diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index b0a07ccad6..9399b8675e 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -209,7 +209,7 @@ void OpDesc::SetAttr(const std::string &name, const Attribute &v) {
   if (attr_type == proto::AttrType::INTS &&
       boost::get<std::vector<int>>(v).size() == 0u) {
     // Find current attr via attr name and set the correct attribute value
-    const proto::OpProto::Attr& attr = GetProtoAttr(name);
+    const proto::OpProto::Attr &attr = GetProtoAttr(name);
     switch (attr.type()) {
       case proto::AttrType::BOOLEANS: {
         VLOG(11) << "SetAttr: " << Type() << ", " << name
@@ -275,8 +275,8 @@ Attribute OpDesc::GetAttr(const std::string &name) const {
   return it->second;
 }
 
-const proto::OpProto::Attr& OpDesc::GetProtoAttr(const std::string &name) {
-  proto::OpProto& proto = OpInfoMap::Instance().Get(Type()).Proto();
+const proto::OpProto::Attr &OpDesc::GetProtoAttr(const std::string &name) {
+  proto::OpProto &proto = OpInfoMap::Instance().Get(Type()).Proto();
   for (int i = 0; i != proto.attrs_size(); ++i) {
     const proto::OpProto::Attr &attr = proto.attrs(i);
     if (attr.name() == name) {
diff --git a/paddle/fluid/framework/op_desc.h b/paddle/fluid/framework/op_desc.h
index 0421f36a35..6805d25934 100644
--- a/paddle/fluid/framework/op_desc.h
+++ b/paddle/fluid/framework/op_desc.h
@@ -81,7 +81,7 @@ class OpDesc {
 
   Attribute GetAttr(const std::string &name) const;
 
-  const proto::OpProto::Attr& GetProtoAttr(const std::string &name) const;
+  const proto::OpProto::Attr &GetProtoAttr(const std::string &name) const;
 
   Attribute GetNullableAttr(const std::string &name) const;
 
diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index 1c10d06c51..e552b79219 100644
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -364,8 +364,7 @@ def _append_backward_ops_(block,
 
         # Getting op's corresponding grad_op
         grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
-            op.desc,
-            cpt.to_text(no_grad_dict[block.idx]), grad_sub_block_list)
+            op.desc, cpt.to_text(no_grad_dict[block.idx]), grad_sub_block_list)
 
         grad_op_descs.extend(grad_op_desc)
         grad_to_var.update(op_grad_to_var)
diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py
index 6654e33847..35c3ab59c2 100644
--- a/python/paddle/fluid/parallel_executor.py
+++ b/python/paddle/fluid/parallel_executor.py
@@ -159,8 +159,7 @@ class ParallelExecutor(object):
                 for p in main.global_block().iter_parameters()
                 if not p.stop_gradient
             ]),
-            set(cpt.to_text(var)
-                for var in self.persistable_vars), main.desc,
+            set(cpt.to_text(var) for var in self.persistable_vars), main.desc,
             cpt.to_text(loss_name) if loss_name else six.u(''), scope,
             local_scopes, exec_strategy, build_strategy, num_trainers,
             trainer_id)
@@ -274,8 +273,7 @@ class ParallelExecutor(object):
             self.executor.feed_tensors_into_local_scopes(res)
 
         fetch_var_name = '@FETCHED_VAR_NAME@'
-        self.executor.run(
-            cpt.to_text(fetch_list), cpt.to_text(fetch_var_name))
+        self.executor.run(cpt.to_text(fetch_list), cpt.to_text(fetch_var_name))
         arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array()
 
         if self.is_dist:
diff --git a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py
index 06cb16db6f..293c7841ec 100644
--- a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py
+++ b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py
@@ -259,9 +259,9 @@ class ControlFlowGraph(object):
                     # Rename the var to the cache var already with
                     # memory allocated in order to reuse the memory.
                     _rename_arg_(self._ops, x, cache_var, begin_idx=i)
-                    self._program.block(block_desc.id).var(
-                        cpt.to_text(x)).desc = self._find_var(
-                            block_desc, cache_var, is_forward)
+                    self._program.block(block_desc.id).var(cpt.to_text(
+                        x)).desc = self._find_var(block_desc, cache_var,
+                                                  is_forward)
                     self._update_graph(x, cache_var, begin_idx=i)
                     break
 
--
GitLab
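
Note on the style rule the C++ hunks apply: they only move the '&' so that it
binds to the declared name ("const proto::OpProto::Attr &attr") rather than to
the type ("const proto::OpProto::Attr& attr"); behavior is unchanged. Below is
a minimal sketch of that convention, assuming a clang-format setup with
PointerAlignment: Right -- the option name is an assumption about tooling, not
something stated in this patch:

    #include <string>
    #include <vector>

    // Post-patch style: '&' attaches to the name in return types and
    // parameters alike, e.g. "const std::string &s", not "const std::string& s".
    const std::string &FirstName(const std::vector<std::string> &names) {
      // Returning a const reference avoids a copy, as GetProtoAttr does above.
      return names.front();
    }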