diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index b0a07ccad6883a6189a20d32d414c8444eea5d90..9399b8675e1422d5a0e6784d6ad98c5d7568ed0c 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -209,7 +209,7 @@ void OpDesc::SetAttr(const std::string &name, const Attribute &v) {
   if (attr_type == proto::AttrType::INTS &&
       boost::get<std::vector<int>>(v).size() == 0u) {
     // Find current attr via attr name and set the correct attribute value
-    const proto::OpProto::Attr& attr = GetProtoAttr(name);
+    const proto::OpProto::Attr &attr = GetProtoAttr(name);
     switch (attr.type()) {
       case proto::AttrType::BOOLEANS: {
         VLOG(11) << "SetAttr: " << Type() << ", " << name
@@ -275,8 +275,8 @@ Attribute OpDesc::GetAttr(const std::string &name) const {
   return it->second;
 }
 
-const proto::OpProto::Attr& OpDesc::GetProtoAttr(const std::string &name) {
-  proto::OpProto& proto = OpInfoMap::Instance().Get(Type()).Proto();
+const proto::OpProto::Attr &OpDesc::GetProtoAttr(const std::string &name) {
+  proto::OpProto &proto = OpInfoMap::Instance().Get(Type()).Proto();
   for (int i = 0; i != proto.attrs_size(); ++i) {
     const proto::OpProto::Attr &attr = proto.attrs(i);
     if (attr.name() == name) {
diff --git a/paddle/fluid/framework/op_desc.h b/paddle/fluid/framework/op_desc.h
index 0421f36a358524f4166aa4777a93876a1ff18c14..6805d25934b42a5752cbb54174d3017cf63e4b23 100644
--- a/paddle/fluid/framework/op_desc.h
+++ b/paddle/fluid/framework/op_desc.h
@@ -81,7 +81,7 @@ class OpDesc {
 
   Attribute GetAttr(const std::string &name) const;
 
-  const proto::OpProto::Attr& GetProtoAttr(const std::string &name) const;
+  const proto::OpProto::Attr &GetProtoAttr(const std::string &name) const;
 
   Attribute GetNullableAttr(const std::string &name) const;
 
diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index 1c10d06c51d0a25a76251566d7dddfaf3d772223..e552b79219805d20b8db24ba6a567affaf12f3aa 100644
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -364,8 +364,7 @@ def _append_backward_ops_(block,
 
         # Getting op's corresponding grad_op
         grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
-            op.desc,
-            cpt.to_text(no_grad_dict[block.idx]), grad_sub_block_list)
+            op.desc, cpt.to_text(no_grad_dict[block.idx]), grad_sub_block_list)
 
         grad_op_descs.extend(grad_op_desc)
         grad_to_var.update(op_grad_to_var)
diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py
index 6654e33847bc9007f8dd09fefad56dcdf186d223..35c3ab59c222b4050288f78ef5e488eb2ba8d2a7 100644
--- a/python/paddle/fluid/parallel_executor.py
+++ b/python/paddle/fluid/parallel_executor.py
@@ -159,8 +159,7 @@ class ParallelExecutor(object):
                 for p in main.global_block().iter_parameters()
                 if not p.stop_gradient
             ]),
-            set(cpt.to_text(var)
-                for var in self.persistable_vars), main.desc,
+            set(cpt.to_text(var) for var in self.persistable_vars), main.desc,
             cpt.to_text(loss_name) if loss_name else six.u(''), scope,
             local_scopes, exec_strategy, build_strategy, num_trainers,
             trainer_id)
@@ -274,8 +273,7 @@ class ParallelExecutor(object):
             self.executor.feed_tensors_into_local_scopes(res)
 
         fetch_var_name = '@FETCHED_VAR_NAME@'
-        self.executor.run(
-            cpt.to_text(fetch_list), cpt.to_text(fetch_var_name))
+        self.executor.run(cpt.to_text(fetch_list), cpt.to_text(fetch_var_name))
         arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array()
 
         if self.is_dist:
diff --git a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py
index 06cb16db6f70680177dc9ed7d9caba377f05a570..293c7841eca7f7873744b87e5f739221045e9f23 100644
--- a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py
+++ b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py
@@ -259,9 +259,9 @@ class ControlFlowGraph(object):
                         # Rename the var to the cache var already with
                         # memory allocated in order to reuse the memory.
                         _rename_arg_(self._ops, x, cache_var, begin_idx=i)
-                        self._program.block(block_desc.id).var(
-                            cpt.to_text(x)).desc = self._find_var(
-                                block_desc, cache_var, is_forward)
+                        self._program.block(block_desc.id).var(cpt.to_text(
+                            x)).desc = self._find_var(block_desc, cache_var,
+                                                      is_forward)
                         self._update_graph(x, cache_var, begin_idx=i)
                         break
 