From 8147063539dcaf137152a413f22e0f12fc5c3386 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 25 May 2018 11:26:00 +0800 Subject: [PATCH] follow comments --- .../reader/create_custom_reader_op.cc | 25 ++++++++----------- python/paddle/fluid/layers/io.py | 15 +++++------ 2 files changed, 19 insertions(+), 21 deletions(-) diff --git a/paddle/fluid/operators/reader/create_custom_reader_op.cc b/paddle/fluid/operators/reader/create_custom_reader_op.cc index 2bf3230db2..4ecbf8ed4f 100644 --- a/paddle/fluid/operators/reader/create_custom_reader_op.cc +++ b/paddle/fluid/operators/reader/create_custom_reader_op.cc @@ -13,6 +13,7 @@ // limitations under the License. #include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/operators/detail/safe_ref.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" namespace paddle { @@ -148,35 +149,31 @@ void CustomReader::ReadNext(std::vector<framework::LoDTensor>* out) { // There is not next data. return; } - PADDLE_ENFORCE( - source_var_names_.size() == underlying_outs.size() && - sink_var_names_.size() == underlying_outs.size(), - "The size of source_var_names(%d), the size of sink_var_names(%d) and " - "the size of underlying_outs(%d) are not consistent. Each feeding " - "element must have its own source and sink variable.", - source_var_names_.size(), sink_var_names_.size(), underlying_outs.size()); + PADDLE_ENFORCE(source_var_names_.size() == underlying_outs.size(), + "The size of source_var_names(%d) and the size of " + "underlying_outs(%d) are not consistent. Each feeding element " + "must have its own source variable.", + source_var_names_.size(), underlying_outs.size()); // The scope for CustomReader's sub-block should be independent and shouldn't // be any other computation scope's child. Otherwise, data preprocessing and // compution cannot be concurrent. - auto* scope = new framework::Scope(); + framework::Scope scope; // 1. Copy LoDTensors from underlying reader's output to source variables. 
for (size_t i = 0; i < source_var_names_.size(); ++i) { - framework::Variable* var = scope->Var(source_var_names_[i]); + framework::Variable* var = scope.Var(source_var_names_[i]); framework::LoDTensor* tensor = var->GetMutable<framework::LoDTensor>(); tensor->ShareDataWith(underlying_outs[i]); tensor->set_lod(underlying_outs[i].lod()); } // 2. Run the sub-block. - exe_.Run(program_, scope, sub_block_id_, false, true); + exe_.Run(program_, &scope, sub_block_id_, false, true); // 3. Copy LoDTensors from sink variables to out. out->resize(sink_var_names_.size()); for (size_t i = 0; i < sink_var_names_.size(); ++i) { - framework::Variable* var = scope->FindVar(sink_var_names_[i]); - PADDLE_ENFORCE_NOT_NULL(var); - const framework::LoDTensor& tensor = var->Get<framework::LoDTensor>(); + const auto& tensor = detail::Ref(scope.FindVar(sink_var_names_[i])) .Get<framework::LoDTensor>(); framework::TensorCopySync(tensor, platform::CPUPlace(), &(*out)[i]); } - delete scope; } } // namespace reader diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index b48bfc9ece..07ee18eb53 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -559,15 +559,16 @@ class Preprocessor(object): source_shapes = self.underlying_reader.desc.shapes() source_dtypes = self.underlying_reader.desc.dtypes() source_lod_levels = self.underlying_reader.desc.lod_levels() - self.source_var_names = [] + self.source_var_names = [ + unique_name("preprocessor_source") + for _ in xrange(len(source_shapes)) + ] source_vars = [] - for idx in xrange(len(source_shapes)): - self.source_var_names.append(unique_name("preprocessor_source")) + for var_name, shape, dtype, lod_level in zip( + self.source_var_names, source_shapes, source_dtypes, + source_lod_levels): source_vars.append(self.main_prog.current_block().create_var( - name=self.source_var_names[-1], - shape=source_shapes[idx], - dtype=source_dtypes[idx], - lod_level=source_lod_levels[idx])) + name=var_name, shape=shape, dtype=dtype, lod_level=lod_level)) return 
source_vars def outputs(self, *outs): -- GitLab