diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc
index 51ddb7e58e2505b7df48a9e365854371fcc0a12c..ee0df039acd446bc7952186d0870688f9b2dfb46 100644
--- a/paddle/framework/executor.cc
+++ b/paddle/framework/executor.cc
@@ -74,16 +74,6 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope) {
   for (auto& device_context : device_contexts_) {
     device_context->Wait();
   }
-  // // print tensor value
-  // for (auto& var : block.vars()) {
-  //   std::cout << var.name() << std::endl;
-  //   auto v = scope->FindVar(var.name());
-  //   const LoDTensor& t = v->Get<LoDTensor>();
-  //   for (int i = 0; i < t.numel(); ++i) {
-  //     std::cout << t.data<float>()[i] << " ";
-  //   }
-  //   std::cout << std::endl;
-  // }
 }
 
 }  // namespace framework
diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc
index d3ea18d1546059d5e3e601cde5823c7d89e6a226..5e327cc893e3a393b758a06f6a6f33173d0e86db 100644
--- a/paddle/framework/executor_test.cc
+++ b/paddle/framework/executor_test.cc
@@ -130,6 +130,7 @@ std::once_flag set_variable_flag;
 template <typename T>
 void set_feed_variable(const std::vector<std::vector<T>>& inputs) {
   typedef std::vector<paddle::framework::Tensor> FeedInputs;
+  // Tensors in feed value variable will only be in CPUPlace
   Variable* g_feed_value = GetScope()->FindVar("feed_value");
   FeedInputs& feed_inputs = *(g_feed_value->GetMutable<FeedInputs>());
   auto size = inputs.size();
@@ -144,6 +145,7 @@ void set_feed_variable(const std::vector<std::vector<T>>& inputs) {
 
 template <typename T>
 std::vector<std::vector<T>> get_fetch_variable() {
   typedef std::vector<paddle::framework::Tensor> FetchOutputs;
+  // Tensors in fetch value variable will only be in CPUPlace
   Variable* g_fetch_value = GetScope()->FindVar("fetch_value");
   FetchOutputs& fetch_outputs = *(g_fetch_value->GetMutable<FetchOutputs>());
diff --git a/paddle/framework/scope.cc b/paddle/framework/scope.cc
index 2c416570cf62c8a4375293062cfc1e47f1011024..b6a9d7fbc29c58fc986b910f9b510f25e2e42ad9 100644
--- a/paddle/framework/scope.cc
+++ b/paddle/framework/scope.cc
@@ -66,15 +66,10 @@ void Scope::DropKids() {
 
 std::once_flag feed_variable_flag;
 
-template <typename T, typename... Args>
-std::unique_ptr<T> make_unique(Args&&... args) {
-  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
-}
-
 framework::Scope* GetScope() {
-  static std::unique_ptr<framework::Scope> g_scope =
-      make_unique<framework::Scope>();
+  static std::unique_ptr<framework::Scope> g_scope{nullptr};
   std::call_once(feed_variable_flag, [&]() {
+    g_scope.reset(new framework::Scope());
     g_scope->NewVar("feed_value");
     g_scope->NewVar("fetch_value");
   });
diff --git a/paddle/operators/feed_op.cc b/paddle/operators/feed_op.cc
index d40db3ff2ed8799504a9d6e816d73adb63abf2e6..f2c498e2e2ccb26e29e9d1364ec0aca4fbe105f7 100644
--- a/paddle/operators/feed_op.cc
+++ b/paddle/operators/feed_op.cc
@@ -33,7 +33,7 @@ class FeedOp : public framework::OperatorWithKernel {
 
     auto in_dim = tensors[col].dims();
     ctx->SetOutputDim("Out", in_dim);
-    // TODO(qijun) need to handle LodTensor later
+    // TODO(qijun): need to handle LodTensor later
   }
 
   framework::DataType IndicateDataType(
diff --git a/paddle/operators/fetch_op.cc b/paddle/operators/fetch_op.cc
index a885deacc84379389302d1420303ca8b608c9455..f6882cbd03462942f67b2ec8e32256d35991b6b2 100644
--- a/paddle/operators/fetch_op.cc
+++ b/paddle/operators/fetch_op.cc
@@ -39,7 +39,7 @@ class FetchOp : public framework::OperatorWithKernel {
 
     tmp.Resize(input_dim);
     (*tensors)[col].Resize(input_dim);
-    // TODO(qijun) need to handle LodTensor later
+    // TODO(qijun): need to handle LodTensor later
   }
 
   framework::DataType IndicateDataType(
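
Note on the scope.cc change: GetScope() now creates the global scope lazily inside the std::call_once block instead of eagerly through a make_unique helper, so the scope and its "feed_value"/"fetch_value" variables are initialized together, exactly once. A minimal standalone sketch of that pattern, using a hypothetical Registry type in place of framework::Scope (names here are illustrative, not part of the patch):

#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

// Hypothetical stand-in for framework::Scope, used only to show the pattern.
class Registry {
 public:
  void NewVar(const std::string& name) { names_.push_back(name); }
  const std::vector<std::string>& Names() const { return names_; }

 private:
  std::vector<std::string> names_;
};

std::once_flag init_flag;

Registry* GetRegistry() {
  // The unique_ptr starts empty; the lambda runs exactly once even if
  // GetRegistry() is called concurrently, so construction and the initial
  // NewVar() calls happen atomically as one unit.
  static std::unique_ptr<Registry> g_registry{nullptr};
  std::call_once(init_flag, [&]() {
    g_registry.reset(new Registry());
    g_registry->NewVar("feed_value");
    g_registry->NewVar("fetch_value");
  });
  return g_registry.get();
}

int main() {
  for (const auto& name : GetRegistry()->Names()) {
    std::cout << name << std::endl;
  }
  return 0;
}

Every caller goes through the same call_once-guarded accessor, so no caller can observe a scope that exists but does not yet contain the feed/fetch variables.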