Unverified commit 47d6bc86, authored by Tomasz Socha, committed by GitHub

Fix copy elision warning (#35885)

* Fix copy elision warning

* Remove redundant code
Parent 736a7388
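Background: writing return std::move(local); in a return statement defeats the named return value optimization (NRVO) and triggers clang's -Wpessimizing-move diagnostic, while a plain return lets the compiler construct the result in place and still falls back to an implicit move when elision is impossible. A minimal standalone sketch of the pattern this patch fixes (not taken from the patch itself):

#include <string>
#include <utility>

// Before: std::move on a local in a return statement prevents copy
// elision, so clang emits -Wpessimizing-move.
std::string greet_before() {
  std::string s = "hello";
  return std::move(s);  // warning: moving a local object in a return
                        // statement prevents copy elision
}

// After: plain return enables NRVO; if the compiler cannot elide,
// the local is still treated as an rvalue and moved, never copied.
std::string greet_after() {
  std::string s = "hello";
  return s;
}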
@@ -215,7 +215,7 @@ class GlooWrapper {
 #else
     LOG(WARNING) << "AllGather does nothing when WITH_GLOO=OFF";
 #endif
-    return std::move(ret);
+    return ret;
   }

  protected:
...
@@ -88,7 +88,7 @@ std::unique_ptr<ir::Graph> PrepareGraph(
       nullptr, false, false, activation, gate_activation);
   std::unique_ptr<ir::Graph> graph(new ir::Graph(layers.main_program()));
-  return std::move(graph);
+  return graph;
 }
 }  // namespace fc_gru_test
 }  // namespace ir
...
@@ -91,7 +91,7 @@ std::unique_ptr<ir::Graph> PrepareGraph(
       false, gate_activation, cell_activation, candidate_activation);
   std::unique_ptr<ir::Graph> graph(new ir::Graph(layers.main_program()));
-  return std::move(graph);
+  return graph;
 }
 }  // namespace fc_lstm_test
...
@@ -25,7 +25,7 @@ AtomicVectorSizeT AsyncWorkQueue::PrepareAtomicDeps(
     working_dependecy_count[i] =
         std::make_unique<std::atomic<size_t>>(dependecy_count[i]);
   }
-  return std::move(working_dependecy_count);
+  return working_dependecy_count;
 }

 AtomicVectorSizeT AsyncWorkQueue::PrepareAtomicVarRef(
@@ -36,7 +36,7 @@ AtomicVectorSizeT AsyncWorkQueue::PrepareAtomicVarRef(
     working_var_ref[i] =
         std::make_unique<std::atomic<size_t>>(vec_meta_info[i].var_ref_count_);
   }
-  return std::move(working_var_ref);
+  return working_var_ref;
 }

 bool var_can_be_deleted(const std::string& name, const BlockDesc& block) {
...
@@ -147,7 +147,7 @@ std::unique_ptr<WorkQueue> CreateSingleThreadedWorkQueue(
                         "For a SingleThreadedWorkQueue, "
                         "WorkQueueOptions.num_threads must equals to 1."));
   std::unique_ptr<WorkQueue> ptr(new WorkQueueImpl(options));
-  return std::move(ptr);
+  return ptr;
 }

 std::unique_ptr<WorkQueue> CreateMultiThreadedWorkQueue(
@@ -158,7 +158,7 @@ std::unique_ptr<WorkQueue> CreateMultiThreadedWorkQueue(
                         "WorkQueueOptions.num_threads must be "
                         "greater than 1."));
   std::unique_ptr<WorkQueue> ptr(new WorkQueueImpl(options));
-  return std::move(ptr);
+  return ptr;
 }

 std::unique_ptr<WorkQueueGroup> CreateWorkQueueGroup(
@@ -168,7 +168,7 @@ std::unique_ptr<WorkQueueGroup> CreateWorkQueueGroup(
                         "For a WorkQueueGroup, the number of WorkQueueOptions "
                         "must be greater than 1."));
   std::unique_ptr<WorkQueueGroup> ptr(new WorkQueueGroupImpl(queues_options));
-  return std::move(ptr);
+  return ptr;
 }

 }  // namespace framework
...
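A frequent reason std::move creeps into factory functions like the ones above is the assumption that a move-only return type such as std::unique_ptr requires it. It does not: a returned local is implicitly treated as an rvalue, and even a by-value parameter, which can never be elided, is implicitly moved on return. A small sketch with hypothetical names:

#include <memory>

struct Task { int id = 0; };

// Local of the exact return type: plain return allows NRVO, and the
// implicit move applies whenever elision is not performed.
std::unique_ptr<Task> CreateTask() {
  std::unique_ptr<Task> ptr(new Task);
  return ptr;
}

// By-value parameter: elision is impossible, yet the implicit move
// still applies; return std::move(task) would draw -Wredundant-move.
std::unique_ptr<Task> ForwardTask(std::unique_ptr<Task> task) {
  return task;
}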
@@ -192,14 +192,7 @@ std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
     LOG(ERROR) << "fail to call Init";
     return nullptr;
   }
-#ifdef __clang__
-  // fix clang compile error
   return cls;
-#else
-  // fix manylinux compile error.
-  return std::move(cls);
-#endif
 }

 bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
@@ -390,12 +383,7 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
     return nullptr;
   }
-#ifdef __clang__
-  // fix clang compile error
   return predictor;
-#else
-  return std::move(predictor);
-#endif
 }

 template <>
...
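The removed #ifdef __clang__ workaround likely dates from older GCC toolchains (manylinux1 shipped GCC 4.8) that had not yet implemented CWG issue 1579, which extends the implicit move on return to locals whose type differs from the return type, such as a unique_ptr to a derived class converted to a unique_ptr to its base. On compilers with that resolution, the explicit std::move is redundant and clang warns about it. A hypothetical illustration of the converting case:

#include <memory>

struct Base { virtual ~Base() = default; };
struct Derived : Base {};

// Since the resolution of CWG 1579, the returned local is treated as
// an rvalue even though its type differs from the return type, so the
// unique_ptr<Derived> -> unique_ptr<Base> conversion needs no
// std::move. Pre-DR compilers (e.g. GCC 4.8) rejected this plain
// return, which is why era-specific #ifdef workarounds existed.
std::unique_ptr<Base> MakeDerived() {
  std::unique_ptr<Derived> d(new Derived);
  return d;  // implicit move plus converting constructor
}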
@@ -59,7 +59,7 @@ std::string GetIPCName() {
 #endif
   handle += "_";
   handle += std::to_string(rd());
-  return std::move(handle);
+  return handle;
 }

 std::shared_ptr<MemoryMapWriterAllocation> AllocateMemoryMapWriterAllocation(
...
@@ -324,7 +324,7 @@ void BindInferenceApi(py::module *m) {
              auto pred =
                  std::unique_ptr<paddle_infer::Predictor>(
                      new paddle_infer::Predictor(config));
-             return std::move(pred);
+             return pred;
            });
   m->def("copy_tensor", &CopyPaddleInferTensor);
   m->def("paddle_dtype_size", &paddle::PaddleDtypeSize);
...
@@ -53,7 +53,7 @@ template <class... ARGS>
 std::string format_string(const char* fmt, ARGS&&... args) {
   std::string str;
   format_string_append(str, fmt, args...);
-  return std::move(str);
+  return str;
 }

 template <class... ARGS>
...