Unverified commit 47d6bc86, authored by Tomasz Socha, committed by GitHub

Fix copy elision warning (#35885)

* Fix copy elision warning

* Remove redundant code
Parent 736a7388
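For background on the warning named in the title: wrapping a local variable in `std::move` at a `return` statement forces a move where the compiler could otherwise elide the copy entirely (NRVO), and recent Clang and GCC diagnose this under `-Wpessimizing-move` / `-Wredundant-move`. Every hunk below applies the same one-line fix. A minimal sketch of the pattern (illustrative names, not code from this patch):

```cpp
#include <utility>
#include <vector>

// Pessimizing: std::move turns the operand into an xvalue, which
// disqualifies the return from named return value optimization (NRVO).
// Clang flags this with -Wpessimizing-move ("moving a local object in a
// return statement prevents copy elision"); GCC reports the cases where
// the move would happen implicitly anyway under -Wredundant-move.
std::vector<int> MakeBad() {
  std::vector<int> v(100, 0);
  return std::move(v);  // forces a move and blocks copy elision
}

// Preferred: return the local by name. The compiler may construct the
// result directly in the caller's storage; if it cannot, the return
// still performs an implicit move, never a copy.
std::vector<int> MakeGood() {
  std::vector<int> v(100, 0);
  return v;
}
```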
@@ -215,7 +215,7 @@ class GlooWrapper {
 #else
     LOG(WARNING) << "AllGather does nothing when WITH_GLOO=OFF";
 #endif
-    return std::move(ret);
+    return ret;
   }

 protected:
@@ -88,7 +88,7 @@ std::unique_ptr<ir::Graph> PrepareGraph(
       nullptr, false, false, activation, gate_activation);
   std::unique_ptr<ir::Graph> graph(new ir::Graph(layers.main_program()));
-  return std::move(graph);
+  return graph;
 }
 }  // namespace fc_gru_test
 }  // namespace ir
@@ -91,7 +91,7 @@ std::unique_ptr<ir::Graph> PrepareGraph(
       false, gate_activation, cell_activation, candidate_activation);
   std::unique_ptr<ir::Graph> graph(new ir::Graph(layers.main_program()));
-  return std::move(graph);
+  return graph;
 }
 }  // namespace fc_lstm_test
@@ -25,7 +25,7 @@ AtomicVectorSizeT AsyncWorkQueue::PrepareAtomicDeps(
     working_dependecy_count[i] =
         std::make_unique<std::atomic<size_t>>(dependecy_count[i]);
   }
-  return std::move(working_dependecy_count);
+  return working_dependecy_count;
 }

 AtomicVectorSizeT AsyncWorkQueue::PrepareAtomicVarRef(
@@ -36,7 +36,7 @@ AtomicVectorSizeT AsyncWorkQueue::PrepareAtomicVarRef(
     working_var_ref[i] =
         std::make_unique<std::atomic<size_t>>(vec_meta_info[i].var_ref_count_);
   }
-  return std::move(working_var_ref);
+  return working_var_ref;
 }

 bool var_can_be_deleted(const std::string& name, const BlockDesc& block) {
@@ -147,7 +147,7 @@ std::unique_ptr<WorkQueue> CreateSingleThreadedWorkQueue(
                         "For a SingleThreadedWorkQueue, "
                         "WorkQueueOptions.num_threads must equals to 1."));
   std::unique_ptr<WorkQueue> ptr(new WorkQueueImpl(options));
-  return std::move(ptr);
+  return ptr;
 }

 std::unique_ptr<WorkQueue> CreateMultiThreadedWorkQueue(
@@ -158,7 +158,7 @@ std::unique_ptr<WorkQueue> CreateMultiThreadedWorkQueue(
                         "WorkQueueOptions.num_threads must be "
                         "greater than 1."));
   std::unique_ptr<WorkQueue> ptr(new WorkQueueImpl(options));
-  return std::move(ptr);
+  return ptr;
 }

 std::unique_ptr<WorkQueueGroup> CreateWorkQueueGroup(
@@ -168,7 +168,7 @@ std::unique_ptr<WorkQueueGroup> CreateWorkQueueGroup(
                         "For a WorkQueueGroup, the number of WorkQueueOptions "
                         "must be greater than 1."));
   std::unique_ptr<WorkQueueGroup> ptr(new WorkQueueGroupImpl(queues_options));
-  return std::move(ptr);
+  return ptr;
 }
 }  // namespace framework
@@ -192,14 +192,7 @@ std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
     LOG(ERROR) << "fail to call Init";
     return nullptr;
   }
-#ifdef __clang__
-  // fix clang compile error
-  return cls;
-#else
-  // fix manylinux compile error.
-  return std::move(cls);
-#endif
+  return cls;
 }

 bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
@@ -390,12 +383,7 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
     return nullptr;
   }
-#ifdef __clang__
-  // fix clang compile error
-  return predictor;
-#else
-  return std::move(predictor);
-#endif
+  return predictor;
 }

 template <>
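The deleted `#ifdef __clang__` branches were working around a disagreement between toolchains: judging by the in-code comments, an old manylinux GCC needed the explicit `std::move` while Clang warned about it. Modern compilers apply the implicit move on `return` uniformly, even when the returned local must be converted to the function's return type (the resolution of CWG 1579, available since roughly GCC 5), so one unconditional `return` now satisfies both. A minimal sketch of that converting-return case, with hypothetical `Base`/`Derived` types:

```cpp
#include <memory>

struct Base {
  virtual ~Base() = default;
};
struct Derived : Base {};

// The return type differs from the type of the local, so the statement
// uses unique_ptr's converting move constructor. After CWG 1579 the
// named local is still treated as an rvalue here, so no explicit
// std::move is needed; adding one draws -Wredundant-move on GCC.
std::unique_ptr<Base> MakeDerived() {
  std::unique_ptr<Derived> p(new Derived);
  return p;  // implicit move through the Derived -> Base conversion
}
```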
@@ -59,7 +59,7 @@ std::string GetIPCName() {
 #endif
   handle += "_";
   handle += std::to_string(rd());
-  return std::move(handle);
+  return handle;
 }

 std::shared_ptr<MemoryMapWriterAllocation> AllocateMemoryMapWriterAllocation(
@@ -324,7 +324,7 @@ void BindInferenceApi(py::module *m) {
         auto pred =
             std::unique_ptr<paddle_infer::Predictor>(
                 new paddle_infer::Predictor(config));
-        return std::move(pred);
+        return pred;
       });
   m->def("copy_tensor", &CopyPaddleInferTensor);
   m->def("paddle_dtype_size", &paddle::PaddleDtypeSize);
@@ -53,7 +53,7 @@ template <class... ARGS>
 std::string format_string(const char* fmt, ARGS&&... args) {
   std::string str;
   format_string_append(str, fmt, args...);
-  return std::move(str);
+  return str;
 }

 template <class... ARGS>