Commit ca08360c authored by J jiweibo

update naming conventions. test=develop

Parent 281d7c34
@@ -242,11 +242,11 @@ class CxxPaddleApiImpl : public lite_api::PaddlePredictor {
 #ifdef LITE_WITH_CUDA
   // Cuda related environment initialization, including setting stream pointers,
   // initializing synchronization events, setting predictor_id, etc.
-  void CudaEnvInit(std::vector<std::string>* passes);
+  void InitCudaEnv(std::vector<std::string>* passes);
   // Due to the asynchronous nature of cuda kernel execution, synchronization is
   // required before setting input and getting output.
-  void InputSync();
-  void OutputSync();
+  void SyncInputs();
+  void SyncOutputs();
 #endif

  private:
......
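The header comment above is the rationale for these helpers: work queued on CUDA streams runs asynchronously with respect to the host, so an event has to be recorded and waited on before inputs are consumed or outputs are read back. A minimal sketch of that record-then-wait pattern, written against the raw CUDA runtime API rather than Paddle-Lite's TargetWrapperCuda wrapper (the stream and event names here are illustrative, not the predictor's actual members):

#include <cuda_runtime.h>

// Illustrative sketch only: shows the synchronization pattern behind
// SyncInputs()/SyncOutputs(), not the actual Paddle-Lite implementation.
void SyncInputsSketch(cudaStream_t io_stream, cudaStream_t exec_stream,
                      cudaEvent_t input_event) {
  // Mark the point on the I/O stream where the host-to-device input copy ends.
  cudaEventRecord(input_event, io_stream);
  // Make the execution stream wait for that point before launching kernels;
  // the host thread itself is not blocked.
  cudaStreamWaitEvent(exec_stream, input_event, 0);
}

void SyncOutputsSketch(cudaStream_t exec_stream, cudaEvent_t output_event) {
  // Mark the point on the execution stream where the last kernel finishes.
  cudaEventRecord(output_event, exec_stream);
  // Block the host until that point is reached, so outputs can be read safely.
  cudaEventSynchronize(output_event);
}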
@@ -43,7 +43,7 @@ void CxxPaddleApiImpl::Init(const lite_api::CxxConfig &config) {
   // otherwise skip this step.
   for (auto &p : places) {
     if (p.target == TARGET(kCUDA)) {
-      CudaEnvInit(&passes);
+      InitCudaEnv(&passes);
       break;
     }
   }
@@ -88,7 +88,7 @@ void CxxPaddleApiImpl::Init(const lite_api::CxxConfig &config) {
 }

 #ifdef LITE_WITH_CUDA
-void CxxPaddleApiImpl::CudaEnvInit(std::vector<std::string> *passes) {
+void CxxPaddleApiImpl::InitCudaEnv(std::vector<std::string> *passes) {
   Env<TARGET(kCUDA)>::Init();
   // init two streams for each predictor.
@@ -131,7 +131,7 @@ void CxxPaddleApiImpl::CudaEnvInit(std::vector<std::string> *passes) {
   TargetWrapperCuda::CreateEventWithFlags(&input_event_);
 }

-void CxxPaddleApiImpl::InputSync() {
+void CxxPaddleApiImpl::SyncInputs() {
   TargetWrapperCuda::RecordEvent(input_event_, *io_stream_);
   if (multi_stream_) {
     for (int i = 0; i < lite::kMaxStream; ++i) {
@@ -142,7 +142,7 @@ void CxxPaddleApiImpl::InputSync() {
   }
 }

-void CxxPaddleApiImpl::OutputSync() {
+void CxxPaddleApiImpl::SyncOutputs() {
   if (multi_stream_) {
     for (size_t i = 0; i < output_events_.size(); ++i) {
       TargetWrapperCuda::RecordEvent(output_events_[i], *exec_streams_[i]);
@@ -193,13 +193,13 @@ void CxxPaddleApiImpl::Run() {
   lite::DeviceInfo::Global().SetRunMode(mode_, threads_);
 #endif
 #ifdef LITE_WITH_CUDA
-  InputSync();
+  SyncInputs();
 #endif
   raw_predictor_->Run();
 #ifdef LITE_WITH_CUDA
-  OutputSync();
+  SyncOutputs();
 #endif
 }
......
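From the caller's side the rename is invisible, since synchronization stays inside Run(). A hedged usage sketch of the C++ API with a CUDA place follows; the model directory, valid places, and input shape are placeholders, not taken from this commit:

#include <memory>
#include <vector>
#include "paddle_api.h"  // NOLINT

int main() {
  using namespace paddle::lite_api;  // NOLINT

  // Placeholder config: model directory and valid places are examples only.
  CxxConfig config;
  config.set_model_dir("./mobilenet_v1");
  config.set_valid_places({Place{TARGET(kCUDA), PRECISION(kFloat)},
                           Place{TARGET(kHost), PRECISION(kFloat)}});
  auto predictor = CreatePaddlePredictor<CxxConfig>(config);

  // Fill a dummy input tensor.
  auto input = predictor->GetInput(0);
  input->Resize({1, 3, 224, 224});
  auto* data = input->mutable_data<float>();
  for (int i = 0; i < 1 * 3 * 224 * 224; ++i) data[i] = 1.f;

  // Run() calls SyncInputs() before and SyncOutputs() after CUDA execution,
  // so the output below is already visible to the host.
  predictor->Run();
  auto output = predictor->GetOutput(0);
  return 0;
}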