From 518aca15901de48c424b5a4b53e5e19e6684b369 Mon Sep 17 00:00:00 2001
From: hjchen2
Date: Mon, 17 Dec 2018 14:21:28 +0800
Subject: [PATCH] Fix opencl compile

---
 src/framework/executor.cpp                   |  5 ++-
 src/io/paddle_mobile.cpp                     | 34 ++++++++++----------
 src/operators/kernel/cl/feed_kernel.cpp      |  5 +--
 src/operators/kernel/cl/fusion_fc_kernel.cpp |  5 +--
 4 files changed, 25 insertions(+), 24 deletions(-)

diff --git a/src/framework/executor.cpp b/src/framework/executor.cpp
index 54527a1c61..3641cb963d 100644
--- a/src/framework/executor.cpp
+++ b/src/framework/executor.cpp
@@ -456,9 +456,8 @@ void Executor::LoadMemory(const VarDesc var_desc,
                           float *tensorInput, char **data) {}
 
 template <>
-void Executor::LoadMemory(const VarDesc var_desc,
-                          float *tensorInput,
-                          char **data) {
+void Executor::LoadMemory(const VarDesc var_desc,
+                          float *tensorInput, char **data) {
   // 1. version
   uint32_t version = *reinterpret_cast<uint32_t *>(*data);
 
diff --git a/src/io/paddle_mobile.cpp b/src/io/paddle_mobile.cpp
index 155b0cdd9e..bd27c219ea 100644
--- a/src/io/paddle_mobile.cpp
+++ b/src/io/paddle_mobile.cpp
@@ -202,50 +202,50 @@ double PaddleMobile::GetPredictTime() {
 #endif
 
 #ifdef PADDLE_MOBILE_FPGA
-template
-void PaddleMobile::InjectVariable(const framework::Tensor &t,
+template
+void PaddleMobile::InjectVariable(const framework::Tensor &t,
                                   std::string var_name) {
   executor_->InjectVariable(t, var_name);
 }
 
-template
-void PaddleMobile::FeedData(const framework::Tensor &t) {
+template
+void PaddleMobile::FeedData(const framework::Tensor &t) {
   executor_->FeedData(t);
 }
 
-template
-std::shared_ptr PaddleMobile::FetchResult(
+template
+std::shared_ptr PaddleMobile::FetchResult(
     int id) {
   return executor_->FetchResult(id);
 }
 
-template
-void PaddleMobile::Predict_From_To(int start, int end) {
+template
+void PaddleMobile::Predict_From_To(int start, int end) {
   executor_->Predict_From_To(start, end);
 }
 
-template
-void PaddleMobile::Predict_From(int start) {
+template
+void PaddleMobile::Predict_From(int start) {
   executor_->Predict_From(start);
 }
 
-template
-void PaddleMobile::Predict_To(int end) {
+template
+void PaddleMobile::Predict_To(int end) {
   executor_->Predict_To(end);
 }
 #endif
 
 #ifdef PADDLE_MOBILE_CL
 static std::mutex lc;
-template
-void PaddleMobile::SetCLPath(std::string path) {
+template
+void PaddleMobile::SetCLPath(std::string path) {
   std::lock_guard<std::mutex> lock(lc);
   if (framework::CLEngine::Instance()->GetCLPath() == "") {
     framework::CLEngine::Instance()->setClPath(path);
   }
 }
 template <>
-double PaddleMobile::GetPredictTime() {
+double PaddleMobile::GetPredictTime() {
   cl_int status;
   cl_uint nPlatform;
   clGetPlatformIDs(0, NULL, &nPlatform);
@@ -443,8 +443,8 @@ double PaddleMobile::GetPredictTime() {
     return -1;
   }
 }
-template
-int PaddleMobile::readText(
+template
+int PaddleMobile::readText(
     const char *kernelPath,
    char **pcode) {  // Read the text file into pcode; returns the string length
   FILE *fp;
diff --git a/src/operators/kernel/cl/feed_kernel.cpp b/src/operators/kernel/cl/feed_kernel.cpp
index e813d08e6b..3f33a863db 100644
--- a/src/operators/kernel/cl/feed_kernel.cpp
+++ b/src/operators/kernel/cl/feed_kernel.cpp
@@ -14,6 +14,7 @@ limitations under the License. */
 
 #include "operators/kernel/feed_kernel.h"
 #include "framework/cl/cl_tensor.h"
+
 namespace paddle_mobile {
 namespace operators {
 
@@ -43,8 +44,8 @@ void FeedKernel::Compute(const FeedParam &param) {
   const int Stride2 = out_C * out_H * out_W;
   const int Stride1 = out_H * out_W;
   const int Stride0 = out_W;
-  CLTensor input_cl_tensor(this->cl_helper_.CLContext(),
-                           this->cl_helper_.CLCommandQueue());
+  framework::CLTensor input_cl_tensor(this->cl_helper_.CLContext(),
+                                      this->cl_helper_.CLCommandQueue());
   input_cl_tensor.Resize(input->dims());
   cl_mem inputBuffer = input_cl_tensor.mutable_with_data(input_data);
 
diff --git a/src/operators/kernel/cl/fusion_fc_kernel.cpp b/src/operators/kernel/cl/fusion_fc_kernel.cpp
index 34f36b56bc..a9d6b80608 100644
--- a/src/operators/kernel/cl/fusion_fc_kernel.cpp
+++ b/src/operators/kernel/cl/fusion_fc_kernel.cpp
@@ -94,8 +94,9 @@ void FusionFcCompute(const FusionFcParam &param, cl_context context,
     memory::Copy(out_data + i * classes, input_z_data,
                  sizeof(float) * classes);
   }
-  math::MatMul(x_matrix, false, y_matrix, false, static_cast<float>(1),
-               out, static_cast<float>(1), false);
+  math::MatMul(x_matrix, false, y_matrix, false,
+               static_cast<float>(1), out, static_cast<float>(1),
+               false);
   out_image->InitEmptyImage(context, commandQueue, out->dims());
   framework::TensorToCLImage(out, out_image, context, commandQueue, kernel1);
 
--
GitLab
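
Note appended for review context, not part of the commit: the feed_kernel.cpp hunk only qualifies CLTensor with its enclosing framework namespace. Below is a minimal sketch of the kind of name-lookup failure such a change addresses, using toy stand-in types rather than the real paddle-mobile headers.

// Illustrative sketch only -- toy stand-ins, not the real paddle-mobile classes.
#include <iostream>

namespace paddle_mobile {
namespace framework {
// Stand-in for the CLTensor declared in framework/cl/cl_tensor.h.
struct CLTensor {
  CLTensor(void * /*context*/, void * /*command_queue*/) {}
};
}  // namespace framework

namespace operators {
void Compute(void *context, void *command_queue) {
  // CLTensor t(context, command_queue);          // error: 'CLTensor' was not declared in this scope
  framework::CLTensor t(context, command_queue);  // OK: qualified with the sibling namespace
  std::cout << "constructed framework::CLTensor\n";
}
}  // namespace operators
}  // namespace paddle_mobile

int main() {
  paddle_mobile::operators::Compute(nullptr, nullptr);
  return 0;
}

Since the kernel's Compute is defined inside paddle_mobile::operators while CLTensor lives in paddle_mobile::framework, unqualified lookup fails there unless a using-declaration is in scope; writing framework::CLTensor resolves the name explicitly.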