diff --git a/src/io/api_paddle_mobile.cc b/src/io/api_paddle_mobile.cc index 144cf127a44c78279ca1d95815646a4f01fed6bd..c5da3993d18d6c21c46c923e99609b4c290fb668 100644 --- a/src/io/api_paddle_mobile.cc +++ b/src/io/api_paddle_mobile.cc @@ -52,6 +52,10 @@ bool PaddleMobilePredictor::Init(const PaddleMobileConfig &config) { paddle_mobile_->SetThreadNum(config.thread_num); return true; } +template +double PaddleMobilePredictor::CalculatePredictTime() { + return paddle_mobile_->GetPredictTime(); +} template bool PaddleMobilePredictor::Run( diff --git a/src/io/api_paddle_mobile.h b/src/io/api_paddle_mobile.h index bdeb7e18653843ec9547f027068768532ba04fb2..d8e5f856c6bae870f89d6957aafa97c34bfad5dd 100644 --- a/src/io/api_paddle_mobile.h +++ b/src/io/api_paddle_mobile.h @@ -40,6 +40,8 @@ class PaddleMobilePredictor : public PaddlePredictor { std::vector* output_data, int batch_size = -1) override; + double CalculatePredictTime() override; + ~PaddleMobilePredictor() override; private: diff --git a/src/io/paddle_inference_api.h b/src/io/paddle_inference_api.h index 3c9ffa00c7e749d1c9d77562b2db0b42ee605164..33a166f2c5cb9d668a411db1a03e1a766b3cfe9d 100644 --- a/src/io/paddle_inference_api.h +++ b/src/io/paddle_inference_api.h @@ -98,7 +98,7 @@ class PaddlePredictor { virtual bool Run(const std::vector& inputs, std::vector* output_data, int batch_size = -1) = 0; - + virtual double CalculatePredictTime() = 0; // Destroy the Predictor. virtual ~PaddlePredictor() = default; diff --git a/src/io/paddle_mobile.cpp b/src/io/paddle_mobile.cpp index cd49532045ca839e0d5b120f5b424473dede6ab9..fca870860ec1156aa7d3d8503951cfb8a2e84821 100644 --- a/src/io/paddle_mobile.cpp +++ b/src/io/paddle_mobile.cpp @@ -13,9 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "io/paddle_mobile.h" +#ifdef PADDLE_MOBILE_CL #include -#include "common/common.h" #include "framework/cl/cl_tensor.h" +#endif +#include "common/common.h" #include "operators/math/gemm.h" namespace paddle_mobile { @@ -123,7 +125,7 @@ void PaddleMobile::Clear() { } template -double PaddleMobile::GetCPUPredictTime() { +double PaddleMobile::GetPredictTime() { int m = 32; int n = 224 * 224; int k = 27; @@ -204,8 +206,8 @@ void PaddleMobile::SetCLPath(std::string path) { framework::CLEngine::Instance()->setClPath(path); } } -template -double PaddleMobile::GetGPUPredictTime() { +template <> +double PaddleMobile::GetPredictTime() { cl_int status; cl_uint nPlatform; clGetPlatformIDs(0, NULL, &nPlatform); diff --git a/src/io/paddle_mobile.h b/src/io/paddle_mobile.h index 5f058ebf285026326ca3fb3caaa83e26013b98b2..ab148e7361c160bc658403d4696b806323595c54 100644 --- a/src/io/paddle_mobile.h +++ b/src/io/paddle_mobile.h @@ -65,7 +65,7 @@ class PaddleMobile { void SetThreadNum(int num); void Clear(); - double GetCPUPredictTime(); + double GetPredictTime(); ~PaddleMobile(); @@ -81,7 +81,6 @@ class PaddleMobile { #ifdef PADDLE_MOBILE_CL public: void SetCLPath(std::string cl_path); - double GetGPUPredictTime(); int readText(const char *kernelPath, char **pcode); // 读取文本文件放入 pcode,返回字符串长度 #endif diff --git a/test/net/test_yologpu.cpp b/test/net/test_yologpu.cpp index d0797de908f5167eb4ba22f23acda9a000abb8dd..e77861cabad8baca7bfe5bf673ba9b01af97498d 100644 --- a/test/net/test_yologpu.cpp +++ b/test/net/test_yologpu.cpp @@ -18,16 +18,17 @@ limitations under the License. 
*/ #include "../test_helper.h" #include "../test_include.h" void t1() { - paddle_mobile::PaddleMobile paddle_mobile; + paddle_mobile::PaddleMobile paddle_mobile_gpu; + paddle_mobile::PaddleMobile paddle_mobile_cpu; // paddle_mobile.SetThreadNum(4); #ifdef PADDLE_MOBILE_CL - paddle_mobile.SetCLPath("/data/local/tmp/bin"); + paddle_mobile_gpu.SetCLPath("/data/local/tmp/bin"); #endif - printf("cpu time:%f\n", paddle_mobile.GetCPUPredictTime()); - printf("gpu time:%f\n", paddle_mobile.GetGPUPredictTime()); + printf("cpu time:%f\n", paddle_mobile_cpu.GetPredictTime()); + printf("gpu time:%f\n", paddle_mobile_gpu.GetPredictTime()); auto time1 = paddle_mobile::time(); - auto isok = paddle_mobile.Load(std::string(g_yolo_mul) + "/model", - std::string(g_yolo_mul) + "/params", true); + auto isok = paddle_mobile_gpu.Load(std::string(g_yolo_mul) + "/model", + std::string(g_yolo_mul) + "/params", true); // auto isok = paddle_mobile.Load(std::string(g_yolo_mul), true); if (isok) { @@ -45,7 +46,7 @@ void t1() { auto time3 = paddle_mobile::time(); int max = 10; for (int i = 0; i < max; ++i) { - vec_result = paddle_mobile.Predict(input, dims); + vec_result = paddle_mobile_gpu.Predict(input, dims); } auto time4 = paddle_mobile::time(); @@ -173,10 +174,12 @@ void t3() { int main() { // std::thread th1(t1); - // std::thread th2(t2); - std::thread th1(t1); + // std::thread th2(t2); + std::thread th3(t3); + // std::thread th1(t1); + // th1.join(); + // th2.join(); + th3.join(); // th1.join(); - // th2.join(); - th1.join(); return 0; }