diff --git a/src/io/api_paddle_mobile.cc b/src/io/api_paddle_mobile.cc
index c5da3993d18d6c21c46c923e99609b4c290fb668..8088f0b8c9f600ce2422af500ab66a68e1341fc8 100644
--- a/src/io/api_paddle_mobile.cc
+++ b/src/io/api_paddle_mobile.cc
@@ -52,11 +52,6 @@ bool PaddleMobilePredictor<Device, T>::Init(const PaddleMobileConfig &config) {
   paddle_mobile_->SetThreadNum(config.thread_num);
   return true;
 }
-template <typename Device, typename T>
-double PaddleMobilePredictor<Device, T>::CaculatePredictTime() {
-  return paddle_mobile_->GetPredictTime();
-};
-
 template <typename Device, typename T>
 bool PaddleMobilePredictor<Device, T>::Run(
     const std::vector<PaddleTensor> &inputs,
diff --git a/src/io/api_paddle_mobile.h b/src/io/api_paddle_mobile.h
index d8e5f856c6bae870f89d6957aafa97c34bfad5dd..bdeb7e18653843ec9547f027068768532ba04fb2 100644
--- a/src/io/api_paddle_mobile.h
+++ b/src/io/api_paddle_mobile.h
@@ -40,8 +40,6 @@ class PaddleMobilePredictor : public PaddlePredictor {
            std::vector<PaddleTensor>* output_data,
            int batch_size = -1) override;
 
-  double CaculatePredictTime() override;
-
   ~PaddleMobilePredictor() override;
 
  private:
diff --git a/src/io/paddle_inference_api.h b/src/io/paddle_inference_api.h
index 33a166f2c5cb9d668a411db1a03e1a766b3cfe9d..5326f864a4b5238c8498ee1fe9e5810ca0a657cf 100644
--- a/src/io/paddle_inference_api.h
+++ b/src/io/paddle_inference_api.h
@@ -98,7 +98,6 @@ class PaddlePredictor {
   virtual bool Run(const std::vector<PaddleTensor>& inputs,
                    std::vector<PaddleTensor>* output_data,
                    int batch_size = -1) = 0;
-  virtual double CaculatePredictTime() = 0;
 
   // Destroy the Predictor.
   virtual ~PaddlePredictor() = default;
diff --git a/src/io/paddle_test_inference_api.cpp b/src/io/paddle_test_inference_api.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1d69856653ab00ab498271b13c249d645eff6dfb
--- /dev/null
+++ b/src/io/paddle_test_inference_api.cpp
@@ -0,0 +1,34 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "io/paddle_test_inference_api.h"
+namespace paddle_mobile {
+template <typename Device, typename T>
+double PaddleTester<Device, T>::CaculatePredictTime(std::string *cl_path) {
+  PaddleMobile<Device, T> paddle_mobile;
+#ifdef PADDLE_MOBILE_CL
+  if (cl_path) {
+    paddle_mobile.SetCLPath(*cl_path);
+  }
+
+#endif
+  return paddle_mobile.GetPredictTime();
+}
+template class PaddleTester<CPU, float>;
+template class PaddleTester<FPGA, float>;
+template class PaddleTester<GPU_MALI, float>;
+
+template class PaddleTester<GPU_CL, float>;
+
+}  // namespace paddle_mobile
diff --git a/src/io/paddle_test_inference_api.h b/src/io/paddle_test_inference_api.h
new file mode 100644
index 0000000000000000000000000000000000000000..528647aae06433c8ddc2db3ca2c720fa358081d0
--- /dev/null
+++ b/src/io/paddle_test_inference_api.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+/*
+ * This file contains the definition of a simple Inference API for Paddle.
+ *
+ * ATTENTION: It requires some C++ features, for lower version C++ or C, we
+ * might release another API.
+ */
+
+#pragma once
+#include "common/types.h"
+#include "io/paddle_mobile.h"
+#include "string"
+
+namespace paddle_mobile {
+template <typename Device, typename T = float>
+class PaddleTester {
+ public:
+  double CaculatePredictTime(std::string *cl_path = nullptr);
+};
+
+}  // namespace paddle_mobile
diff --git a/test/net/test_yologpu.cpp b/test/net/test_yologpu.cpp
index e77861cabad8baca7bfe5bf673ba9b01af97498d..0215ded59e5f74f0c103d4b51abe06b487bd50ab 100644
--- a/test/net/test_yologpu.cpp
+++ b/test/net/test_yologpu.cpp
@@ -15,17 +15,21 @@ limitations under the License. */
 #include <iostream>
 #include <thread>
 #include "../../src/common/types.h"
+#include "../../src/io/paddle_test_inference_api.h"
 #include "../test_helper.h"
 #include "../test_include.h"
 void t1() {
   paddle_mobile::PaddleMobile<paddle_mobile::GPU_CL, float> paddle_mobile_gpu;
   paddle_mobile::PaddleMobile<paddle_mobile::CPU, float> paddle_mobile_cpu;
+  paddle_mobile::PaddleTester<paddle_mobile::CPU, float> paddle_test_cpu;
+  paddle_mobile::PaddleTester<paddle_mobile::GPU_CL, float> paddle_test_gpu;
+  printf("cpu time:%f\n", paddle_test_cpu.CaculatePredictTime());
+  std::string path = "/data/local/tmp/bin";
+  printf("gpu time:%f\n", paddle_test_gpu.CaculatePredictTime(&path));
   // paddle_mobile.SetThreadNum(4);
 #ifdef PADDLE_MOBILE_CL
   paddle_mobile_gpu.SetCLPath("/data/local/tmp/bin");
 #endif
-  printf("cpu time:%f\n", paddle_mobile_cpu.GetPredictTime());
-  printf("gpu time:%f\n", paddle_mobile_gpu.GetPredictTime());
   auto time1 = paddle_mobile::time();
   auto isok = paddle_mobile_gpu.Load(std::string(g_yolo_mul) + "/model",
                                      std::string(g_yolo_mul) + "/params", true);
@@ -175,11 +179,11 @@ void t3() {
 int main() {
   // std::thread th1(t1);
   // std::thread th2(t2);
-  std::thread th3(t3);
-  // std::thread th1(t1);
+  // std::thread th3(t3);
+  std::thread th1(t1);
   // th1.join();
   // th2.join();
-  th3.join();
-  // th1.join();
+  // th3.join();
+  th1.join();
   return 0;
 }
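Usage note: with this change, predict-time measurement moves off the PaddlePredictor interface and into the standalone PaddleTester helper, so a benchmark no longer needs a fully configured predictor. The minimal caller below is a sketch, assuming the paddle_mobile::CPU and paddle_mobile::GPU_CL device tags from common/types.h and a device-writable directory such as /data/local/tmp/bin for the OpenCL binary path; it mirrors the updated t1() in test_yologpu.cpp.

    #include <cstdio>
    #include <string>
    #include "io/paddle_test_inference_api.h"

    int main() {
      // CPU timing needs no extra setup.
      paddle_mobile::PaddleTester<paddle_mobile::CPU, float> cpu_tester;
      printf("cpu time: %f\n", cpu_tester.CaculatePredictTime());

      // GPU timing: pass a writable directory for the OpenCL kernel path.
      // The path is only honored when built with PADDLE_MOBILE_CL.
      paddle_mobile::PaddleTester<paddle_mobile::GPU_CL, float> gpu_tester;
      std::string cl_path = "/data/local/tmp/bin";
      printf("gpu time: %f\n", gpu_tester.CaculatePredictTime(&cl_path));
      return 0;
    }

Because CaculatePredictTime constructs its own PaddleMobile<Device, T> internally, each call times a fresh instance; only the combinations explicitly instantiated in paddle_test_inference_api.cpp (CPU, FPGA, GPU_MALI, GPU_CL, each with float) will link.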