Commit c04adf19 authored by yangfei

add cpu and gpu predict function

Parent 70966b6f
@@ -52,11 +52,6 @@ bool PaddleMobilePredictor<Dtype, P>::Init(const PaddleMobileConfig &config) {
   paddle_mobile_->SetThreadNum(config.thread_num);
   return true;
 }
-template <typename Dtype, Precision P>
-double PaddleMobilePredictor<Dtype, P>::CaculatePredictTime() {
-  return paddle_mobile_->GetPredictTime();
-};
 template <typename Dtype, Precision P>
 bool PaddleMobilePredictor<Dtype, P>::Run(
     const std::vector<PaddleTensor> &inputs,
...
@@ -40,8 +40,6 @@ class PaddleMobilePredictor : public PaddlePredictor {
                    std::vector<PaddleTensor>* output_data,
                    int batch_size = -1) override;
-  double CaculatePredictTime() override;
   ~PaddleMobilePredictor() override;
  private:
...
@@ -98,7 +98,6 @@ class PaddlePredictor {
   virtual bool Run(const std::vector<PaddleTensor>& inputs,
                    std::vector<PaddleTensor>* output_data,
                    int batch_size = -1) = 0;
-  virtual double CaculatePredictTime() = 0;
   // Destroy the Predictor.
   virtual ~PaddlePredictor() = default;
...
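Taken together, these three hunks remove CaculatePredictTime from the PaddlePredictor interface and its paddle-mobile implementation; the two new files below reintroduce timing as a standalone PaddleTester. A minimal before/after sketch of a call site (the surrounding predictor setup is assumed, not shown in this diff):

// Before this commit, timing went through a loaded predictor:
//   double t = predictor->CaculatePredictTime();
// After it, a standalone tester measures predict time directly,
// with no predictor constructed and no model loaded:
paddle_mobile::PaddleTester<paddle_mobile::CPU> tester;
double t = tester.CaculatePredictTime();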
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "io/paddle_test_inference_api.h"
namespace paddle_mobile {
// Returns the predict time reported by PaddleMobile::GetPredictTime for
// the device selected by Dtype.
template <typename Dtype, Precision P>
double PaddleTester<Dtype, P>::CaculatePredictTime(std::string *cl_path) {
PaddleMobile<Dtype, P> paddle_mobile;
#ifdef PADDLE_MOBILE_CL
if (cl_path) {
paddle_mobile.SetCLPath(*cl_path);
}
#endif
return paddle_mobile.GetPredictTime();
}
// Explicit instantiations: the template definition lives in this .cpp,
// so every supported device type must be listed here to link.
template class PaddleTester<CPU, Precision::FP32>;
template class PaddleTester<FPGA, Precision::FP32>;
template class PaddleTester<GPU_MALI, Precision::FP32>;
template class PaddleTester<GPU_CL, Precision::FP32>;
} // namespace paddle_mobile
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
/*
* This file contains the definition of a simple Inference API for Paddle.
*
* ATTENTION: It requires some C++ features, for lower version C++ or C, we
* might release another API.
*/
#pragma once
#include "common/types.h"
#include "io/paddle_mobile.h"
#include "string"
namespace paddle_mobile {
template <typename Dtype, Precision P = Precision::FP32>
class PaddleTester {
 public:
  // cl_path, if non-null, is forwarded to SetCLPath for GPU_CL builds
  // before the predict time is measured.
  double CaculatePredictTime(std::string *cl_path = nullptr);
};
} // namespace paddle_mobile
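A usage sketch for the new tester, mirroring the test change below; the /data/local/tmp/bin path is the device-specific location this commit's test uses, not a required value:

#include <cstdio>
#include <string>
#include "io/paddle_test_inference_api.h"

int main() {
  // CPU timing needs no extra setup.
  paddle_mobile::PaddleTester<paddle_mobile::CPU> cpu_tester;
  printf("cpu time:%f\n", cpu_tester.CaculatePredictTime());

  // GPU_CL timing takes the OpenCL binary path via cl_path.
  paddle_mobile::PaddleTester<paddle_mobile::GPU_CL> gpu_tester;
  std::string cl_path = "/data/local/tmp/bin";
  printf("gpu time:%f\n", gpu_tester.CaculatePredictTime(&cl_path));
  return 0;
}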
@@ -15,17 +15,21 @@ limitations under the License. */
 #include <iostream>
 #include <thread>
 #include "../../src/common/types.h"
+#include "../../src/io/paddle_test_inference_api.h"
 #include "../test_helper.h"
 #include "../test_include.h"
 void t1() {
   paddle_mobile::PaddleMobile<paddle_mobile::GPU_CL> paddle_mobile_gpu;
   paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile_cpu;
+  paddle_mobile::PaddleTester<paddle_mobile::CPU> paddle_test_cpu;
+  paddle_mobile::PaddleTester<paddle_mobile::GPU_CL> paddle_test_gpu;
+  printf("cpu time:%f\n", paddle_test_cpu.CaculatePredictTime());
+  std::string path = "/data/local/tmp/bin";
+  printf("gpu time:%f\n", paddle_test_gpu.CaculatePredictTime(&path));
   //  paddle_mobile.SetThreadNum(4);
 #ifdef PADDLE_MOBILE_CL
   paddle_mobile_gpu.SetCLPath("/data/local/tmp/bin");
 #endif
-  printf("cpu time:%f\n", paddle_mobile_cpu.GetPredictTime());
-  printf("gpu time:%f\n", paddle_mobile_gpu.GetPredictTime());
   auto time1 = paddle_mobile::time();
   auto isok = paddle_mobile_gpu.Load(std::string(g_yolo_mul) + "/model",
                                      std::string(g_yolo_mul) + "/params", true);
...
@@ -175,11 +179,11 @@ void t3() {
 int main() {
   //  std::thread th1(t1);
   //  std::thread th2(t2);
-  std::thread th3(t3);
-  //  std::thread th1(t1);
+  //  std::thread th3(t3);
+  std::thread th1(t1);
   //  th1.join();
   //  th2.join();
-  th3.join();
-  //  th1.join();
+  //  th3.join();
+  th1.join();
   return 0;
 }