Commit dded92f9, authored by Jiaying Zhao, committed by GitHub

[mobile]Add multi input interface in inference api. (#2634)

* [mobile]Add multi input interface in inference api.

* [Mobile]Open switch of mobile/test/CMakeLists.txt to build all test.

* [Mobile]Fix ci compile error.

* [Mobile]Change test_inference_api_v2 to test multi input and output.
Parent 810eb089
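
In effect, the new interface lets a caller bind several named inputs with Feed, run one forward pass with Run, and read named outputs back with Fetch, instead of passing a single input vector. A minimal sketch of that flow, condensed from the test added in this commit (the model paths, the input shape, and the input_x/fetch_y variable names are placeholders for whatever the loaded program actually defines):

#include <vector>
#include "io/paddle_inference_api.h"

using namespace paddle_mobile;  // NOLINT

int main() {
  PaddleMobileConfig config;
  config.precision = PaddleMobileConfig::FP32;
  config.device = PaddleMobileConfig::kGPU_CL;
  config.pre_post_type = PaddleMobileConfig::NONE_PRE_POST;
  config.prog_file = "../models/my_model/model";    // placeholder path
  config.param_file = "../models/my_model/params";  // placeholder path
  auto predictor =
      CreatePaddlePredictor<PaddleMobileConfig,
                            PaddleEngineKind::kPaddleMobile>(config);

  // One named input; a multi-input model takes one Feed() call per input.
  std::vector<float> buf(1 * 3 * 224 * 224, 0.5f);  // dummy data
  PaddleTensor x;
  x.shape = std::vector<int>({1, 3, 224, 224});
  x.data = PaddleBuf(buf.data(), buf.size() * sizeof(float));
  x.dtype = PaddleDType::FLOAT32;
  x.layout = LayoutType::LAYOUT_CHW;

  PaddleTensor y;  // Fetch() fills in the shape and sizes the buffer
  predictor->Feed("input_x", x);    // placeholder variable name
  predictor->Run();
  predictor->Fetch("fetch_y", &y);  // placeholder variable name
  return 0;
}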
@@ -262,6 +262,37 @@ void PaddleMobilePredictor<Device, T>::Predict_From_To(int start, int end) {
   paddle_mobile_->Predict_From_To(start, end);
 }
+#else
+template <typename Device, typename T>
+void PaddleMobilePredictor<Device, T>::Feed(const std::string &var_name,
+                                            const PaddleTensor &input) {
+  framework::DDim ddim = framework::make_ddim(input.shape);
+  framework::Tensor input_tensor(static_cast<T *>(input.data.data()), ddim);
+  paddle_mobile_->Feed(var_name, input_tensor);
+}
+
+template <typename Device, typename T>
+void PaddleMobilePredictor<Device, T>::Fetch(const std::string &var_name,
+                                             PaddleTensor *output) {
+  auto output_tensor = paddle_mobile_->Fetch(var_name);
+  auto ddim = output_tensor->dims();
+
+  output->shape.clear();
+  for (int i = 0; i < ddim.size(); i++) {
+    output->shape.push_back(static_cast<int>(ddim[i]));
+  }
+
+  int length = output_tensor->numel() * sizeof(T);
+  if (output->data.length() < length) {
+    output->data.Resize(length);
+  }
+  memcpy(output->data.data(), output_tensor->template data<T>(), length);
+}
+
+template <typename Device, typename T>
+bool PaddleMobilePredictor<Device, T>::Run() {
+  paddle_mobile_->Predict();
+  // Return a value explicitly: falling off the end of a bool-returning
+  // function, as the original diff did, is undefined behavior.
+  return true;
+}
 #endif

 template <typename Device, typename T>
 PaddleMobilePredictor<Device, T>::~PaddleMobilePredictor() {
...
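
One detail worth noting in the Fetch implementation above: the output buffer is grown only when it is smaller than the fetched tensor, so a caller that reuses the same PaddleTensor across runs pays for at most one allocation. A sketch of that pattern (kNumFrames and the refill step are illustrative; predictor and the input tensors are assumed to be set up as in the test this commit adds):

// Reuse one output tensor across repeated Run() calls: the first Fetch()
// resizes output0.data to the tensor's byte size; later calls reuse it.
PaddleTensor output0;
for (int frame = 0; frame < kNumFrames; ++frame) {
  // ... refill and re-Feed the input tensors for this frame ...
  predictor->Run();
  predictor->Fetch("save_infer_model/scale_0", &output0);
  // output0.data.data() now holds this frame's result without a fresh
  // allocation once the buffer has reached its steady-state size.
}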
@@ -39,7 +39,10 @@ class PaddleMobilePredictor : public PaddlePredictor {
   void FetchPaddleTensors(std::vector<PaddleTensor>* outputs) override;
   void FetchPaddleTensors(PaddleTensor* outputs, int id) override;
   void GetPaddleTensor(const std::string& name, PaddleTensor* output) override;
+#else
+  void Feed(const std::string& var_name, const PaddleTensor& input);
+  void Fetch(const std::string& var_name, PaddleTensor* output);
+  bool Run();
 #endif

   ~PaddleMobilePredictor() override;
...
@@ -191,6 +191,10 @@ class PaddlePredictor {
   virtual void FetchPaddleTensors(PaddleTensor* outputs, int id) = 0;
   virtual void GetPaddleTensor(const std::string& name,
                                PaddleTensor* output) = 0;
+#else
+  virtual void Feed(const std::string& var_name, const PaddleTensor& input) = 0;
+  virtual void Fetch(const std::string& var_name, PaddleTensor* output) = 0;
+  virtual bool Run() = 0;
 #endif

  protected:
...
@@ -542,6 +542,9 @@ if (ENABLE_ALL_TEST)
     # gen test
     ADD_EXECUTABLE(test-net-performance net/test_net_performance.cpp test_helper.h test_include.h executor_for_test.h)
     target_link_libraries(test-net-performance paddle-mobile)
+
+    ADD_EXECUTABLE(test-inference-api-v2 net/test_inference_api_v2.cpp test_helper.h test_include.h executor_for_test.h)
+    target_link_libraries(test-inference-api-v2 paddle-mobile)
   endif ()
 else()
   # gen test
@@ -550,4 +553,7 @@ else()
   ADD_EXECUTABLE(test-net-benchmark net/test_net_benchmark.cpp test_helper.h test_include.h)
   target_link_libraries(test-net-benchmark paddle-mobile)
+
+  ADD_EXECUTABLE(test-inference-api-v2 net/test_inference_api_v2.cpp test_helper.h test_include.h executor_for_test.h)
+  target_link_libraries(test-inference-api-v2 paddle-mobile)
 endif()
@@ -14,9 +14,9 @@ limitations under the License. */

 #pragma once

+#include <memory>
 #include <string>
 #include <vector>
-#include <memory>

 #include "common/log.h"
 #include "framework/executor.h"
 #include "framework/op_registry.h"
...
@@ -15,10 +15,11 @@ limitations under the License. */

 #pragma once

 #ifdef PADDLE_MOBILE_CL

+#include <memory>
 #include <string>
 #include <vector>
-#include <memory>

+#include "./test_helper.h"
 #include "common/log.h"
 #include "framework/cl/cl_helper.h"
 #include "framework/cl/cl_tensor.h"
@@ -26,18 +27,17 @@ limitations under the License. */
 #include "framework/op_registry.h"
 #include "operators/feed_op.h"
 #include "operators/fetch_op.h"
-#include "./test_helper.h"

+using paddle_mobile::framework::AttributeMap;
 using paddle_mobile::framework::BlockDesc;
 using paddle_mobile::framework::DDim;
 using paddle_mobile::framework::Executor;
 using paddle_mobile::framework::LoDTensor;
 using paddle_mobile::framework::OpDesc;
+using paddle_mobile::framework::OperatorBase;
 using paddle_mobile::framework::Program;
 using paddle_mobile::framework::Tensor;
 using paddle_mobile::framework::Variable;
-using paddle_mobile::framework::OperatorBase;
-using paddle_mobile::framework::AttributeMap;

 using std::string;
 using std::vector;

 namespace paddle_mobile {
...
net/test_inference_api_v2.cpp (new file):

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <iostream>
#include "../test_helper.h"
#include "io/paddle_inference_api.h"

using namespace paddle_mobile;  // NOLINT

PaddleMobileConfig GetConfig() {
  PaddleMobileConfig config;
  config.precision = PaddleMobileConfig::FP32;
  config.device = PaddleMobileConfig::kGPU_CL;
  config.pre_post_type = PaddleMobileConfig::NONE_PRE_POST;
  config.prog_file = "../models/ercy/model";
  config.param_file = "../models/ercy/params";
  config.lod_mode = false;
  config.load_when_predict = false;
  return config;
}

int main() {
  PaddleMobileConfig config = GetConfig();
  auto predictor =
      CreatePaddlePredictor<PaddleMobileConfig,
                            PaddleEngineKind::kPaddleMobile>(config);

  // reliable
  int re_len = 1 * 1 * 64 * 72;
  std::vector<float> re_v;
  std::vector<int64_t> re_dims{1, 1, 64, 72};
  GetInput<float>(g_test_image_1x3x224x224, &re_v, re_dims);
  PaddleTensor re;
  re.shape = std::vector<int>({1, 1, 64, 72});
  re.data = PaddleBuf(re_v.data(), re_len * sizeof(float));
  re.dtype = PaddleDType::FLOAT32;
  re.layout = LayoutType::LAYOUT_CHW;

  // grid
  int grid_len = 1 * 64 * 72 * 2;
  std::vector<float> grid_v;
  std::vector<int64_t> grid_dims{1, 64, 72, 2};
  GetInput<float>(g_test_image_1x3x224x224, &grid_v, grid_dims);
  PaddleTensor grid;
  grid.shape = std::vector<int>({1, 64, 72, 2});
  grid.data = PaddleBuf(grid_v.data(), grid_len * sizeof(float));
  grid.dtype = PaddleDType::FLOAT32;
  grid.layout = LayoutType::LAYOUT_CHW;

  // last_input
  int last_len = 1 * 128 * 64 * 72;
  std::vector<float> last_v;
  std::vector<int64_t> last_dims{1, 128, 64, 72};
  GetInput<float>(g_test_image_1x3x224x224, &last_v, last_dims);
  PaddleTensor last;
  last.shape = std::vector<int>({1, 128, 64, 72});
  last.data = PaddleBuf(last_v.data(), last_len * sizeof(float));
  last.dtype = PaddleDType::FLOAT32;
  last.layout = LayoutType::LAYOUT_CHW;

  // input_rgb
  int input_rgb_len = 1 * 4 * 256 * 288;
  std::vector<float> input_rgb_v;
  std::vector<int64_t> input_rgb_dims{1, 4, 256, 288};
  GetInput<float>(g_test_image_1x3x224x224, &input_rgb_v, input_rgb_dims);
  PaddleTensor input_rgb;
  input_rgb.shape = std::vector<int>({1, 4, 256, 288});
  input_rgb.data = PaddleBuf(input_rgb_v.data(), input_rgb_len * sizeof(float));
  input_rgb.dtype = PaddleDType::FLOAT32;
  input_rgb.layout = LayoutType::LAYOUT_CHW;

  PaddleTensor output0;
  output0.shape = std::vector<int>({});
  output0.data = PaddleBuf();
  output0.dtype = PaddleDType::FLOAT32;
  output0.layout = LayoutType::LAYOUT_CHW;

  PaddleTensor output1;
  output1.shape = std::vector<int>({});
  output1.data = PaddleBuf();
  output1.dtype = PaddleDType::FLOAT32;
  output1.layout = LayoutType::LAYOUT_CHW;

  predictor->Feed("reliable", re);
  predictor->Feed("grid", grid);
  predictor->Feed("last_input", last);
  predictor->Feed("input_rgb", input_rgb);
  predictor->Run();
  predictor->Fetch("save_infer_model/scale_0", &output0);
  predictor->Fetch("save_infer_model/scale_1", &output1);
  float* out_ptr0 = reinterpret_cast<float*>(output0.data.data());
  float* out_ptr1 = reinterpret_cast<float*>(output1.data.data());

  std::cout << "print output0:" << std::endl;
  int numel = output0.data.length() / sizeof(float);
  int stride = numel / 20;
  stride = stride > 0 ? stride : 1;
  // Use a signed index: numel is an int, so a size_t index would trigger a
  // signed/unsigned comparison.
  for (int j = 0; j < numel; j += stride) {
    std::cout << out_ptr0[j] << " ";
  }
  std::cout << std::endl;

  std::cout << "print output1:" << std::endl;
  numel = output1.data.length() / sizeof(float);
  stride = numel / 20;
  stride = stride > 0 ? stride : 1;
  for (int j = 0; j < numel; j += stride) {
    std::cout << out_ptr1[j] << " ";
  }
  std::cout << std::endl;
  return 0;
}
@@ -49,4 +49,7 @@ int main() {
   tester.Predict("expend", in_dims, out_dims, inputs, outputs, attrs);
 }
 #endif
+#else
+int main() {}
 #endif