diff --git a/mobile/src/io/api_paddle_mobile.cc b/mobile/src/io/api_paddle_mobile.cc
index 8bfc91998f600726c1bcf8fe932372928928e334..b01407bb3759eb18552a8d51f4826f69bb1bbe5f 100644
--- a/mobile/src/io/api_paddle_mobile.cc
+++ b/mobile/src/io/api_paddle_mobile.cc
@@ -262,6 +262,44 @@ void PaddleMobilePredictor<T, P>::Predict_From_To(int start, int end) {
   paddle_mobile_->Predict_From_To(start, end);
 }
+#else
+// V2-style API (non-CL build): Feed named inputs, Run once, Fetch named
+// outputs.
+template <typename T, Precision P>
+void PaddleMobilePredictor<T, P>::Feed(const std::string &var_name,
+                                       const PaddleTensor &input) {
+  framework::DDim ddim = framework::make_ddim(input.shape);
+  // Wraps the caller-owned buffer without copying; `input` must stay alive
+  // until Run() completes.
+  framework::Tensor input_tensor(static_cast<T *>(input.data.data()), ddim);
+  paddle_mobile_->Feed(var_name, input_tensor);
+}
+
+template <typename T, Precision P>
+void PaddleMobilePredictor<T, P>::Fetch(const std::string &var_name,
+                                        PaddleTensor *output) {
+  auto output_tensor = paddle_mobile_->Fetch(var_name);
+  auto ddim = output_tensor->dims();
+
+  output->shape.clear();
+  for (int i = 0; i < ddim.size(); i++) {
+    output->shape.push_back(static_cast<int>(ddim[i]));
+  }
+
+  // Grow the caller's buffer only when it is too small, then copy out.
+  // size_t (not int) to match PaddleBuf::length() and memcpy.
+  size_t length = output_tensor->numel() * sizeof(T);
+  if (output->data.length() < length) {
+    output->data.Resize(length);
+  }
+  memcpy(output->data.data(), output_tensor->template data<T>(), length);
+}
+
+template <typename T, Precision P>
+bool PaddleMobilePredictor<T, P>::Run() {
+  // Fix: the original fell off the end of a bool function (undefined
+  // behavior). Report whether prediction succeeded.
+  return paddle_mobile_->Predict() == PMSuccess;
+}
 #endif
 
 template <typename T, Precision P>
 PaddleMobilePredictor<T, P>::~PaddleMobilePredictor() {
diff --git a/mobile/src/io/api_paddle_mobile.h b/mobile/src/io/api_paddle_mobile.h
index 63718acd990de664bc06f1af973755aa4336a184..6a33e2812a0a8726d8db83d51a5ea2400633e30e 100644
--- a/mobile/src/io/api_paddle_mobile.h
+++ b/mobile/src/io/api_paddle_mobile.h
@@ -39,7 +39,10 @@ class PaddleMobilePredictor : public PaddlePredictor {
   void FetchPaddleTensors(std::vector<PaddleTensor>* outputs) override;
   void FetchPaddleTensors(PaddleTensor* outputs, int id) override;
   void GetPaddleTensor(const std::string& name, PaddleTensor* output) override;
-
+#else
+  void Feed(const std::string& var_name, const PaddleTensor& input) override;
+  void Fetch(const std::string& var_name, PaddleTensor* output) override;
+  bool Run() override;
 #endif
 
   ~PaddleMobilePredictor() override;
diff --git a/mobile/src/io/paddle_inference_api.h b/mobile/src/io/paddle_inference_api.h
index c89b998144badcf7b88dbbfcaa631a25df7892d5..6f3ba182f6f3ff41763ec950f2632ae288bdf03b 100644
--- a/mobile/src/io/paddle_inference_api.h
+++ b/mobile/src/io/paddle_inference_api.h
@@ -191,6 +191,10 @@ class PaddlePredictor {
   virtual void FetchPaddleTensors(PaddleTensor* outputs, int id) = 0;
   virtual void GetPaddleTensor(const std::string& name,
                                PaddleTensor* output) = 0;
+#else
+  virtual void Feed(const std::string& var_name, const PaddleTensor& input) = 0;
+  virtual void Fetch(const std::string& var_name, PaddleTensor* output) = 0;
+  virtual bool Run() = 0;
 #endif
 
  protected:
diff --git a/mobile/test/CMakeLists.txt b/mobile/test/CMakeLists.txt
index b2c7fb98f84ca225d1a4e360403a09c16366c409..4a8729551e69fa114b1c90c589832f775e3f8497 100644
--- a/mobile/test/CMakeLists.txt
+++ b/mobile/test/CMakeLists.txt
@@ -542,6 +542,9 @@ if (ENABLE_ALL_TEST)
     # gen test
     ADD_EXECUTABLE(test-net-performance net/test_net_performance.cpp test_helper.h test_include.h executor_for_test.h)
     target_link_libraries(test-net-performance paddle-mobile)
+
+    ADD_EXECUTABLE(test-inference-api-v2 net/test_inference_api_v2.cpp test_helper.h test_include.h executor_for_test.h)
+    target_link_libraries(test-inference-api-v2 paddle-mobile)
 endif ()
 else()
     # gen test
@@ -550,4 +553,7 @@ else()
 
     ADD_EXECUTABLE(test-net-benchmark net/test_net_benchmark.cpp test_helper.h test_include.h)
     target_link_libraries(test-net-benchmark paddle-mobile)
+
+    ADD_EXECUTABLE(test-inference-api-v2 net/test_inference_api_v2.cpp test_helper.h test_include.h executor_for_test.h)
+    target_link_libraries(test-inference-api-v2 paddle-mobile)
 endif()
diff --git a/mobile/test/executor_for_test.h b/mobile/test/executor_for_test.h
index 6f1680c5135d3d7a02f572b36b8256000a6d6dee..0a67eea5d5da3f8c7f155768640e7ba53b89abee 100644
--- a/mobile/test/executor_for_test.h
+++ b/mobile/test/executor_for_test.h
@@ -14,9 +14,9 @@ limitations under the License. */
 
 #pragma once
+#include <map>
 #include <string>
 #include <vector>
-#include <map>
 #include "common/log.h"
 #include "framework/executor.h"
 #include "framework/op_registry.h"
diff --git a/mobile/test/executor_for_test_opencl.h b/mobile/test/executor_for_test_opencl.h
index bc24541f13a4c619964e72fd43cfa1de0c771875..3a8af875928898135a55884df58e3067f146a4f2 100644
--- a/mobile/test/executor_for_test_opencl.h
+++ b/mobile/test/executor_for_test_opencl.h
@@ -15,10 +15,11 @@ limitations under the License. */
 
 #pragma once
 #ifdef PADDLE_MOBILE_CL
+#include <map>
 #include <string>
 #include <vector>
-#include <map>
+#include "./test_helper.h"
 #include "common/log.h"
 #include "framework/cl/cl_helper.h"
 #include "framework/cl/cl_tensor.h"
@@ -26,18 +27,17 @@ limitations under the License. */
 #include "framework/op_registry.h"
 #include "operators/feed_op.h"
 #include "operators/fetch_op.h"
-#include "./test_helper.h"
+using paddle_mobile::framework::AttributeMap;
 using paddle_mobile::framework::BlockDesc;
 using paddle_mobile::framework::DDim;
 using paddle_mobile::framework::Executor;
 using paddle_mobile::framework::LoDTensor;
 using paddle_mobile::framework::OpDesc;
+using paddle_mobile::framework::OperatorBase;
 using paddle_mobile::framework::Program;
 using paddle_mobile::framework::Tensor;
 using paddle_mobile::framework::Variable;
-using paddle_mobile::framework::OperatorBase;
-using paddle_mobile::framework::AttributeMap;
 using std::string;
 using std::vector;
 
 namespace paddle_mobile {
diff --git a/mobile/test/net/test_inference_api_v2.cpp b/mobile/test/net/test_inference_api_v2.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..76997bcb8f7f8fdd9f96f6c8b403006823c4724b
--- /dev/null
+++ b/mobile/test/net/test_inference_api_v2.cpp
@@ -0,0 +1,129 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <iostream>
+#include "../test_helper.h"
+#include "io/paddle_inference_api.h"
+
+using namespace paddle_mobile;  // NOLINT
+
+PaddleMobileConfig GetConfig() {
+  PaddleMobileConfig config;
+  config.precision = PaddleMobileConfig::FP32;
+  config.device = PaddleMobileConfig::kGPU_CL;
+  config.pre_post_type = PaddleMobileConfig::NONE_PRE_POST;
+
+  config.prog_file = "../models/ercy/model";
+  config.param_file = "../models/ercy/params";
+  config.lod_mode = false;
+  config.load_when_predict = false;
+  return config;
+}
+
+int main() {
+  PaddleMobileConfig config = GetConfig();
+  auto predictor =
+      CreatePaddlePredictor<PaddleMobileConfig,
+                            PaddleEngineKind::kPaddleMobile>(config);
+
+  // reliable
+  int re_len = 1 * 1 * 64 * 72;
+  std::vector<float> re_v;
+  std::vector<int64_t> re_dims{1, 1, 64, 72};
+  GetInput<float>(g_test_image_1x3x224x224, &re_v, re_dims);
+
+  PaddleTensor re;
+  re.shape = std::vector<int>({1, 1, 64, 72});
+  re.data = PaddleBuf(re_v.data(), re_len * sizeof(float));
+  re.dtype = PaddleDType::FLOAT32;
+  re.layout = LayoutType::LAYOUT_CHW;
+
+  // grid
+  int grid_len = 1 * 64 * 72 * 2;
+  std::vector<float> grid_v;
+  std::vector<int64_t> grid_dims{1, 64, 72, 2};
+  GetInput<float>(g_test_image_1x3x224x224, &grid_v, grid_dims);
+
+  PaddleTensor grid;
+  grid.shape = std::vector<int>({1, 64, 72, 2});
+  grid.data = PaddleBuf(grid_v.data(), grid_len * sizeof(float));
+  grid.dtype = PaddleDType::FLOAT32;
+  grid.layout = LayoutType::LAYOUT_CHW;
+
+  // last_input
+  int last_len = 1 * 128 * 64 * 72;
+  std::vector<float> last_v;
+  std::vector<int64_t> last_dims{1, 128, 64, 72};
+  GetInput<float>(g_test_image_1x3x224x224, &last_v, last_dims);
+
+  PaddleTensor last;
+  last.shape = std::vector<int>({1, 128, 64, 72});
+  last.data = PaddleBuf(last_v.data(), last_len * sizeof(float));
+  last.dtype = PaddleDType::FLOAT32;
+  last.layout = LayoutType::LAYOUT_CHW;
+
+  // input_rgb
+  int input_rgb_len = 1 * 4 * 256 * 288;
+  std::vector<float> input_rgb_v;
+  std::vector<int64_t> input_rgb_dims{1, 4, 256, 288};
+  GetInput<float>(g_test_image_1x3x224x224, &input_rgb_v, input_rgb_dims);
+
+  PaddleTensor input_rgb;
+  input_rgb.shape = std::vector<int>({1, 4, 256, 288});
+  input_rgb.data = PaddleBuf(input_rgb_v.data(), input_rgb_len * sizeof(float));
+  input_rgb.dtype = PaddleDType::FLOAT32;
+  input_rgb.layout = LayoutType::LAYOUT_CHW;
+
+  PaddleTensor output0;
+  output0.shape = std::vector<int>({});
+  output0.data = PaddleBuf();
+  output0.dtype = PaddleDType::FLOAT32;
+  output0.layout = LayoutType::LAYOUT_CHW;
+
+  PaddleTensor output1;
+  output1.shape = std::vector<int>({});
+  output1.data = PaddleBuf();
+  output1.dtype = PaddleDType::FLOAT32;
+  output1.layout = LayoutType::LAYOUT_CHW;
+
+  predictor->Feed("reliable", re);
+  predictor->Feed("grid", grid);
+  predictor->Feed("last_input", last);
+  predictor->Feed("input_rgb", input_rgb);
+  predictor->Run();
+  predictor->Fetch("save_infer_model/scale_0", &output0);
+  predictor->Fetch("save_infer_model/scale_1", &output1);
+
+  float* out_ptr0 = reinterpret_cast<float*>(output0.data.data());
+  float* out_ptr1 = reinterpret_cast<float*>(output1.data.data());
+  std::cout << " print output0 : " << std::endl;
+  int numel = output0.data.length() / sizeof(float);
+  int stride = numel / 20;
+  stride = stride > 0 ? stride : 1;
+  for (int j = 0; j < numel; j += stride) {
+    std::cout << out_ptr0[j] << " ";
+  }
+  std::cout << std::endl;
+
+  std::cout << " print output1 : " << std::endl;
+  numel = output1.data.length() / sizeof(float);
+  stride = numel / 20;
+  stride = stride > 0 ? stride : 1;
+  for (int j = 0; j < numel; j += stride) {
+    std::cout << out_ptr1[j] << " ";
+  }
+  std::cout << std::endl;
+
+  return 0;
+}
diff --git a/mobile/test/operators/test_expend_op.cpp b/mobile/test/operators/test_expend_op.cpp
index 80b5a461afe0b292e9e9cfee93061fe1acaa9090..cbe307ac696b1ced89fdc644590f6a83cb56b644 100644
--- a/mobile/test/operators/test_expend_op.cpp
+++ b/mobile/test/operators/test_expend_op.cpp
@@ -49,4 +49,7 @@ int main() {
   tester.Predict("expend", in_dims, out_dims, inputs, outputs, attrs);
 }
 #endif
+
+#else
+int main() {}
 #endif