From 8d02789f0f148ae1dc3ffbfaac09940113a84f9a Mon Sep 17 00:00:00 2001
From: Yanzhan Yang
Date: Mon, 12 Aug 2019 16:31:52 +0800
Subject: [PATCH] add wrap interface (#1788)

* add wrap interface

* fix style

* fix fpga compilation error

* fix style
---
 src/io/paddle_mobile_wrap.cpp | 216 ++++++++++++++++++++++++++++++++++
 src/io/paddle_mobile_wrap.h   | 113 ++++++++++++++++++
 test/net/test_wrap.cpp        |  50 ++++++++
 3 files changed, 379 insertions(+)
 create mode 100644 src/io/paddle_mobile_wrap.cpp
 create mode 100644 src/io/paddle_mobile_wrap.h
 create mode 100644 test/net/test_wrap.cpp

diff --git a/src/io/paddle_mobile_wrap.cpp b/src/io/paddle_mobile_wrap.cpp
new file mode 100644
index 0000000000..d84d9dd4cd
--- /dev/null
+++ b/src/io/paddle_mobile_wrap.cpp
@@ -0,0 +1,216 @@
+/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "io/paddle_mobile_wrap.h"
+
+#include "io/api_paddle_mobile.h"
+#include "io/paddle_mobile.h"
+
+namespace paddle_mobile {
+namespace wrap {
+
+#ifndef PADDLE_MOBILE_FPGA
+
+// ddim class
+int DDim::size() { return static_cast<int>(dims.size()); }
+
+int64_t &DDim::operator[](int idx) {
+  if (0 <= idx && idx < static_cast<int>(dims.size())) {
+    return dims[idx];
+  }
+  // Returning a reference to a function-local would be undefined behavior;
+  // use a static sentinel as the out-of-range fallback instead.
+  static int64_t non_exist = 0;
+  return non_exist;
+}
+
+int64_t DDim::operator[](int idx) const {
+  if (0 <= idx && idx < static_cast<int>(dims.size())) {
+    return dims[idx];
+  }
+  return 0;
+}
+
+DDim make_ddim(const std::vector<int64_t> &dims) {
+  DDim ddim;
+  for (auto dim : dims) {
+    ddim.dims.push_back(dim);
+  }
+  return ddim;
+}
+
+// tensor class
+Tensor::Tensor(float *data, DDim ddim) {
+  this->data_ = data;
+  this->ddim_ = ddim;
+}
+
+template <typename T>
+float *Tensor::data() const {
+  return this->data_;
+}
+
+DDim Tensor::dims() const { return this->ddim_; }
+
+// net class
+template <typename Device, typename T>
+void Net<Device, T>::SetThreadNum(int threads) {
+  auto engine =
+      (paddle_mobile::PaddleMobile<paddle_mobile::CPU, float> *)this->engine_;
+  if (engine != nullptr) {
+    engine->SetThreadNum(threads);
+  }
+}
+
+template <typename Device, typename T>
+PMStatus Net<Device, T>::Load(const std::string &dirname, const bool optimize,
+                              const bool quantification, const int batch_size,
+                              const bool lod_mode) {
+  auto engine =
+      (paddle_mobile::PaddleMobile<paddle_mobile::CPU, float> *)this->engine_;
+  if (engine != nullptr) {
+    paddle_mobile::PMStatus status =
+        engine->Load(dirname, false, false, 1, true);
+    return status == paddle_mobile::PMSuccess ? PMSuccess : PMUnKownError;
+  }
+  return PMUnKownError;
+}
+
+template <typename Device, typename T>
+PMStatus Net<Device, T>::Load(const std::string &model_path,
+                              const std::string &para_path,
+                              const bool optimize, const bool quantification,
+                              const int batch_size, const bool lod_mode) {
+  auto engine =
+      (paddle_mobile::PaddleMobile<paddle_mobile::CPU, float> *)this->engine_;
+  if (engine != nullptr) {
+    paddle_mobile::PMStatus status =
+        engine->Load(model_path, para_path, false, false, 1, true);
+    return status == paddle_mobile::PMSuccess ? PMSuccess : PMUnKownError;
+  }
+  return PMUnKownError;
+}
+
+template <typename Device, typename T>
+bool Net<Device, T>::LoadCombinedMemory(size_t model_len,
+                                        const uint8_t *model_buf,
+                                        size_t combined_params_len,
+                                        uint8_t *combined_params_buf,
+                                        bool optimize, bool quantification,
+                                        int batch_size, bool lod_mode) {
+  auto engine =
+      (paddle_mobile::PaddleMobile<paddle_mobile::CPU, float> *)this->engine_;
+  if (engine != nullptr) {
+    bool status =
+        engine->LoadCombinedMemory(model_len, model_buf, combined_params_len,
+                                   combined_params_buf, false, false, 1, true);
+    return status;
+  }
+  return false;
+}
+
+template <typename Device, typename T>
+PMStatus Net<Device, T>::Predict(const Tensor &input) {
+  auto engine =
+      (paddle_mobile::PaddleMobile<paddle_mobile::CPU, float> *)this->engine_;
+  if (engine != nullptr) {
+    auto input_data = input.data<float>();
+    auto input_dims = input.dims();
+    std::vector<int64_t> input_dims_as_vector = input_dims.dims;
+    paddle_mobile::framework::Tensor input_inner(
+        input_data, paddle_mobile::framework::make_ddim(input_dims_as_vector));
+    paddle_mobile::PMStatus status = engine->Predict(input_inner);
+    return status == paddle_mobile::PMSuccess ? PMSuccess : PMUnKownError;
+  }
+  return PMUnKownError;
+}
+
+template <typename Device, typename T>
+std::vector<T> Net<Device, T>::Predict(const std::vector<T> &input,
+                                       const std::vector<int64_t> &dims) {
+  auto engine =
+      (paddle_mobile::PaddleMobile<paddle_mobile::CPU, float> *)this->engine_;
+  if (engine != nullptr) {
+    auto result = engine->Predict(input, dims);
+    return result;
+  }
+  return std::vector<T>();
+}
+
+template <typename Device, typename T>
+PMStatus Net<Device, T>::Predict() {
+  auto engine =
+      (paddle_mobile::PaddleMobile<paddle_mobile::CPU, float> *)this->engine_;
+  if (engine != nullptr) {
+    paddle_mobile::PMStatus status = engine->Predict();
+    return status == paddle_mobile::PMSuccess ? PMSuccess : PMUnKownError;
+  }
+  return PMUnKownError;
+}
+
+template <typename Device, typename T>
+void Net<Device, T>::Feed(const std::string &var_name, const Tensor &input) {
+  auto engine =
+      (paddle_mobile::PaddleMobile<paddle_mobile::CPU, float> *)this->engine_;
+  if (engine != nullptr) {
+    auto input_data = input.data<float>();
+    auto input_dims = input.dims();
+    std::vector<int64_t> input_dims_as_vector = input_dims.dims;
+    paddle_mobile::framework::Tensor input_inner(
+        input_data, paddle_mobile::framework::make_ddim(input_dims_as_vector));
+    engine->Feed(var_name, input_inner);
+  }
+}
+
+template <typename Device, typename T>
+std::shared_ptr<Tensor> Net<Device, T>::Fetch(const std::string &var_name) {
+  auto engine =
+      (paddle_mobile::PaddleMobile<paddle_mobile::CPU, float> *)this->engine_;
+  if (engine != nullptr) {
+    auto output_inner = engine->Fetch(var_name);
+    auto ddim_inner = output_inner->dims();
+    std::vector<int64_t> ddim_as_vector;
+    for (int i = 0; i < ddim_inner.size(); i++) {
+      ddim_as_vector.push_back(ddim_inner[i]);
+    }
+    auto ddim = make_ddim(ddim_as_vector);
+    auto output_data = output_inner->data<float>();
+    std::shared_ptr<Tensor> ptr(new Tensor(output_data, ddim));
+    return ptr;
+  }
+  return nullptr;
+}
+
+template <typename Device, typename T>
+Net<Device, T>::Net() {
+  if (this->engine_ == nullptr) {
+    PaddleMobileConfigInternal config;
+    this->engine_ =
+        new paddle_mobile::PaddleMobile<paddle_mobile::CPU, float>(config);
+  }
+}
+
+template <typename Device, typename T>
+Net<Device, T>::~Net() {
+  if (this->engine_ != nullptr) {
+    auto engine =
+        (paddle_mobile::PaddleMobile<paddle_mobile::CPU, float> *)
+            this->engine_;
+    delete engine;
+    this->engine_ = nullptr;
+  }
+}
+
+template class Net<CPU, float>;
+template float *Tensor::data<float>() const;
+
+#endif
+
+}  // namespace wrap
+}  // namespace paddle_mobile
diff --git a/src/io/paddle_mobile_wrap.h b/src/io/paddle_mobile_wrap.h
new file mode 100644
index 0000000000..7a827bd13b
--- /dev/null
+++ b/src/io/paddle_mobile_wrap.h
@@ -0,0 +1,113 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace paddle_mobile {
+namespace wrap {
+
+#ifndef PADDLE_MOBILE_FPGA
+
+// device type
+enum DeviceTypeEnum {
+  kINVALID = -1,
+  kCPU = 0,
+  kFPGA = 1,
+  kGPU_MALI = 2,
+  kGPU_CL = 3
+};
+
+template <DeviceTypeEnum T>
+struct DeviceType {};
+
+typedef DeviceType<kCPU> CPU;
+typedef DeviceType<kFPGA> FPGA;
+typedef DeviceType<kGPU_MALI> GPU_MALI;
+typedef DeviceType<kGPU_CL> GPU_CL;
+
+// ddim class
+class DDim {
+ public:
+  int size();
+  int64_t &operator[](int idx);
+  int64_t operator[](int idx) const;
+
+  std::vector<int64_t> dims;
+};
+DDim make_ddim(const std::vector<int64_t> &dims);
+
+// tensor class
+class Tensor {
+ public:
+  Tensor(float *data, DDim ddim);
+
+  template <typename T>
+  float *data() const;
+  DDim dims() const;
+
+  float *data_;
+  DDim ddim_;
+};
+
+// pm status
+enum PMStatus {
+  PMSuccess = 0xFF,        /*!< No errors */
+  PMNotInitialized = 0x01, /*!< Data not initialized. */
+  PMInvalidValue = 0x02,   /*!< Incorrect variable value. */
+  PMMemAllocFailed = 0x03, /*!< Memory allocation error. */
+  PMUnKownError = 0x04,    /*!< Unknown error. */
+  PMOutOfAuthority = 0x05, /*!< Tried to modify data not your own. */
+  PMOutOfMem = 0x06,       /*!< OOM error. */
+  PMUnImplError = 0x07,    /*!< Unimplemented error. */
+  PMWrongDevice = 0x08     /*!< Incorrect device. */
+};
+
+// net class
+template <typename Device, typename T = float>
+class Net {
+ public:
+  Net();
+  ~Net();
+  void SetThreadNum(int thread_num);
+  PMStatus Load(const std::string &dirname, const bool optimize = false,
+                const bool quantification = false, const int batch_size = 1,
+                const bool lod_mode = false);
+  PMStatus Load(const std::string &model_path, const std::string &para_path,
+                const bool optimize = false, const bool quantification = false,
+                const int batch_size = 1, const bool lod_mode = false);
+  bool LoadCombinedMemory(size_t model_len, const uint8_t *model_buf,
+                          size_t combined_params_len,
+                          uint8_t *combined_params_buf, bool optimize = false,
+                          bool quantification = false, int batch_size = 1,
+                          bool lod_mode = false);
+  PMStatus Predict(const Tensor &input);
+  std::vector<T> Predict(const std::vector<T> &input,
+                         const std::vector<int64_t> &dims);
+  PMStatus Predict();
+  void Feed(const std::string &var_name, const Tensor &input);
+  std::shared_ptr<Tensor> Fetch(const std::string &var_name);
+  void *engine_ = nullptr;
+};
+
+#endif
+
+}  // namespace wrap
+}  // namespace paddle_mobile
diff --git a/test/net/test_wrap.cpp b/test/net/test_wrap.cpp
new file mode 100644
index 0000000000..0e507ce950
--- /dev/null
+++ b/test/net/test_wrap.cpp
@@ -0,0 +1,50 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <iostream>
+#include <vector>
+#include "../test_helper.h"
+#include "../test_include.h"
+#include "io/paddle_mobile_wrap.h"
+
+int main(int argc, char *argv[]) {
+#ifndef PADDLE_MOBILE_FPGA
+  paddle_mobile::wrap::Net<paddle_mobile::wrap::CPU> *net =
+      new paddle_mobile::wrap::Net<paddle_mobile::wrap::CPU>();
+  net->Load("./checked_model/model", "./checked_model/params", false, false, 1,
+            true);
+  int size = 1 * 3 * 64 * 64;
+  float *data = new float[size];
+  for (int i = 0; i < size; i++) {
+    data[i] = 0.0;
+  }
+  std::vector<int64_t> shape{1, 3, 64, 64};
+  paddle_mobile::wrap::Tensor input(data,
+                                    paddle_mobile::wrap::make_ddim(shape));
+  net->Feed("data", input);
+  net->Predict();
+  auto output = net->Fetch("save_infer_model/scale_0");
+  int output_size = 1;
+  std::cout << "output shape: ";
+  for (int i = 0; i < output->dims().size(); i++) {
+    std::cout << output->dims()[i] << " ";
+    output_size *= output->dims()[i];
+  }
+  std::cout << std::endl;
+  std::cout << "output data: ";
+  for (int i = 0; i < output_size; i++) {
+    std::cout << output->data<float>()[i] << std::endl;
+  }
+#endif
+}
-- 
GitLab
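
Usage note: test_wrap.cpp drives the new interface through the Feed / Predict() / Fetch sequence. The wrapper also exposes a one-shot Predict(const Tensor &) overload that the test does not exercise; the minimal sketch below shows that path. It assumes a non-combined model directory ./my_model and an output variable named "fetch" — both hypothetical placeholders, not names from the patch. Also note that this version of the wrapper hard-codes optimize=false, quantification=false, batch_size=1, lod_mode=true when delegating to PaddleMobile::Load, so the trailing Load arguments are effectively ignored.

#include <iostream>
#include <vector>
#include "io/paddle_mobile_wrap.h"

int main() {
  // Stack allocation is fine: ~Net() deletes the internal engine.
  paddle_mobile::wrap::Net<paddle_mobile::wrap::CPU> net;
  net.SetThreadNum(2);  // optional: multi-threaded CPU execution

  // Separate-files overload; "./my_model" is a hypothetical path.
  if (net.Load("./my_model", false, false, 1, true) !=
      paddle_mobile::wrap::PMSuccess) {
    std::cerr << "load failed" << std::endl;
    return 1;
  }

  // Zero-filled NCHW input of shape 1x3x64x64, wrapped without copying.
  std::vector<float> buf(1 * 3 * 64 * 64, 0.f);
  paddle_mobile::wrap::Tensor input(
      buf.data(), paddle_mobile::wrap::make_ddim({1, 3, 64, 64}));

  // One-shot path: copies the input into the engine and runs the graph,
  // instead of the separate Feed()/Predict() calls used in test_wrap.cpp.
  if (net.Predict(input) == paddle_mobile::wrap::PMSuccess) {
    auto out = net.Fetch("fetch");  // hypothetical output var name
    std::cout << "output rank: " << out->dims().size() << std::endl;
  }
  return 0;
}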