Commit 8d02789f authored by Yanzhan Yang, committed by zp7

add wrap interface (#1788)

* add wrap interface

* fix style

* fix fpga compilation error

* fix style
Parent a66ee2d5
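A minimal usage sketch of the new wrap interface, condensed from the test added in this commit. The model paths, the input name "data", and the fetch key "save_infer_model/scale_0" are example values taken from that test, and run_inference is just an illustrative name:

    #include <vector>
    #include "io/paddle_mobile_wrap.h"

    // Hypothetical helper; everything it calls is part of this commit.
    int run_inference() {
      paddle_mobile::wrap::Net<paddle_mobile::wrap::CPU> net;
      if (net.Load("./checked_model/model", "./checked_model/params") !=
          paddle_mobile::wrap::PMSuccess) {
        return -1;
      }
      std::vector<float> buffer(1 * 3 * 64 * 64, 0.f);  // all-zero demo input
      paddle_mobile::wrap::Tensor input(
          buffer.data(), paddle_mobile::wrap::make_ddim({1, 3, 64, 64}));
      net.Feed("data", input);
      net.Predict();
      auto output = net.Fetch("save_infer_model/scale_0");
      return output != nullptr ? 0 : -1;
    }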
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "io/paddle_mobile_wrap.h"
#include "io/api_paddle_mobile.h"
#include "io/paddle_mobile.h"
namespace paddle_mobile {
namespace wrap {
#ifndef PADDLE_MOBILE_FPGA
// ddim class
int DDim::size() const { return static_cast<int>(dims.size()); }

int64_t &DDim::operator[](int idx) {
  if (0 <= idx && idx < size()) {
    return dims[idx];
  }
  // Out-of-range fallback: a function-local static, so no reference to a
  // stack local ever escapes this function.
  static int64_t non_exist = 0;
  non_exist = 0;
  return non_exist;
}

int64_t DDim::operator[](int idx) const {
  if (0 <= idx && idx < size()) {
    return dims[idx];
  }
  return 0;
}

DDim make_ddim(const std::vector<int64_t> &dims) {
  DDim ddim;
  ddim.dims = dims;
  return ddim;
}
// tensor class
Tensor::Tensor(float *data, DDim ddim) {
  this->data_ = data;
  this->ddim_ = ddim;
}

// Only the float specialization is instantiated (see the explicit
// instantiation at the end of this file); the template parameter is unused.
template <typename T>
float *Tensor::data() const {
  return this->data_;
}

DDim Tensor::dims() const { return this->ddim_; }
// net class
template <typename Device>
void Net<Device>::SetThreadNum(int threads) {
  auto engine = static_cast<paddle_mobile::PaddleMobile<paddle_mobile::CPU> *>(
      this->engine_);
  if (engine != nullptr) {
    engine->SetThreadNum(threads);
  }
}
template <typename Device>
PMStatus Net<Device>::Load(const std::string &dirname, const bool optimize,
                           const bool quantification, const int batch_size,
                           const bool lod_mode) {
  auto engine = static_cast<paddle_mobile::PaddleMobile<paddle_mobile::CPU> *>(
      this->engine_);
  if (engine != nullptr) {
    // Forward the caller's loading options to the underlying engine.
    paddle_mobile::PMStatus status =
        engine->Load(dirname, optimize, quantification, batch_size, lod_mode);
    return status == paddle_mobile::PMSuccess ? PMSuccess : PMUnKownError;
  }
  return PMUnKownError;
}
template <typename Device>
PMStatus Net<Device>::Load(const std::string &model_path,
                           const std::string &para_path, const bool optimize,
                           const bool quantification, const int batch_size,
                           const bool lod_mode) {
  auto engine = static_cast<paddle_mobile::PaddleMobile<paddle_mobile::CPU> *>(
      this->engine_);
  if (engine != nullptr) {
    paddle_mobile::PMStatus status =
        engine->Load(model_path, para_path, optimize, quantification,
                     batch_size, lod_mode);
    return status == paddle_mobile::PMSuccess ? PMSuccess : PMUnKownError;
  }
  return PMUnKownError;
}
template <typename Device>
bool Net<Device>::LoadCombinedMemory(size_t model_len, const uint8_t *model_buf,
                                     size_t combined_params_len,
                                     uint8_t *combined_params_buf,
                                     bool optimize, bool quantification,
                                     int batch_size, bool lod_mode) {
  auto engine = static_cast<paddle_mobile::PaddleMobile<paddle_mobile::CPU> *>(
      this->engine_);
  if (engine != nullptr) {
    return engine->LoadCombinedMemory(model_len, model_buf,
                                      combined_params_len, combined_params_buf,
                                      optimize, quantification, batch_size,
                                      lod_mode);
  }
  return false;
}
template <typename Device>
PMStatus Net<Device>::Predict(const Tensor &input) {
  auto engine = static_cast<paddle_mobile::PaddleMobile<paddle_mobile::CPU> *>(
      this->engine_);
  if (engine != nullptr) {
    auto input_data = input.data<float>();
    auto input_dims = input.dims();
    std::vector<int64_t> input_dims_as_vector = input_dims.dims;
    paddle_mobile::framework::Tensor input_inner(
        input_data, paddle_mobile::framework::make_ddim(input_dims_as_vector));
    paddle_mobile::PMStatus status = engine->Predict(input_inner);
    return status == paddle_mobile::PMSuccess ? PMSuccess : PMUnKownError;
  }
  return PMUnKownError;
}
template <typename Device>
std::vector<float> Net<Device>::Predict(const std::vector<float> &input,
                                        const std::vector<int64_t> &dims) {
  auto engine = static_cast<paddle_mobile::PaddleMobile<paddle_mobile::CPU> *>(
      this->engine_);
  if (engine != nullptr) {
    return engine->Predict(input, dims);
  }
  return std::vector<float>();
}
template <typename Device>
PMStatus Net<Device>::Predict() {
  auto engine = static_cast<paddle_mobile::PaddleMobile<paddle_mobile::CPU> *>(
      this->engine_);
  if (engine != nullptr) {
    paddle_mobile::PMStatus status = engine->Predict();
    return status == paddle_mobile::PMSuccess ? PMSuccess : PMUnKownError;
  }
  return PMUnKownError;
}
template <typename Device>
void Net<Device>::Feed(const std::string &var_name, const Tensor &input) {
  auto engine = static_cast<paddle_mobile::PaddleMobile<paddle_mobile::CPU> *>(
      this->engine_);
  if (engine != nullptr) {
    auto input_data = input.data<float>();
    auto input_dims = input.dims();
    std::vector<int64_t> input_dims_as_vector = input_dims.dims;
    paddle_mobile::framework::Tensor input_inner(
        input_data, paddle_mobile::framework::make_ddim(input_dims_as_vector));
    engine->Feed(var_name, input_inner);
  }
}
template <typename Device>
std::shared_ptr<Tensor> Net<Device>::Fetch(const std::string &var_name) {
  auto engine = static_cast<paddle_mobile::PaddleMobile<paddle_mobile::CPU> *>(
      this->engine_);
  if (engine != nullptr) {
    auto output_inner = engine->Fetch(var_name);
    auto ddim_inner = output_inner->dims();
    std::vector<int64_t> ddim_as_vector;
    for (int i = 0; i < ddim_inner.size(); i++) {
      ddim_as_vector.push_back(ddim_inner[i]);
    }
    auto ddim = make_ddim(ddim_as_vector);
    auto output_data = output_inner->data<float>();
    return std::make_shared<Tensor>(output_data, ddim);
  }
  return nullptr;
}
template <typename Device>
Net<Device>::Net() {
  if (this->engine_ == nullptr) {
    PaddleMobileConfigInternal config;
    this->engine_ = new paddle_mobile::PaddleMobile<paddle_mobile::CPU>(config);
  }
}

template <typename Device>
Net<Device>::~Net() {
  if (this->engine_ != nullptr) {
    auto engine =
        static_cast<paddle_mobile::PaddleMobile<paddle_mobile::CPU> *>(
            this->engine_);
    delete engine;
    this->engine_ = nullptr;
  }
}
template class Net<CPU>;
template float *Tensor::data<float>() const;
#endif
} // namespace wrap
} // namespace paddle_mobile
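// file: io/paddle_mobile_wrap.h (the header implemented above; path taken
// from the #include at the top of the implementation)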
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
namespace paddle_mobile {
namespace wrap {
#ifndef PADDLE_MOBILE_FPGA
// device type
enum DeviceTypeEnum {
  kINVALID = -1,
  kCPU = 0,
  kFPGA = 1,
  kGPU_MALI = 2,
  kGPU_CL = 3
};
template <DeviceTypeEnum T>
struct DeviceType {};
typedef DeviceType<kCPU> CPU;
typedef DeviceType<kFPGA> FPGA;
typedef DeviceType<kGPU_MALI> GPU_MALI;
typedef DeviceType<kGPU_CL> GPU_CL;
// ddim class
class DDim {
 public:
  int size() const;
  int64_t &operator[](int idx);
  int64_t operator[](int idx) const;

  std::vector<int64_t> dims;
};
DDim make_ddim(const std::vector<int64_t> &dims);
// tensor class
class Tensor {
 public:
  Tensor(float *data, DDim ddim);
  // Only the float specialization is provided by the implementation file.
  template <typename T>
  float *data() const;
  DDim dims() const;

  float *data_;
  DDim ddim_;
};
// pm status
enum PMStatus {
  PMSuccess = 0xFF,        /*!< No errors. */
  PMNotInitialized = 0x01, /*!< Data not initialized. */
  PMInvalidValue = 0x02,   /*!< Incorrect variable value. */
  PMMemAllocFailed = 0x03, /*!< Memory allocation error. */
  PMUnKownError = 0x04,    /*!< Unknown error. */
  PMOutOfAuthority = 0x05, /*!< Attempt to modify data not owned by the caller. */
  PMOutOfMem = 0x06,       /*!< Out-of-memory error. */
  PMUnImplError = 0x07,    /*!< Unimplemented error. */
  PMWrongDevice = 0x08     /*!< Incorrect device. */
};
// net class
template <typename Device>
class Net {
 public:
  Net();
  ~Net();
  void SetThreadNum(int thread_num);
  PMStatus Load(const std::string &dirname, const bool optimize = false,
                const bool quantification = false, const int batch_size = 1,
                const bool lod_mode = false);
  PMStatus Load(const std::string &model_path, const std::string &para_path,
                const bool optimize = false, const bool quantification = false,
                const int batch_size = 1, const bool lod_mode = false);
  bool LoadCombinedMemory(size_t model_len, const uint8_t *model_buf,
                          size_t combined_params_len,
                          uint8_t *combined_params_buf, bool optimize = false,
                          bool quantification = false, int batch_size = 1,
                          bool lod_mode = false);
  PMStatus Predict(const Tensor &input);
  std::vector<float> Predict(const std::vector<float> &input,
                             const std::vector<int64_t> &dims);
  PMStatus Predict();
  void Feed(const std::string &var_name, const Tensor &input);
  std::shared_ptr<Tensor> Fetch(const std::string &var_name);

  // Type-erased pointer to the underlying PaddleMobile engine, kept as
  // void * so this header does not pull in the full framework headers.
  void *engine_ = nullptr;
};
#endif
} // namespace wrap
} // namespace paddle_mobile
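// A smoke test for the new wrap interface follows (its relative includes
// suggest it lives under the test/ directory).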
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <iostream>
#include <sstream>
#include "../test_helper.h"
#include "../test_include.h"
#include "io/paddle_mobile_wrap.h"
int main(int argc, char *argv[]) {
#ifndef PADDLE_MOBILE_FPGA
  paddle_mobile::wrap::Net<paddle_mobile::wrap::CPU> *net =
      new paddle_mobile::wrap::Net<paddle_mobile::wrap::CPU>();
  net->Load("./checked_model/model", "./checked_model/params", false, false, 1,
            true);
  int size = 1 * 3 * 64 * 64;
  float *data = new float[size];
  for (int i = 0; i < size; i++) {
    data[i] = 0.0;
  }
  std::vector<int64_t> shape{1, 3, 64, 64};
  paddle_mobile::wrap::Tensor input(data,
                                    paddle_mobile::wrap::make_ddim(shape));
  net->Feed("data", input);
  net->Predict();
  auto output = net->Fetch("save_infer_model/scale_0");
  auto output_dims = output->dims();
  int output_size = 1;
  std::cout << "output shape: ";
  for (int i = 0; i < output_dims.size(); i++) {
    std::cout << output_dims[i] << " ";
    output_size *= output_dims[i];
  }
  std::cout << std::endl;
  std::cout << "output data: ";
  for (int i = 0; i < output_size; i++) {
    std::cout << output->data<float>()[i] << std::endl;
  }
  // Free the input buffer and the engine before exiting.
  delete[] data;
  delete net;
#endif
  return 0;
}