api_paddle_mobile.cc
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "io/api_paddle_mobile.h"
#include <vector>
#include "framework/tensor.h"

namespace paddle_mobile {

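// Adapts the PaddleMobile engine to the PaddlePredictor interface. Dtype
// selects the target device (CPU, FPGA, GPU_MALI, GPU_CL) and P the compute
// precision.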
template <typename Dtype, Precision P>
PaddleMobilePredictor<Dtype, P>::PaddleMobilePredictor(
    const PaddleMobileConfig &config) {
  PADDLE_MOBILE_ENFORCE(Init(config) == true,
                        "paddle mobile predictor init failed!");
  config_ = config;
}

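// Creates the underlying PaddleMobile engine and loads the model, either from
// a combined model directory or from separate program and parameter files.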
template <typename Dtype, Precision P>
bool PaddleMobilePredictor<Dtype, P>::Init(const PaddleMobileConfig &config) {
  paddle_mobile_.reset(new PaddleMobile<Dtype, P>());
  if (!config.model_dir.empty()) {
    paddle_mobile_->Load(config.model_dir, config.optimize,
                         config.quantification, config.batch_size);
  } else if (!config.prog_file.empty() && !config.param_file.empty()) {
    paddle_mobile_->Load(config.prog_file, config.param_file, config.optimize,
                         config.quantification, config.batch_size);
  } else {
    LOG(kLOG_ERROR) << "fail to load inference model!";
    return false;
  }
  // If OpenMP is enabled, set the number of worker threads.
  paddle_mobile_->SetThreadNum(config.thread_num);
  return true;
}

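// Runs one inference pass: consumes only the first input tensor, which must
// be 4-D, and writes the result into the first element of output_data.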
template <typename Dtype, Precision P>
bool PaddleMobilePredictor<Dtype, P>::Run(
    const std::vector<PaddleTensor> &inputs,
    std::vector<PaddleTensor> *output_data, int batch_size) {
  if (inputs.empty()) {
    LOG(kLOG_ERROR) << "At least one output should be set with tensors' names.";
    return false;
  }
  auto input = inputs[0];

  if (input.shape.size() != 4) {
    LOG(kLOG_ERROR) << "input shape not equal to 4!";
    return false;
  }
  std::vector<int64_t> dims;
  for (auto d : input.shape) {
    dims.push_back(static_cast<int64_t>(d));
  }

  // use tensor
  framework::DDim ddim =
      framework::make_ddim({dims[0], dims[1], dims[2], dims[3]});

  framework::Tensor input_tensor;
  input_tensor.Resize(ddim);
  int input_length = framework::product(ddim);
  typedef typename PrecisionTrait<P>::ptype PType;
  auto input_ptr = input_tensor.mutable_data<PType>();

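  // Copy the caller's buffer into the input tensor; the raw bytes are
  // interpreted as PType (float for Precision::FP32).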
  memcpy(input_ptr, static_cast<PType *>(input.data.data()),
         input_length * sizeof(PType));
  auto output_tensor = paddle_mobile_->Predict(input_tensor);

  if (output_data->empty()) {
    LOG(kLOG_ERROR) << "At least one output should be set with tensors' names.";
    return false;
  }

  auto &output = (*output_data)[0];
  int output_length = output_tensor->numel();
  std::vector<int64_t> tensor_shape =
      framework::vectorize(output_tensor->dims());

  for (auto d : tensor_shape) {
    output.shape.push_back(static_cast<int>(d));
  }

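  // Grow the output buffer only when it is too small; a larger existing
  // buffer is reused as-is.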
  if (output.data.length() < output_length * sizeof(PType)) {
    output.data.Resize(output_length * sizeof(PType));
  }

  memcpy(output.data.data(), output_tensor->template data<PType>(),
         output_length * sizeof(PType));

  return true;
}

// A factory that creates a predictor for the configured device and precision.
template <>
std::unique_ptr<PaddlePredictor>
CreatePaddlePredictor<PaddleMobileConfig, PaddleEngineKind::kPaddleMobile>(
    const PaddleMobileConfig &config) {
  std::unique_ptr<PaddlePredictor> x;
  if (config.precision == PaddleMobileConfig::FP32) {
    if (config.device == PaddleMobileConfig::kCPU) {
      x.reset(new PaddleMobilePredictor<CPU, Precision::FP32>(config));
    } else if (config.device == PaddleMobileConfig::kFPGA) {
      x.reset(new PaddleMobilePredictor<FPGA, Precision::FP32>(config));
    } else if (config.device == PaddleMobileConfig::kGPU_MALI) {
      x.reset(new PaddleMobilePredictor<GPU_MALI, Precision::FP32>(config));
    } else if (config.device == PaddleMobileConfig::kGPU_CL) {
      x.reset(new PaddleMobilePredictor<GPU_CL, Precision::FP32>(config));
    } else {
      LOG(kLOG_ERROR) << "unsupport device type!";
      return nullptr;
    }
  } else {
    LOG(kLOG_ERROR) << "unsupport precision type!";
    return nullptr;
  }
  return x;  // a named local unique_ptr is implicitly moved on return
}
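
// Usage sketch (illustrative only, not compiled here): the model path and
// input shape below are placeholders, and the exact way PaddleBuf is filled
// may differ from the real header. Only types and fields exercised above are
// assumed.
//
//   PaddleMobileConfig config;
//   config.precision = PaddleMobileConfig::FP32;
//   config.device = PaddleMobileConfig::kCPU;
//   config.model_dir = "./mobilenet";  // or set prog_file + param_file
//   config.thread_num = 1;
//   auto predictor = CreatePaddlePredictor<PaddleMobileConfig,
//                                          PaddleEngineKind::kPaddleMobile>(
//       config);
//
//   PaddleTensor input;
//   input.shape = {1, 3, 224, 224};  // Run() requires a 4-D shape
//   // ... fill input.data with 1 * 3 * 224 * 224 floats ...
//   std::vector<PaddleTensor> outputs(1);
//   predictor->Run({input}, &outputs, 1);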

}  // namespace paddle_mobile