/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "common/types.h"
#include "framework/executor.h"
#include "framework/load_ops.h"
#include "framework/loader.h"
#include "framework/tensor.h"
#include "io/paddle_inference_api.h"
#ifdef PADDLE_MOBILE_CL
#include "framework/cl/cl_engine.h"
#endif

namespace paddle_mobile {

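// The top-level inference interface of paddle-mobile. Device selects the
// execution backend (e.g. DeviceType<kGPU_CL> for the OpenCL build) and T
// is the element type, float by default.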
template <typename Device, typename T = float>
class PaddleMobile {
 public:
  PaddleMobile() {
#ifndef PADDLE_MOBILE_CL
    bool is_gpu = std::is_same<DeviceType<kGPU_CL>, Device>::value;
    PADDLE_MOBILE_ENFORCE(!is_gpu,
                          "Please recompile with PADDLE_MOBILE_CL enabled");
#endif
  }
  ~PaddleMobile() {}

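  // Loads a model from a directory, from separate model/params paths, or
  // from a PaddleMobileConfig; each overload returns a PMStatus. The
  // optimize flag requests graph optimization (e.g. operator fusion)
  // before execution.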
  PMStatus Load(const std::string &dirname, const bool optimize = false,
                const bool quantification = false, const int batch_size = 1,
                const bool lod_mode = false);
  PMStatus Load(const std::string &model_path, const std::string &para_path,
                const bool optimize = false, const bool quantification = false,
                const int batch_size = 1, const bool lod_mode = false);

  PMStatus Load(const PaddleMobileConfig &config);

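  // Runs inference on a single input; results are read back with Fetch().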
  PMStatus Predict(const framework::Tensor &input);
  PMStatus Predict(const framework::LoDTensor &input);

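  // Runs inference on multiple inputs given as (variable name, tensor) pairs.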
  PMStatus Predict(
      const std::vector<std::pair<std::string, framework::Tensor>> &inputs);
  PMStatus Predict(
      const std::vector<std::pair<std::string, framework::LoDTensor>> &inputs);

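  // Runs inference on raw input data with the given shape and returns the
  // output data. The no-argument overload re-runs the program on inputs
  // already supplied via Feed().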
  std::vector<T> Predict(const std::vector<T> &input,
                         const std::vector<int64_t> &dims);
  PMStatus Predict();

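  // Supplies an input tensor for the named variable of the loaded program.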
  void Feed(const framework::LoDTensor &input, const std::string &var_name);
  void Feed(const framework::Tensor &input, const std::string &var_name);

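  // Retrieves the named output variable; the no-argument overload reads the
  // default "fetch" variable.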
  typedef std::shared_ptr<framework::LoDTensor> LoDTensorPtr;
  LoDTensorPtr Fetch(const std::string &var_name);

  LoDTensorPtr Fetch() { return Fetch("fetch"); }

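  // Loads a combined model and its parameters from in-memory buffers
  // instead of files.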
  bool LoadCombinedMemory(size_t model_len, const uint8_t *model_buf,
                          size_t combined_params_len,
                          uint8_t *combined_params_buf);

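  // Sets the number of threads used for CPU execution.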
  void SetThreadNum(int count);
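
  // Releases the loaded program and executor state.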
  void Clear();
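
  // Returns a measured prediction time, for benchmarking.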
  double GetPredictTime();

#ifdef PADDLE_MOBILE_FPGA
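  // FPGA-only helpers for feeding data directly and running the program in
  // stages by op index.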
  void InjectVariable(const framework::Tensor &t, std::string var_name);
  void FeedData(const framework::Tensor &t);
  std::shared_ptr<framework::Tensor> FetchResult(int id = -1);
  void Predict_From_To(int start = 0, int end = -1);
  void Predict_From(int start);
  void Predict_To(int end);
#endif

#ifdef PADDLE_MOBILE_CL
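  // Helpers available only in the OpenCL (PADDLE_MOBILE_CL) build.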
 public:  // NOLINT
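  // Sets the directory the OpenCL engine uses to locate kernel files.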
  void SetCLPath(std::string cl_path);
  int readText(const char *kernelPath,
               char **pcode);  // reads a text file into pcode; returns its length
#endif

 private:
  std::shared_ptr<framework::Loader<Device, T>> loader_;
  std::shared_ptr<framework::Executor<Device, T>> executor_;
};
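
// A minimal usage sketch, not part of the API. The model path and input
// shape are hypothetical, and paddle_mobile::CPU and PMSuccess are assumed
// to come from common/types.h:
//
//   paddle_mobile::PaddleMobile<paddle_mobile::CPU> engine;
//   engine.SetThreadNum(4);
//   if (engine.Load("/path/to/model_dir", /*optimize=*/true) ==
//       paddle_mobile::PMSuccess) {
//     std::vector<float> input(1 * 3 * 224 * 224);  // e.g. NCHW image data
//     std::vector<float> output = engine.Predict(input, {1, 3, 224, 224});
//   }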

}  // namespace paddle_mobile